code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
### EXPERIMENT FLAGS ITERATION
# Scenario selector flags: 'EPI*' picks an epidemic parameter set, 'SC*'
# picks a school-closure variant. '!AV' is not read by the visible code --
# presumably consumed elsewhere; TODO confirm.
flags = {'SC4', 'EPI1', '!AV'}
### EXPERIMENT EXECUTION STUB
import sys
sys.path.append('../../../../PatchSim/')
sys.path.append('../../../../school_closures/')
import patchsim as sim
import pandas as pd
import numpy as np
import multiprocessing
import sc_variants as sc
from copy import deepcopy
np.random.seed(42)
### PREP SCENARIO BY FLAGS
# Exactly one 'EPI*' flag is expected; this raises IndexError if none is set.
epidemic = [flag for flag in flags if flag.startswith('EPI')][0]
# The school-closure flag is optional; 'null' means no closure intervention.
schoolClosures = [flag for flag in flags if flag.startswith('SC')]
if len(schoolClosures) != 0:
    schoolClosure = schoolClosures[0]
else:
    schoolClosure = 'null'
#### SET NUMBER OF REPS AND THREADS
n = 100  # number of stochastic replicates
threads = 50  # worker processes for the multiprocessing pool
### INITIAL LOADS OF PARAMS
print("Loading params")
# Load PatchSim inputs once in the parent process; each worker deep-copies
# the mutable pieces (configs, params) before modifying them.
configs = sim.read_config('config.patchsim')
patch_df = sim.load_patch(configs)
params = sim.load_params(configs, patch_df)
Theta = sim.load_Theta(configs, patch_df)
seeds = sim.load_seed(configs, params, patch_df)
# Choose the school-closure intervention: SC2/SC4 use sc.NetIntervention,
# SC1/SC3 use sc.NetInterventionAdaptive, anything else runs uninterrupted.
if schoolClosure in {'SC2','SC4'}:
    scMethod = sc.NetIntervention(configs)
elif schoolClosure in {'SC1','SC3'}:
    scMethod = sc.NetInterventionAdaptive(configs)
else:
    scMethod = None
### EXPERIMENT EXECUTION
def runPatchsimSub(args):
    """Run one PatchSim replicate with a sampled exposure rate.

    ``args`` is a ``(sample_index, beta)`` tuple.  The module-level
    ``configs`` and ``params`` are deep-copied so each worker can modify
    them independently; the resulting epi curve is returned as a DataFrame
    tagged with the sample index.
    """
    sampleIdx, betaSample = args
    print("Starting run", sampleIdx)
    cfgCopy = deepcopy(configs)
    parCopy = deepcopy(params)
    cfgCopy['ExposureRate'] = betaSample
    # Replace the 1337 placeholder betas with the sampled value, leaving
    # every other entry untouched.
    parCopy['beta'] = np.where(params['beta'] == 1337,
                               betaSample,
                               params['beta'])
    df = sim.run_disease_simulation(cfgCopy,
                                    patch_df,
                                    params=parCopy,
                                    Theta=Theta,
                                    seeds=seeds,
                                    write_epi=False,
                                    return_epi=True,
                                    intervene_step=scMethod)
    df.loc[:, 'sample'] = sampleIdx
    df.index.rename('id', inplace=True)
    return df
# Mean exposure rate and its standard deviation for the chosen epidemic.
betaOut = {'EPI1':1.29e-06,
           'EPI2':1.60e-06}[epidemic]
stdDev = {'EPI1':7.08e-08,
          'EPI2':8.60e-08}[epidemic]
# One (index, sampled beta) pair per replicate; betas drawn from a normal
# distribution (seeded above for reproducibility).
argsList = [(i,np.random.normal(betaOut,stdDev)) for i in range(n)]
print("Starting runs with beta %s and stddev %s" % (betaOut,stdDev))
# Fan the replicates out across worker processes, then merge the epi curves.
with multiprocessing.Pool(threads) as mp_pool:
    results = mp_pool.map(runPatchsimSub, argsList)
results = pd.concat(results)
results.to_csv('MergedSamples.csv') | [
"sys.path.append",
"copy.deepcopy",
"numpy.random.seed",
"patchsim.load_params",
"patchsim.run_disease_simulation",
"numpy.where",
"patchsim.load_Theta",
"sc_variants.NetInterventionAdaptive",
"multiprocessing.Pool",
"numpy.random.normal",
"patchsim.read_config",
"sc_variants.NetIntervention",... | [((106, 146), 'sys.path.append', 'sys.path.append', (['"""../../../../PatchSim/"""'], {}), "('../../../../PatchSim/')\n", (121, 146), False, 'import sys\n'), ((147, 194), 'sys.path.append', 'sys.path.append', (['"""../../../../school_closures/"""'], {}), "('../../../../school_closures/')\n", (162, 194), False, 'import sys\n'), ((334, 352), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (348, 352), True, 'import numpy as np\n'), ((750, 784), 'patchsim.read_config', 'sim.read_config', (['"""config.patchsim"""'], {}), "('config.patchsim')\n", (765, 784), True, 'import patchsim as sim\n'), ((796, 819), 'patchsim.load_patch', 'sim.load_patch', (['configs'], {}), '(configs)\n', (810, 819), True, 'import patchsim as sim\n'), ((829, 863), 'patchsim.load_params', 'sim.load_params', (['configs', 'patch_df'], {}), '(configs, patch_df)\n', (844, 863), True, 'import patchsim as sim\n'), ((872, 905), 'patchsim.load_Theta', 'sim.load_Theta', (['configs', 'patch_df'], {}), '(configs, patch_df)\n', (886, 905), True, 'import patchsim as sim\n'), ((914, 954), 'patchsim.load_seed', 'sim.load_seed', (['configs', 'params', 'patch_df'], {}), '(configs, params, patch_df)\n', (927, 954), True, 'import patchsim as sim\n'), ((2467, 2485), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (2476, 2485), True, 'import pandas as pd\n'), ((1007, 1034), 'sc_variants.NetIntervention', 'sc.NetIntervention', (['configs'], {}), '(configs)\n', (1025, 1034), True, 'import sc_variants as sc\n'), ((1354, 1371), 'copy.deepcopy', 'deepcopy', (['configs'], {}), '(configs)\n', (1362, 1371), False, 'from copy import deepcopy\n'), ((1388, 1404), 'copy.deepcopy', 'deepcopy', (['params'], {}), '(params)\n', (1396, 1404), False, 'from copy import deepcopy\n'), ((1470, 1527), 'numpy.where', 'np.where', (["(params['beta'] == 1337)", 'betaOut', "params['beta']"], {}), "(params['beta'] == 1337, betaOut, params['beta'])\n", (1478, 1527), True, 'import 
numpy as np\n'), ((1592, 1753), 'patchsim.run_disease_simulation', 'sim.run_disease_simulation', (['configsOut', 'patch_df'], {'params': 'paramsOut', 'Theta': 'Theta', 'seeds': 'seeds', 'write_epi': '(False)', 'return_epi': '(True)', 'intervene_step': 'scMethod'}), '(configsOut, patch_df, params=paramsOut, Theta=\n Theta, seeds=seeds, write_epi=False, return_epi=True, intervene_step=\n scMethod)\n', (1618, 1753), True, 'import patchsim as sim\n'), ((2358, 2387), 'multiprocessing.Pool', 'multiprocessing.Pool', (['threads'], {}), '(threads)\n', (2378, 2387), False, 'import multiprocessing\n'), ((1087, 1122), 'sc_variants.NetInterventionAdaptive', 'sc.NetInterventionAdaptive', (['configs'], {}), '(configs)\n', (1113, 1122), True, 'import sc_variants as sc\n'), ((2229, 2262), 'numpy.random.normal', 'np.random.normal', (['betaOut', 'stdDev'], {}), '(betaOut, stdDev)\n', (2245, 2262), True, 'import numpy as np\n')] |
"""
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by <NAME>
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by <NAME>
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by <NAME> and (mainly) <NAME>.
* Subclassing of the base `ndarray` 2006 by <NAME>
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by <NAME> (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: <NAME>
"""
# pylint: disable-msg=E1002
import builtins
import inspect
import operator
import warnings
import textwrap
import re
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import (
getargspec, formatargspec, long, unicode, bytes
)
from numpy import expand_dims
from numpy.core.numeric import normalize_axis_tuple
from numpy.core._internal import recursive
from numpy.compat import pickle
# Public API of numpy.ma; kept explicit so `from numpy.ma import *` and
# documentation tooling see a stable, curated surface.
__all__ = [
    'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
    'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
    'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos',
    'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
    'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray',
    'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil',
    'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
    'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
    'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
    'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp',
    'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
    'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
    'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
    'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
    'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
    'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
    'less', 'less_equal', 'log', 'log10', 'log2',
    'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
    'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
    'masked_array', 'masked_equal', 'masked_greater',
    'masked_greater_equal', 'masked_inside', 'masked_invalid',
    'masked_less', 'masked_less_equal', 'masked_not_equal',
    'masked_object', 'masked_outside', 'masked_print_option',
    'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
    'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
    'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
    'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
    'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',
    'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
    'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
    'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
    'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
    'var', 'where', 'zeros',
    ]

# The scalar type used for masks, and the singleton meaning "no mask".
MaskType = np.bool_
nomask = MaskType(0)
class MaskedArrayFutureWarning(FutureWarning):
    # Warning category for numpy.ma behavior scheduled to change in a
    # future release (e.g. the default axis of argsort, see gh-8701).
    pass
def _deprecate_argsort_axis(arr):
    """
    Return the axis to pass to argsort, warning when the legacy default
    (axis=None) may differ from the documented axis=-1.

    Parameters
    ----------
    arr
        The array which argsort was called on

    np.ma.argsort has a long-term bug where the default of the axis argument
    is wrong (gh-8701), which must be kept for backwards compatibility.  The
    difference only shows for 2-D and higher arrays, so the warning is
    restricted to that case.
    """
    if arr.ndim > 1:
        # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
        warnings.warn(
            "In the future the default for argsort will be axis=-1, not the "
            "current None, to match its documentation and np.argsort. "
            "Explicitly pass -1 or None to silence this warning.",
            MaskedArrayFutureWarning, stacklevel=3)
        return None
    # Scalar / 1-D: silently switch to -1 to avoid surprising subclasses,
    # which are more likely to implement scalar axes.
    return -1
def doc_note(initialdoc, note):
    """
    Insert a Notes section containing ``note`` into ``initialdoc``.

    Returns None when there is no docstring to extend, and the original
    docstring unchanged when there is no note to add.
    """
    if initialdoc is None:
        return
    if note is None:
        return initialdoc

    cleaned = inspect.cleandoc(initialdoc)
    # Split just before an existing Notes header (if any) so the new notes
    # land in front of it.
    parts = re.split(r'\n\s*?Notes\n\s*?-----', cleaned)
    notes_section = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note)
    return ''.join(parts[:1] + [notes_section] + parts[1:])
def get_object_signature(obj):
    """
    Return the argument signature of ``obj`` as a string, or '' when the
    signature cannot be introspected (e.g. for builtins).
    """
    try:
        return formatargspec(*getargspec(obj))
    except TypeError:
        return ''
###############################################################################
# Exceptions #
###############################################################################
class MAError(Exception):
    """
    Base class for all masked-array related errors.
    """
class MaskError(MAError):
    """
    Error raised for problems specific to masks.
    """
###############################################################################
# Filling options #
###############################################################################
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
# Default fill value per dtype kind character; datetime/timedelta entries
# keyed as 'M8[unit]'/'m8[unit]' are appended by the loop below.
default_filler = {'b': True,
                  'c': 1.e20 + 0.0j,
                  'f': 1.e20,
                  'i': 999999,
                  'O': '?',
                  'S': b'N/A',
                  'u': 999999,
                  'V': b'???',
                  'U': u'N/A'
                  }
# Add datetime64 and timedelta64 types: one NaT filler per time unit.
for v in ("Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
          "fs", "as"):
    default_filler["M8[%s]" % v] = np.datetime64("NaT", v)
    default_filler["m8[%s]" % v] = np.timedelta64("NaT", v)
float_types_list = [np.half, np.single, np.double, np.longdouble,
                    np.csingle, np.cdouble, np.clongdouble]
# Fill values used when reducing with maximum/minimum: maximum() starts
# from the most negative representable value, minimum() from the most
# positive, so masked entries never win the comparison.
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in float_types_list[:4]])
max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in float_types_list[:4]])
min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]])
del float_types_list
def _recursive_fill_value(dtype, f):
    """
    Recursively produce a fill value for `dtype`, calling f on scalar dtypes
    """
    if dtype.names is not None:
        # Structured dtype: build a field-wise tuple, then decay the 0-d
        # array to a void scalar.
        fields = tuple(_recursive_fill_value(dtype[name], f)
                       for name in dtype.names)
        return np.array(fields, dtype=dtype)[()]
    if dtype.subdtype:
        # Subarray dtype: broadcast the scalar fill over the subshape.
        scalar_dtype, shape = dtype.subdtype
        return np.full(shape, _recursive_fill_value(scalar_dtype, f))
    return f(dtype)
def _get_dtype_of(obj):
    """ Convert the argument for *_fill_value into a dtype """
    if isinstance(obj, np.dtype):
        return obj
    try:
        return obj.dtype
    except AttributeError:
        # Plain scalar / sequence: let numpy infer the dtype.
        return np.asanyarray(obj).dtype
def default_fill_value(obj):
    """
    Return the default fill value for the argument object.

    The default filling value depends on the datatype of the input
    array or the type of the input scalar:

       ======== ========
       datatype default
       ======== ========
       bool     True
       int      999999
       float    1.e20
       complex  1.e20+0j
       object   '?'
       string   'N/A'
       ======== ========

    For structured types, a structured scalar is returned, with each field
    the default fill value for its type.  For subarray types, the fill
    value is an array of the same size containing the default scalar fill
    value.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        The array data-type or scalar for which the default fill value
        is returned.

    Returns
    -------
    fill_value : scalar
        The default fill value.

    Examples
    --------
    >>> np.ma.default_fill_value(1)
    999999
    >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
    1e+20
    >>> np.ma.default_fill_value(np.dtype(complex))
    (1e+20+0j)
    """
    def _scalar_fill_value(dtype):
        # datetime/timedelta fillers are keyed by 'M8[unit]'/'m8[unit]';
        # every other kind by its single dtype-kind character.
        key = dtype.str[1:] if dtype.kind in 'Mm' else dtype.kind
        return default_filler.get(key, '?')

    return _recursive_fill_value(_get_dtype_of(obj), _scalar_fill_value)
def _extremum_fill_value(obj, extremum, extremum_name):
    """
    Shared implementation of minimum_fill_value/maximum_fill_value: look
    the dtype up in the per-dtype `extremum` table, raising TypeError for
    unsupported dtypes.
    """
    def _scalar_fill_value(dtype):
        try:
            return extremum[dtype]
        except KeyError:
            raise TypeError(
                f"Unsuitable type {dtype} for calculating {extremum_name}."
            ) from None

    return _recursive_fill_value(_get_dtype_of(obj), _scalar_fill_value)
def minimum_fill_value(obj):
    """
    Return the maximum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the minimum of an array with a given dtype: starting from the
    largest representable value guarantees any real entry wins.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for it's numeric type.

    Returns
    -------
    val : scalar
        The maximum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    maximum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.minimum_fill_value(np.int8())
    127
    >>> ma.minimum_fill_value(np.array([1, 2, 3], dtype=np.float32))
    inf
    """
    return _extremum_fill_value(obj, min_filler, "minimum")
def maximum_fill_value(obj):
    """
    Return the minimum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the maximum of an array with a given dtype: starting from the
    smallest representable value guarantees any real entry wins.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for it's numeric type.

    Returns
    -------
    val : scalar
        The minimum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    minimum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.maximum_fill_value(np.int8())
    -128
    >>> ma.maximum_fill_value(np.array([1, 2, 3], dtype=np.float32))
    -inf
    """
    return _extremum_fill_value(obj, max_filler, "maximum")
def _recursive_set_fill_value(fillvalue, dt):
    """
    Create a fill value for a structured dtype.

    Parameters
    ----------
    fillvalue: scalar or array_like
        Scalar or array representing the fill value. If it is of shorter
        length than the number of fields in dt, it will be resized.
    dt: dtype
        The structured dtype for which to create the fill value.

    Returns
    -------
    val: tuple
        A tuple of values corresponding to the structured fill value.
    """
    # Repeat/trim the fill value so there is exactly one entry per field.
    per_field = np.resize(fillvalue, len(dt.names))
    out = []
    for value, name in zip(per_field, dt.names):
        field_dtype = dt[name]
        if field_dtype.subdtype:
            # For subarray fields only the base scalar dtype matters here.
            field_dtype = field_dtype.subdtype[0]
        if field_dtype.names is not None:
            out.append(tuple(_recursive_set_fill_value(value, field_dtype)))
        else:
            out.append(np.array(value, dtype=field_dtype).item())
    return tuple(out)
def _check_fill_value(fill_value, ndtype):
    """
    Private function validating the given `fill_value` for the given dtype.

    If fill_value is None, it is set to the default corresponding to the
    dtype.  If fill_value is not None, its value is forced to the given
    dtype.

    The result is always a 0d array.
    """
    ndtype = np.dtype(ndtype)
    if fill_value is None:
        fill_value = default_fill_value(ndtype)
    elif ndtype.names is not None:
        # Structured dtype: accept a compatible ndarray/void directly,
        # otherwise build the value field by field.
        if isinstance(fill_value, (ndarray, np.void)):
            try:
                fill_value = np.array(fill_value, copy=False, dtype=ndtype)
            except ValueError as e:
                raise ValueError(
                    "Unable to transform %s to dtype %s"
                    % (fill_value, ndtype)) from e
        else:
            fields = np.asarray(fill_value, dtype=object)
            fill_value = np.array(_recursive_set_fill_value(fields, ndtype),
                                  dtype=ndtype)
    elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'):
        # Note this check doesn't work if fill_value is not a scalar
        raise TypeError(
            "Cannot set fill value of string with array of dtype %s" % ndtype)
    else:
        # In case we want to convert 1e20 to int.
        # Also in case of converting string arrays.
        try:
            fill_value = np.array(fill_value, copy=False, dtype=ndtype)
        except (OverflowError, ValueError) as e:
            # Raise TypeError instead of OverflowError or ValueError.
            # OverflowError is seldom used, and the real problem here is
            # that the passed fill_value is not compatible with the ndtype.
            raise TypeError(
                "Cannot convert fill_value %s to dtype %s"
                % (fill_value, ndtype)) from e
    return np.array(fill_value)
def set_fill_value(a, fill_value):
    """
    Set the filling value of a, if a is a masked array.

    This function changes the fill value of the masked array `a` in place.
    If `a` is not a masked array, the function returns silently, without
    doing anything.

    Parameters
    ----------
    a : array_like
        Input array.
    fill_value : dtype
        Filling value. A consistency test is performed to make sure
        the value is compatible with the dtype of `a`.

    Returns
    -------
    None
        Nothing returned by this function.

    See Also
    --------
    maximum_fill_value : Return the default fill value for a dtype.
    MaskedArray.fill_value : Return current fill value.
    MaskedArray.set_fill_value : Equivalent method.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_where(np.arange(5) < 3, np.arange(5))
    >>> ma.set_fill_value(a, -999)
    >>> a
    masked_array(data=[--, --, --, 3, 4],
                 mask=[ True,  True,  True, False, False],
           fill_value=-999)

    Nothing happens if `a` is not a masked array (e.g. a list or a plain
    ndarray): the input is left untouched.
    """
    if isinstance(a, MaskedArray):
        a.set_fill_value(fill_value)
def get_fill_value(a):
    """
    Return the filling value of a, if any. Otherwise, returns the
    default filling value for that type.
    """
    if isinstance(a, MaskedArray):
        return a.fill_value
    return default_fill_value(a)
def common_fill_value(a, b):
    """
    Return the common filling value of two masked arrays, if any.

    If ``a.fill_value == b.fill_value``, return the fill value,
    otherwise return None.

    Parameters
    ----------
    a, b : MaskedArray
        The masked arrays for which to compare fill values.

    Returns
    -------
    fill_value : scalar or None
        The common fill value, or None.

    Examples
    --------
    >>> x = np.ma.array([0, 1.], fill_value=3)
    >>> y = np.ma.array([0, 1.], fill_value=3)
    >>> np.ma.common_fill_value(x, y)
    3.0
    """
    fv_a = get_fill_value(a)
    fv_b = get_fill_value(b)
    return fv_a if fv_a == fv_b else None
def filled(a, fill_value=None):
    """
    Return input as an array with masked data replaced by a fill value.

    If `a` is not a `MaskedArray`, `a` itself is returned.
    If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
    ``a.fill_value``.

    Parameters
    ----------
    a : MaskedArray or array_like
        An input object.
    fill_value : array_like, optional.
        Can be scalar or non-scalar. If non-scalar, the
        resulting filled array should be broadcastable
        over input array. Default is None.

    Returns
    -------
    a : ndarray
        The filled array.

    See Also
    --------
    compressed

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [1, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> x.filled()
    array([[999999,      1,      2],
           [999999,      4,      5],
           [     6,      7,      8]])
    >>> x.filled(fill_value=333)
    array([[333,   1,   2],
           [333,   4,   5],
           [  6,   7,   8]])
    """
    if hasattr(a, 'filled'):
        # Masked arrays (and anything masked-array-like) fill themselves.
        return a.filled(fill_value)
    if isinstance(a, ndarray):
        # Plain ndarrays carry no mask: return unchanged.
        # Should we check for contiguity ? and a.flags['CONTIGUOUS']:
        return a
    if isinstance(a, dict):
        # A dict becomes a 0-d object array rather than raising.
        return np.array(a, 'O')
    return np.array(a)
def get_masked_subclass(*arrays):
    """
    Return the youngest subclass of MaskedArray from a list of (masked) arrays.

    In case of siblings, the first listed takes over.
    """
    if len(arrays) == 1:
        only = arrays[0]
        rcls = type(only) if isinstance(only, MaskedArray) else MaskedArray
    else:
        classes = [type(a) for a in arrays]
        rcls = classes[0]
        if not issubclass(rcls, MaskedArray):
            rcls = MaskedArray
        # Keep walking down to the most derived class among the inputs.
        for cls in classes[1:]:
            if issubclass(cls, rcls):
                rcls = cls
    # Don't return MaskedConstant as result: revert to MaskedArray
    if rcls.__name__ == 'MaskedConstant':
        return MaskedArray
    return rcls
def getdata(a, subok=True):
    """
    Return the data of a masked array as an ndarray.

    Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
    else return `a` as a ndarray or subclass (depending on `subok`) if not.

    Parameters
    ----------
    a : array_like
        Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
    subok : bool
        Whether to force the output to be a `pure` ndarray (False) or to
        return a subclass of ndarray if appropriate (True, default).

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getmaskarray : Return the mask of a masked array, or full array of False.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1, 2], [3, 4]], 2)
    >>> ma.getdata(a)
    array([[1, 2],
           [3, 4]])

    Equivalently use the ``MaskedArray`` `data` attribute.

    >>> a.data
    array([[1, 2],
           [3, 4]])
    """
    try:
        data = a._data
    except AttributeError:
        # Not masked-array-like: coerce, avoiding a copy when possible.
        data = np.array(a, copy=False, subok=subok)
    if subok:
        return data
    return data.view(ndarray)


get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
    """
    Return input with invalid data masked and replaced by a fill value.

    Invalid data means values of `nan`, `inf`, etc.

    Parameters
    ----------
    a : array_like
        Input array, a (subclass of) ndarray.
    mask : sequence, optional
        Mask. Must be convertible to an array of booleans with the same
        shape as `data`. True indicates a masked (i.e. invalid) data.
    copy : bool, optional
        Whether to use a copy of `a` (True) or to fix `a` in place (False).
        Default is True.
    fill_value : scalar, optional
        Value used for fixing invalid data. Default is None, in which case
        the ``a.fill_value`` is used.

    Returns
    -------
    b : MaskedArray
        The input array with invalid entries fixed.

    Notes
    -----
    A copy is performed by default.

    Examples
    --------
    >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
    >>> np.ma.fix_invalid(x)
    masked_array(data=[--, -1.0, --, --],
                 mask=[ True, False,  True,  True],
           fill_value=1e+20)
    >>> np.ma.fix_invalid(x).data
    array([ 1.e+00, -1.e+00,  1.e+20,  1.e+20])
    """
    result = masked_array(a, copy=copy, mask=mask, subok=True)
    bad = ~np.isfinite(result._data)
    if not bad.any():
        return result
    # Mask the invalid entries, and overwrite the underlying data so that
    # nan/inf do not leak into later computations on the filled array.
    result._mask |= bad
    if fill_value is None:
        fill_value = result.fill_value
    result._data[bad] = fill_value
    return result
def is_string_or_list_of_strings(val):
    """Return truthy when `val` is a str or a non-empty list of str."""
    if isinstance(val, str):
        return True
    # Note: an empty list is falsy (short-circuits on `val` itself).
    return (isinstance(val, list) and val and
            builtins.all(isinstance(item, str) for item in val))
###############################################################################
# Ufuncs #
###############################################################################
# Registries mapping each wrapped ufunc to its validity domain and its
# fill value(s); populated by the _Masked*Operation constructors below.
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
    """
    Define a valid interval, so that :

    ``domain_check_interval(a,b)(x) == True`` where
    ``x < a`` or ``x > b``.
    """

    def __init__(self, a, b):
        "domain_check_interval(a,b)(x) = true where x < a or y > b"
        # Normalize so that self.a <= self.b.
        if a > b:
            a, b = b, a
        self.a = a
        self.b = b

    def __call__(self, x):
        "Execute the call behavior."
        # nans at masked positions cause RuntimeWarnings, even though
        # they are masked. To avoid this we suppress warnings.
        with np.errstate(invalid='ignore'):
            above = umath.greater(x, self.b)
            below = umath.less(x, self.a)
            return umath.logical_or(above, below)
class _DomainTan:
    """
    Define a valid interval for the `tan` function, so that:

    ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
    """

    def __init__(self, eps):
        "domain_tan(eps) = true where abs(cos(x)) < eps)"
        self.eps = eps

    def __call__(self, x):
        "Executes the call behavior."
        # tan diverges where cos(x) is (nearly) zero; flag those points.
        with np.errstate(invalid='ignore'):
            cos_magnitude = umath.absolute(umath.cos(x))
            return umath.less(cos_magnitude, self.eps)
class _DomainSafeDivide:
    """
    Define a domain for safe division.
    """

    def __init__(self, tolerance=None):
        self.tolerance = tolerance

    def __call__(self, a, b):
        # Delay the selection of the tolerance to here in order to reduce numpy
        # import times. The calculation of these parameters is a substantial
        # component of numpy's import time.
        if self.tolerance is None:
            self.tolerance = np.finfo(float).tiny
        # don't call ma ufuncs from __array_wrap__ which would fail for scalars
        num = np.asarray(a)
        den = np.asarray(b)
        with np.errstate(invalid='ignore'):
            return umath.absolute(num) * self.tolerance >= umath.absolute(den)
class _DomainGreater:
    """
    DomainGreater(v)(x) is True where x <= v.
    """

    def __init__(self, critical_value):
        "DomainGreater(v)(x) = true where x <= v"
        self.critical_value = critical_value

    def __call__(self, x):
        "Executes the call behavior."
        # Flag everything at or below the critical value.
        with np.errstate(invalid='ignore'):
            result = umath.less_equal(x, self.critical_value)
        return result
class _DomainGreaterEqual:
    """
    DomainGreaterEqual(v)(x) is True where x < v.
    """

    def __init__(self, critical_value):
        "DomainGreaterEqual(v)(x) = true where x < v"
        self.critical_value = critical_value

    def __call__(self, x):
        "Executes the call behavior."
        # Flag everything strictly below the critical value.
        with np.errstate(invalid='ignore'):
            result = umath.less(x, self.critical_value)
        return result
class _MaskedUFunc:
    """Common base for the masked ufunc wrappers: records the wrapped
    ufunc and adopts its name and docstring so introspection stays useful."""

    def __init__(self, ufunc):
        self.f = ufunc
        self.__doc__ = ufunc.__doc__
        self.__name__ = ufunc.__name__

    def __str__(self):
        return f"Masked version of {self.f}"
class _MaskedUnaryOperation(_MaskedUFunc):
    """
    Defines masked version of unary operations, where invalid values are
    pre-masked.

    Parameters
    ----------
    mufunc : callable
        The function for which to define a masked version. Made available
        as ``_MaskedUnaryOperation.f``.
    fill : scalar, optional
        Filling value, default is 0.
    domain : class instance
        Domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.
    """

    def __init__(self, mufunc, fill=0, domain=None):
        super(_MaskedUnaryOperation, self).__init__(mufunc)
        self.fill = fill
        self.domain = domain
        # Register the domain and fill value so they can be looked up by
        # ufunc in the module-level registries.
        ufunc_domain[mufunc] = domain
        ufunc_fills[mufunc] = fill

    def __call__(self, a, *args, **kwargs):
        """
        Execute the call behavior.
        """
        d = getdata(a)
        # Deal with domain
        if self.domain is not None:
            # Case 1.1. : Domained function
            # nans at masked positions cause RuntimeWarnings, even though
            # they are masked. To avoid this we suppress warnings.
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            # Make a mask: non-finite results, out-of-domain inputs, plus
            # whatever was already masked in `a`.
            m = ~umath.isfinite(result)
            m |= self.domain(d)
            m |= getmask(a)
        else:
            # Case 1.2. : Function without a domain
            # Get the result and the mask
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            m = getmask(a)
        if not result.ndim:
            # Case 2.1. : The result is scalarscalar
            if m:
                return masked
            return result
        if m is not nomask:
            # Case 2.2. The result is an array
            # We need to fill the invalid data back w/ the input Now,
            # that's plain silly: in C, we would just skip the element and
            # keep the original, but we do have to do it that way in Python
            # In case result has a lower dtype than the inputs (as in
            # equal)
            try:
                np.copyto(result, d, where=m)
            except TypeError:
                # copyto can refuse the cast; masked entries keep the
                # computed (invalid) value, which is hidden by the mask.
                pass
        # Transform to a (subclass of) MaskedArray.
        masked_result = result.view(get_masked_subclass(a))
        masked_result._mask = m
        masked_result._update_from(a)
        return masked_result
class _MaskedBinaryOperation(_MaskedUFunc):
    """
    Define masked version of binary operations, where invalid
    values are pre-masked.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_MaskedBinaryOperation.f``.
    domain : class instance
        Default domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.
    """

    def __init__(self, mbfunc, fillx=0, filly=0):
        """
        abfunc(fillx, filly) must be defined.

        abfunc(x, filly) = x for all x to enable reduce.
        """
        super(_MaskedBinaryOperation, self).__init__(mbfunc)
        self.fillx = fillx
        self.filly = filly
        # Binary operations wrapped here have no restricted domain; the
        # registries are still populated for lookup by ufunc.
        ufunc_domain[mbfunc] = None
        ufunc_fills[mbfunc] = (fillx, filly)

    def __call__(self, a, b, *args, **kwargs):
        """
        Execute the call behavior.
        """
        # Get the data, as ndarray
        (da, db) = (getdata(a), getdata(b))
        # Get the result
        with np.errstate():
            np.seterr(divide='ignore', invalid='ignore')
            result = self.f(da, db, *args, **kwargs)
        # Get the mask for the result: combine the operand masks, avoiding
        # building a full mask array unless one operand actually has one.
        (ma, mb) = (getmask(a), getmask(b))
        if ma is nomask:
            if mb is nomask:
                m = nomask
            else:
                m = umath.logical_or(getmaskarray(a), mb)
        elif mb is nomask:
            m = umath.logical_or(ma, getmaskarray(b))
        else:
            m = umath.logical_or(ma, mb)
        # Case 1. : scalar
        if not result.ndim:
            if m:
                return masked
            return result
        # Case 2. : array
        # Revert result to da where masked
        if m is not nomask and m.any():
            # any errors, just abort; impossible to guarantee masked values
            try:
                np.copyto(result, da, casting='unsafe', where=m)
            except Exception:
                pass
        # Transforms to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a, b))
        masked_result._mask = m
        # Propagate metadata from whichever operand is a masked array.
        if isinstance(a, MaskedArray):
            masked_result._update_from(a)
        elif isinstance(b, MaskedArray):
            masked_result._update_from(b)
        return masked_result

    def reduce(self, target, axis=0, dtype=None):
        """
        Reduce `target` along the given `axis`.
        """
        tclass = get_masked_subclass(target)
        m = getmask(target)
        # Fill masked entries with the neutral element before reducing.
        t = filled(target, self.filly)
        if t.shape == ():
            t = t.reshape(1)
            if m is not nomask:
                m = make_mask(m, copy=True)
                m.shape = (1,)
        if m is nomask:
            tr = self.f.reduce(t, axis)
            mr = nomask
        else:
            tr = self.f.reduce(t, axis, dtype=dtype or t.dtype)
            # A reduced position is masked only if every input was masked.
            mr = umath.logical_and.reduce(m, axis)
        if not tr.shape:
            if mr:
                return masked
            else:
                return tr
        masked_tr = tr.view(tclass)
        masked_tr._mask = mr
        return masked_tr

    def outer(self, a, b):
        """
        Return the function applied to the outer product of a and b.
        """
        (da, db) = (getdata(a), getdata(b))
        d = self.f.outer(da, db)
        ma = getmask(a)
        mb = getmask(b)
        if ma is nomask and mb is nomask:
            m = nomask
        else:
            ma = getmaskarray(a)
            mb = getmaskarray(b)
            # An output cell is masked if either contributing input was.
            m = umath.logical_or.outer(ma, mb)
        if (not m.ndim) and m:
            return masked
        if m is not nomask:
            np.copyto(d, da, where=m)
        if not d.shape:
            return d
        masked_d = d.view(get_masked_subclass(a, b))
        masked_d._mask = m
        return masked_d

    def accumulate(self, target, axis=0):
        """Accumulate `target` along `axis` after filling with y fill
        value.
        """
        tclass = get_masked_subclass(target)
        # Masked entries are replaced by the neutral element, so the
        # accumulated result carries no mask.
        t = filled(target, self.filly)
        result = self.f.accumulate(t, axis)
        masked_result = result.view(tclass)
        return masked_result
class _DomainedBinaryOperation(_MaskedUFunc):
    """
    Define binary operations that have a domain, like divide.

    They have no reduce, outer or accumulate.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_DomainedBinaryOperation.f``.
    domain : class instance
        Default domain for the function. Should be one of the ``_Domain*``
        classes.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.

    """

    def __init__(self, dbfunc, domain, fillx=0, filly=0):
        """abfunc(fillx, filly) must be defined.
        abfunc(x, filly) = x for all x to enable reduce.
        """
        super(_DomainedBinaryOperation, self).__init__(dbfunc)
        self.domain = domain
        self.fillx = fillx
        self.filly = filly
        # Register the domain and fill values in the module-level tables
        # so other masked operations can look them up by ufunc.
        ufunc_domain[dbfunc] = domain
        ufunc_fills[dbfunc] = (fillx, filly)

    def __call__(self, a, b, *args, **kwargs):
        "Execute the call behavior."
        # Get the data
        (da, db) = (getdata(a), getdata(b))
        # Get the result, silencing divide/invalid warnings: bad entries
        # are detected and masked below instead.
        with np.errstate(divide='ignore', invalid='ignore'):
            result = self.f(da, db, *args, **kwargs)
        # Get the mask as a combination of the source masks and invalid
        m = ~umath.isfinite(result)
        m |= getmask(a)
        m |= getmask(b)
        # Apply the domain
        domain = ufunc_domain.get(self.f, None)
        if domain is not None:
            m |= domain(da, db)
        # Take care of the scalar case first
        if not m.ndim:
            if m:
                return masked
            else:
                return result
        # When the mask is True, put back da if possible
        # any errors, just abort; impossible to guarantee masked values
        try:
            # Zero out masked slots, then add back m*da so masked slots
            # carry the first operand's data (avoids "*", which may be
            # overlaid).
            np.copyto(result, 0, casting='unsafe', where=m)
            # avoid using "*" since this may be overlaid
            masked_da = umath.multiply(m, da)
            # only add back if it can be cast safely
            if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
                result += masked_da
        except Exception:
            pass
        # Transforms to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a, b))
        masked_result._mask = m
        # Propagate metadata (fill_value, etc.) from a masked input.
        if isinstance(a, MaskedArray):
            masked_result._update_from(a)
        elif isinstance(b, MaskedArray):
            masked_result._update_from(b)
        return masked_result
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle)  # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)

# Domained unary ufuncs: the second argument is the fill value, the third
# the validity domain outside of which results are masked.
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
                             _DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
                            _DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
                             _DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
                              _DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
                            _DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
                                _DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
                                _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))

# Binary ufuncs
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
# Comparison ufuncs: reducing a comparison is not meaningful, so their
# `reduce` attribute is disabled.
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)

# Domained binary ufuncs: mask where the second operand would make the
# operation invalid (e.g. division by zero).
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
                                       _DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
                                        _DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
                                     _DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
###############################################################################
# Mask creation functions #
###############################################################################
def _replace_dtype_fields_recursive(dtype, primitive_dtype):
"Private function allowing recursion in _replace_dtype_fields."
_recurse = _replace_dtype_fields_recursive
# Do we have some name fields ?
if dtype.names is not None:
descr = []
for name in dtype.names:
field = dtype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recurse(field[0], primitive_dtype)))
new_dtype = np.dtype(descr)
# Is this some kind of composite a la (float,2)
elif dtype.subdtype:
descr = list(dtype.subdtype)
descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
new_dtype = np.dtype(tuple(descr))
# this is a primitive type, so do a direct replacement
else:
new_dtype = primitive_dtype
# preserve identity of dtypes
if new_dtype == dtype:
new_dtype = dtype
return new_dtype
def _replace_dtype_fields(dtype, primitive_dtype):
    """
    Construct a dtype description list from a given dtype.

    Returns a new dtype object in which every field and subtype of the
    given type has been recursively replaced with `primitive_dtype`.
    Both arguments are coerced to dtypes first.
    """
    return _replace_dtype_fields_recursive(np.dtype(dtype),
                                           np.dtype(primitive_dtype))
def make_mask_descr(ndtype):
    """
    Construct a dtype description list from a given dtype.

    Returns a new dtype object, with the type of all fields in `ndtype` to a
    boolean type. Field names are not altered.

    Parameters
    ----------
    ndtype : dtype
        The dtype to convert.

    Returns
    -------
    result : dtype
        A dtype that looks like `ndtype`, the type of all fields is boolean.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> dtype = np.dtype({'names':['foo', 'bar'],
    ...                   'formats':[np.float32, np.int64]})
    >>> dtype
    dtype([('foo', '<f4'), ('bar', '<i8')])
    >>> ma.make_mask_descr(dtype)
    dtype([('foo', '|b1'), ('bar', '|b1')])
    >>> ma.make_mask_descr(np.float32)
    dtype('bool')

    """
    # Replace every leaf type (recursively) with the boolean MaskType.
    return _replace_dtype_fields(ndtype, MaskType)
def getmask(a):
    """
    Return the mask of a masked array, or nomask.

    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
    mask is not `nomask`, else return `nomask`. To guarantee a full array
    of booleans of the same shape as a, use `getmaskarray`.

    Parameters
    ----------
    a : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getdata : Return the data of a masked array as an ndarray.
    getmaskarray : Return the mask of a masked array, or full array of False.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1, 2], [3, 4]], 2)
    >>> ma.getmask(a)
    array([[False,  True],
           [False, False]])

    An unmasked array yields ``nomask``:

    >>> ma.getmask(ma.masked_array([[1, 2], [3, 4]])) is ma.nomask
    True

    """
    # Anything without a ``_mask`` attribute is treated as unmasked.
    try:
        return a._mask
    except AttributeError:
        return nomask


get_mask = getmask
def getmaskarray(arr):
    """
    Return the mask of a masked array, or full boolean array of False.

    Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
    the mask is not `nomask`, else return a full boolean array of False of
    the same shape as `arr`.

    Parameters
    ----------
    arr : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getdata : Return the data of a masked array as an ndarray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1, 2], [3, 4]], 2)
    >>> ma.getmaskarray(a)
    array([[False,  True],
           [False, False]])

    When the mask is ``nomask`` an all-False array is returned:

    >>> ma.getmaskarray(ma.masked_array([[1, 2], [3, 4]]))
    array([[False, False],
           [False, False]])

    """
    current = getmask(arr)
    if current is not nomask:
        return current
    # No explicit mask: synthesize an all-False one matching arr's shape
    # (and field structure, if any).
    return make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
def is_mask(m):
    """
    Return True if m is a valid, standard mask.

    This function does not check the contents of the input, only that the
    type is MaskType. In particular, this function returns False if the
    mask has a flexible dtype.

    Parameters
    ----------
    m : array_like
        Array to test.

    Returns
    -------
    result : bool
        True if `m.dtype.type` is MaskType, False otherwise.

    See Also
    --------
    ma.isMaskedArray : Test whether input is an instance of MaskedArray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
    >>> ma.is_mask(m)
    False
    >>> ma.is_mask(m.mask)
    True

    Input must be an ndarray (or have similar attributes); a plain list of
    booleans is not a mask:

    >>> ma.is_mask([False, True, False])
    False
    >>> ma.is_mask(np.array([False, True, False]))
    True

    Arrays with structured (flexible) dtypes don't qualify either:

    >>> m = np.array([(True, False)], dtype=[('monty', bool), ('pithon', bool)])
    >>> ma.is_mask(m)
    False

    """
    # Objects without a dtype (lists, scalars, ...) are not masks.
    try:
        verdict = (m.dtype.type is MaskType)
    except AttributeError:
        verdict = False
    return verdict
def _shrink_mask(m):
    """
    Collapse `m` to ``nomask`` when it is an unstructured mask with no
    True entries; otherwise return `m` unchanged.
    """
    if m.dtype.names is not None or m.any():
        return m
    return nomask
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
    """
    Create a boolean mask from an array.

    Return `m` as a boolean mask, creating a copy if necessary or requested.
    Any sequence convertible to integers is accepted, as is ``nomask``.
    Values of 0 are interpreted as False, everything else as True.

    Parameters
    ----------
    m : array_like
        Potential mask.
    copy : bool, optional
        Whether to return a copy of `m` (True) or `m` itself (False).
    shrink : bool, optional
        Whether to shrink `m` to ``nomask`` if all its values are False.
    dtype : dtype, optional
        Data-type of the output mask. By default, the output mask has a
        dtype of MaskType (bool). If the dtype is flexible, each field has
        a boolean dtype. This is ignored when `m` is ``nomask``, in which
        case ``nomask`` is always returned.

    Returns
    -------
    result : ndarray
        A boolean mask derived from `m`.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.make_mask([1, 0, 2, -3])
    array([ True, False,  True,  True])

    Effect of the `shrink` parameter:

    >>> ma.make_mask(np.zeros(4))
    False
    >>> ma.make_mask(np.zeros(4), shrink=False)
    array([False, False, False, False])

    With a flexible `dtype`, each field is converted to boolean:

    >>> dtype = np.dtype({'names': ['man', 'mouse'],
    ...                   'formats': [np.int64, np.int64]})
    >>> arr = np.array([(1, 0), (0, 1)], dtype=dtype)
    >>> ma.make_mask(arr, dtype=dtype)
    array([( True, False), (False,  True)],
          dtype=[('man', '|b1'), ('mouse', '|b1')])

    """
    if m is nomask:
        return nomask

    # Normalize the requested dtype (flexible dtypes become all-boolean).
    mask_dtype = make_mask_descr(dtype)

    # Legacy special case: collapsing a structured mask to a plain bool
    # mask means "existence of fields implies true".
    if isinstance(m, ndarray) and m.dtype.fields and mask_dtype == np.bool_:
        return np.ones(m.shape, dtype=mask_dtype)

    # Fill missing entries with True and coerce to an ndarray.
    result = np.array(filled(m, True), copy=copy, dtype=mask_dtype, subok=True)
    # Bas les masques !
    return _shrink_mask(result) if shrink else result
def make_mask_none(newshape, dtype=None):
    """
    Return a boolean mask of the given shape, filled with False.

    This function returns a boolean ndarray with all entries False, that can
    be used in common mask manipulations. If a complex dtype is specified,
    the type of each field is converted to a boolean type.

    Parameters
    ----------
    newshape : tuple
        A tuple indicating the shape of the mask.
    dtype : {None, dtype}, optional
        If None, use a MaskType instance. Otherwise, use a new datatype with
        the same fields as `dtype`, converted to boolean types.

    Returns
    -------
    result : ndarray
        An ndarray of appropriate shape and dtype, filled with False.

    See Also
    --------
    make_mask : Create a boolean mask from an array.
    make_mask_descr : Construct a dtype description list from a given dtype.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.make_mask_none((3,))
    array([False, False, False])

    With a structured dtype, each field becomes boolean:

    >>> dtype = np.dtype({'names': ['foo', 'bar'],
    ...                   'formats': [np.float32, np.int64]})
    >>> ma.make_mask_none((3,), dtype=dtype)
    array([(False, False), (False, False), (False, False)],
          dtype=[('foo', '|b1'), ('bar', '|b1')])

    """
    mask_dtype = MaskType if dtype is None else make_mask_descr(dtype)
    return np.zeros(newshape, dtype=mask_dtype)
def mask_or(m1, m2, copy=False, shrink=True):
    """
    Combine two masks with the ``logical_or`` operator.

    The result may be a view on `m1` or `m2` if the other is `nomask`
    (i.e. False).

    Parameters
    ----------
    m1, m2 : array_like
        Input masks.
    copy : bool, optional
        If copy is False and one of the inputs is `nomask`, return a view
        of the other input mask. Defaults to False.
    shrink : bool, optional
        Whether to shrink the output to `nomask` if all its values are
        False. Defaults to True.

    Returns
    -------
    mask : output mask
        The result masks values that are masked in either `m1` or `m2`.

    Raises
    ------
    ValueError
        If `m1` and `m2` have different flexible dtypes.

    Examples
    --------
    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
    >>> np.ma.mask_or(m1, m2)
    array([ True,  True,  True, False])

    """

    # Field-by-field logical OR for structured masks; `self` is the
    # function itself, bound by the @recursive decorator.
    @recursive
    def _recursive_mask_or(self, m1, m2, newmask):
        names = m1.dtype.names
        for name in names:
            current1 = m1[name]
            if current1.dtype.names is not None:
                # Nested structured field: recurse.
                self(current1, m2[name], newmask[name])
            else:
                umath.logical_or(current1, m2[name], newmask[name])
        return

    # If either operand carries no mask, the result is (a mask view of)
    # the other operand.
    if (m1 is nomask) or (m1 is False):
        dtype = getattr(m2, 'dtype', MaskType)
        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
    if (m2 is nomask) or (m2 is False):
        dtype = getattr(m1, 'dtype', MaskType)
        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
    # Same object: nothing to combine.
    if m1 is m2 and is_mask(m1):
        return m1
    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
    if dtype1 != dtype2:
        raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
    if dtype1.names is not None:
        # Allocate an output mask array with the properly broadcast shape.
        newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
        _recursive_mask_or(m1, m2, newmask)
        return newmask
    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
    """
    Returns a completely flattened version of the mask, where nested fields
    are collapsed.

    Parameters
    ----------
    mask : array_like
        Input array, which will be interpreted as booleans.

    Returns
    -------
    flattened_mask : ndarray of bools
        The flattened input.

    Examples
    --------
    >>> mask = np.array([0, 0, 1])
    >>> np.ma.flatten_mask(mask)
    array([False, False,  True])

    >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
    >>> np.ma.flatten_mask(mask)
    array([False, False, False,  True])

    >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
    >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
    >>> np.ma.flatten_mask(mask)
    array([False, False, False, False, False,  True])

    """

    def _leaf_masks(m):
        # A structured mask becomes a list of flattened per-field masks;
        # a plain mask is returned unchanged.
        field_names = m.dtype.names
        if field_names is None:
            return m
        return [flatten_mask(m[name]) for name in field_names]

    def _iter_flat(seq):
        # Depth-first traversal yielding scalars from arbitrarily
        # nested iterables; non-iterables are yielded as-is.
        try:
            for item in seq:
                if hasattr(item, '__iter__'):
                    yield from _iter_flat(item)
                else:
                    yield item
        except TypeError:
            yield seq

    mask = np.asarray(mask)
    flat = _iter_flat(_leaf_masks(mask))
    return np.array([entry for entry in flat], dtype=bool)
def _check_mask_axis(mask, axis, keepdims=np._NoValue):
    "Check whether there are masked values along the given axis"
    # Only forward `keepdims` when the caller actually supplied it.
    kwargs = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
    if mask is nomask:
        return nomask
    return mask.all(axis=axis, **kwargs)
###############################################################################
# Masking functions #
###############################################################################
def masked_where(condition, a, copy=True):
    """
    Mask an array where a condition is met.

    Return `a` as an array masked where `condition` is True.
    Any masked values of `a` or `condition` are also masked in the output.

    Parameters
    ----------
    condition : array_like
        Masking condition. When `condition` tests floating point values for
        equality, consider using ``masked_values`` instead.
    a : array_like
        Array to mask.
    copy : bool
        If True (default) make a copy of `a` in the result. If False modify
        `a` in place and return a view.

    Returns
    -------
    result : MaskedArray
        The result of masking `a` where `condition` is True.

    See Also
    --------
    masked_values : Mask using floating point equality.
    masked_equal : Mask where equal to a given value.
    masked_not_equal : Mask where `not` equal to a given value.
    masked_less_equal : Mask where less than or equal to a given value.
    masked_greater_equal : Mask where greater than or equal to a given value.
    masked_less : Mask where less than a given value.
    masked_greater : Mask where greater than a given value.
    masked_inside : Mask inside a given interval.
    masked_outside : Mask outside a given interval.
    masked_invalid : Mask invalid values (NaNs or infs).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_where(a <= 2, a)
    masked_array(data=[--, --, --, 3],
                 mask=[ True,  True,  True, False],
           fill_value=999999)

    Mask array `b` conditional on `a`:

    >>> ma.masked_where(a == 2, ['a', 'b', 'c', 'd'])
    masked_array(data=['a', 'b', --, 'd'],
                 mask=[False, False,  True, False],
           fill_value='N/A',
                dtype='<U1')

    With ``copy=False`` the result is a view, so assigning to it also
    modifies `a`. Masked values of `a` or `condition` propagate into
    the output.

    """
    # Make sure that condition is a valid standard-type mask.
    cond = make_mask(condition, shrink=False)
    data = np.array(a, copy=copy, subok=True)

    if cond.shape and cond.shape != data.shape:
        raise IndexError("Inconsistent shape between the condition and the input"
                         " (got %s and %s)" % (cond.shape, data.shape))

    if hasattr(data, '_mask'):
        # Already masked: the new mask is the union with the old one.
        cond = mask_or(cond, data._mask)
        cls = type(data)
    else:
        cls = MaskedArray
    result = data.view(cls)
    # Assign to *.mask so that structured masks are handled correctly.
    result.mask = _shrink_mask(cond)
    return result
def masked_greater(x, value, copy=True):
    """
    Mask an array where greater than a given value.

    Shortcut for ``masked_where`` with `condition` = (x > value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_greater(np.arange(4), 2)
    masked_array(data=[0, 1, 2, --],
                 mask=[False, False, False,  True],
           fill_value=999999)

    """
    condition = greater(x, value)
    return masked_where(condition, x, copy=copy)
def masked_greater_equal(x, value, copy=True):
    """
    Mask an array where greater than or equal to a given value.

    Shortcut for ``masked_where`` with `condition` = (x >= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_greater_equal(np.arange(4), 2)
    masked_array(data=[0, 1, --, --],
                 mask=[False, False,  True,  True],
           fill_value=999999)

    """
    condition = greater_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less(x, value, copy=True):
    """
    Mask an array where less than a given value.

    Shortcut for ``masked_where`` with `condition` = (x < value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_less(np.arange(4), 2)
    masked_array(data=[--, --, 2, 3],
                 mask=[ True,  True, False, False],
           fill_value=999999)

    """
    condition = less(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less_equal(x, value, copy=True):
    """
    Mask an array where less than or equal to a given value.

    Shortcut for ``masked_where`` with `condition` = (x <= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_less_equal(np.arange(4), 2)
    masked_array(data=[--, --, --, 3],
                 mask=[ True,  True,  True, False],
           fill_value=999999)

    """
    condition = less_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_not_equal(x, value, copy=True):
    """
    Mask an array where `not` equal to a given value.

    Shortcut for ``masked_where`` with `condition` = (x != value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_not_equal(np.arange(4), 2)
    masked_array(data=[--, --, 2, --],
                 mask=[ True,  True, False,  True],
           fill_value=999999)

    """
    condition = not_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_equal(x, value, copy=True):
    """
    Mask an array where equal to a given value.

    Shortcut for ``masked_where`` with `condition` = (x == value). For
    floating point arrays, consider using ``masked_values(x, value)``.
    The result's fill_value is set to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.masked_equal(np.arange(4), 2)
    masked_array(data=[0, 1, --, 3],
                 mask=[False, False,  True, False],
           fill_value=2)

    """
    condition = equal(x, value)
    output = masked_where(condition, x, copy=copy)
    output.fill_value = value
    return output
def masked_inside(x, v1, v2, copy=True):
    """
    Mask an array inside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` inside
    the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
    can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_inside(x, -0.3, 0.3)
    masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
                 mask=[False, False,  True,  True, False, False],
           fill_value=1e+20)

    """
    # Normalize boundary order.
    if v2 < v1:
        v1, v2 = v2, v1
    data = filled(x)
    inside = (data >= v1) & (data <= v2)
    return masked_where(inside, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
    """
    Mask an array outside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` outside
    the interval [v1,v2] (x < v1)|(x > v2).
    The boundaries `v1` and `v2` can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_outside(x, -0.3, 0.3)
    masked_array(data=[--, --, 0.01, 0.2, --, --],
                 mask=[ True,  True, False, False,  True,  True],
           fill_value=1e+20)

    """
    # Normalize boundary order.
    if v2 < v1:
        v1, v2 = v2, v1
    data = filled(x)
    outside = (data < v1) | (data > v2)
    return masked_where(outside, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
    """
    Mask the array `x` where the data are exactly equal to value.

    This function is similar to `masked_values`, but only suitable
    for object arrays: for floating point, use `masked_values` instead.

    Parameters
    ----------
    x : array_like
        Array to mask
    value : object
        Comparison value
    copy : {True, False}, optional
        Whether to return a copy of `x`.
    shrink : {True, False}, optional
        Whether to collapse a mask full of False to nomask

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> food = np.array(['green_eggs', 'ham'], dtype=object)
    >>> ma.masked_object(food, 'green_eggs')
    masked_array(data=[--, 'ham'],
                 mask=[ True, False],
           fill_value='green_eggs',
                dtype=object)

    When no entry matches, the mask collapses to ``nomask`` (if `shrink`
    is True).

    """
    if isMaskedArray(x):
        # Compare against the raw data; keep the pre-existing mask.
        condition = umath.equal(x._data, value)
        old_mask = x._mask
    else:
        condition = umath.equal(np.asarray(x), value)
        old_mask = nomask
    combined = mask_or(old_mask, make_mask(condition, shrink=shrink))
    return masked_array(x, mask=combined, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
    """
    Mask using floating point equality.

    Return a MaskedArray, masked where the data in array `x` are
    approximately equal to `value`, determined using `isclose`. The default
    tolerances for `masked_values` are the same as those for `isclose`.

    For integer types, exact equality is used, in the same way as
    `masked_equal`.

    The fill_value is set to `value` and the mask is set to ``nomask`` if
    possible.

    Parameters
    ----------
    x : array_like
        Array to mask.
    value : float
        Masking value.
    rtol, atol : float, optional
        Tolerance parameters passed on to `isclose`
    copy : bool, optional
        Whether to return a copy of `x`.
    shrink : bool, optional
        Whether to collapse a mask full of False to ``nomask``.

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where approximately equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = np.array([1, 1.1, 2, 1.1, 3])
    >>> ma.masked_values(x, 1.1)
    masked_array(data=[1.0, --, 2.0, --, 3.0],
                 mask=[False,  True, False,  True, False],
           fill_value=1.1)

    When nothing matches, the mask shrinks to ``nomask``. For integer
    inputs exact equality is used, as in ``masked_equal``.

    """
    data = filled(x, value)
    if np.issubdtype(data.dtype, np.floating):
        # Floating point: approximate comparison.
        approx = np.isclose(data, value, atol=atol, rtol=rtol)
    else:
        # Exact comparison for everything else.
        approx = umath.equal(data, value)
    result = masked_array(data, mask=approx, copy=copy, fill_value=value)
    if shrink:
        result.shrink_mask()
    return result
def masked_invalid(a, copy=True):
    """
    Mask an array where invalid values occur (NaNs or infs).

    This function is a shortcut to ``masked_where``, with
    `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
    Only applies to arrays with a dtype where NaNs or infs make sense
    (i.e. floating point types), but accepts any array_like object.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(5, dtype=float)
    >>> a[2] = np.NaN
    >>> a[3] = np.PINF
    >>> ma.masked_invalid(a)
    masked_array(data=[0.0, 1.0, --, --, 4.0],
                 mask=[False, False,  True,  True, False],
           fill_value=1e+20)

    """
    a = np.array(a, copy=copy, subok=True)
    existing = getattr(a, '_mask', None)
    if existing is None:
        # Plain array: mask the non-finite entries.
        condition = ~(np.isfinite(a))
        cls = MaskedArray
    else:
        # Already a masked array: combine with the existing mask.
        condition = ~(np.isfinite(getdata(a)))
        if existing is not nomask:
            condition |= existing
        cls = type(a)
    result = a.view(cls)
    result._mask = condition
    return result
###############################################################################
# Printing options #
###############################################################################
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__(self, display):
"""
Create the masked_print_option object.
"""
self._display = display
self._enabled = True
def display(self):
"""
Display the string to print for masked values.
"""
return self._display
def set_display(self, s):
"""
Set the string to print for masked values.
"""
self._display = s
def enabled(self):
"""
Is the use of the display value enabled?
"""
return self._enabled
def enable(self, shrink=1):
"""
Set the enabling shrink to `shrink`.
"""
self._enabled = shrink
def __str__(self):
return str(self._display)
__repr__ = __str__
# Module-level default printing object, initialized to render masked
# entries as '--'.
# if you single index into a masked location you get this object.
masked_print_option = _MaskedPrintOption('--')
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
if names is not None:
for name in names:
curdata = result[name]
curmask = mask[name]
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(result, printopt, where=mask)
return
# For better or worse, these end in a newline
_legacy_print_templates = dict(
long_std=textwrap.dedent("""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
"""),
long_flx=textwrap.dedent("""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
"""),
short_std=textwrap.dedent("""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
"""),
short_flx=textwrap.dedent("""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
)
###############################################################################
# MaskedArray class #
###############################################################################
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names is not None:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
def flatten_structured_array(a):
    """
    Flatten a structured array.

    The data type of the output is chosen such that it can represent all of
    the (nested) fields.

    Parameters
    ----------
    a : structured array

    Returns
    -------
    output : masked array or ndarray
        A flattened masked array if the input is a masked array, otherwise a
        standard ndarray.

    Examples
    --------
    >>> ndtype = [('a', int), ('b', float)]
    >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
    >>> np.ma.flatten_structured_array(a)
    array([[1., 1.],
           [2., 2.]])

    """
    def _flatten(nested):
        """Yield the scalar leaves of a compound of nested iterables."""
        for item in iter(nested):
            if hasattr(item, '__iter__'):
                yield from _flatten(item)
            else:
                yield item

    a = np.asanyarray(a)
    original_shape = a.shape
    a = a.ravel()
    if isinstance(a, MaskedArray):
        # Flatten data and mask in parallel, keeping the masked-array class.
        out = np.array([tuple(_flatten(rec.item())) for rec in a._data])
        out = out.view(MaskedArray)
        out._mask = np.array([tuple(_flatten(rec.item()))
                              for rec in getmaskarray(a)])
    else:
        out = np.array([tuple(_flatten(rec.item())) for rec in a])
    if len(original_shape) > 1:
        # Restore the original leading dimensions ahead of the field axis.
        newshape = list(out.shape)
        newshape[0] = original_shape
        out.shape = tuple(_flatten(newshape))
    return out
def _arraymethod(funcname, onmask=True):
"""
Return a class method wrapper around a basic array method.
Creates a class method which returns a masked array, where the new
``_data`` array is the output of the corresponding basic method called
on the original ``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
Whether the mask must be processed also (True) or left
alone (False). Default is True. Make available as `_onmask`
attribute.
Returns
-------
method : instancemethod
Class method wrapper of the specified basic array method.
"""
def wrapped_method(self, *args, **params):
result = getattr(self._data, funcname)(*args, **params)
result = result.view(type(self))
result._update_from(self)
mask = self._mask
if not onmask:
result.__setmask__(mask)
elif mask is not nomask:
# __setmask__ makes a copy, which we don't want
result._mask = getattr(mask, funcname)(*args, **params)
return result
methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
if methdoc is not None:
wrapped_method.__doc__ = methdoc.__doc__
wrapped_method.__name__ = funcname
return wrapped_method
class MaskedIterator:
    """
    Flat iterator object to iterate over masked arrays.

    A `MaskedIterator` iterator is returned by ``x.flat`` for any masked
    array `x`.  It allows iterating over the array as if it were a 1-D
    array, either in a for-loop or by calling its `next` method.  Iteration
    is done in C-contiguous style, with the last index varying the fastest.
    The iterator can also be indexed using basic slicing or advanced
    indexing; masked scalars come back as `masked`, masked records as
    `mvoid`.

    See Also
    --------
    MaskedArray.flat : Return a flat iterator over an array.
    MaskedArray.flatten : Returns a flattened copy of an array.

    Notes
    -----
    `MaskedIterator` is not exported by the `ma` module. Instead of
    instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.

    Examples
    --------
    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
    >>> fl = x.flat
    >>> type(fl)
    <class 'numpy.ma.core.MaskedIterator'>
    >>> for item in fl:
    ...     print(item)
    ...
    0
    1
    2
    3
    4
    5

    Extracting more than a single element by indexing the `MaskedIterator`
    returns a masked array:

    >>> fl[2:4]
    masked_array(data = [2 3],
                 mask = False,
           fill_value = 999999)

    """

    def __init__(self, ma):
        self.ma = ma
        self.dataiter = ma._data.flat
        # Only keep a flat iterator over the mask when there is one.
        self.maskiter = None if ma._mask is nomask else ma._mask.flat

    def __iter__(self):
        return self

    def __getitem__(self, indx):
        result = self.dataiter.__getitem__(indx).view(type(self.ma))
        if self.maskiter is None:
            return result
        _mask = self.maskiter.__getitem__(indx)
        if isinstance(_mask, ndarray):
            # set shape to match that of data; this is needed for matrices
            _mask.shape = result.shape
            result._mask = _mask
        elif isinstance(_mask, np.void):
            # Structured scalar: bundle data and mask in an mvoid.
            return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
        elif _mask:
            # Plain scalar whose mask is set.
            return masked
        return result

    # This won't work if ravel makes a copy
    def __setitem__(self, index, value):
        self.dataiter[index] = getdata(value)
        if self.maskiter is not None:
            self.maskiter[index] = getmaskarray(value)

    def __next__(self):
        """
        Return the next value, or raise StopIteration.

        Examples
        --------
        >>> x = np.ma.array([3, 2], mask=[0, 1])
        >>> fl = x.flat
        >>> next(fl)
        3
        >>> next(fl)
        masked
        >>> next(fl)
        Traceback (most recent call last):
          ...
        StopIteration

        """
        d = next(self.dataiter)
        if self.maskiter is None:
            return d
        m = next(self.maskiter)
        if isinstance(m, np.void):
            return mvoid(d, mask=m, hardmask=self.ma._hardmask)
        if m:
            # Just a scalar, masked
            return masked
        return d
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
shrink=True, order=None)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C', then the array
will be in C-contiguous order (last-index varies the fastest).
If order is 'F', then the returned array will be in
Fortran-contiguous order (first-index varies the fastest).
If order is 'A' (default), then the returned array may be
in any order (either C-, Fortran-contiguous, or even discontiguous),
unless a copy is required, in which case it will be C-contiguous.
Examples
--------
The ``mask`` can be initialized with an array of boolean values
with the same shape as ``data``.
>>> data = np.arange(6).reshape((2, 3))
>>> np.ma.MaskedArray(data, mask=[[False, True, False],
... [False, False, True]])
masked_array(
data=[[0, --, 2],
[3, 4, --]],
mask=[[False, True, False],
[False, False, True]],
fill_value=999999)
Alternatively, the ``mask`` can be initialized to homogeneous boolean
array with the same shape as ``data`` by passing in a scalar
boolean value:
>>> np.ma.MaskedArray(data, mask=False)
masked_array(
data=[[0, 1, 2],
[3, 4, 5]],
mask=[[False, False, False],
[False, False, False]],
fill_value=999999)
>>> np.ma.MaskedArray(data, mask=True)
masked_array(
data=[[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True]],
fill_value=999999,
dtype=int64)
.. note::
The recommended practice for initializing ``mask`` with a scalar
boolean value is to use ``True``/``False`` rather than
``np.True_``/``np.False_``. The reason is :attr:`nomask`
is represented internally as ``np.False_``.
>>> np.False_ is np.ma.nomask
True
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
# Maximum number of elements per axis used when printing an array. The
# 1d case is handled separately because we need more values in this case.
_print_width = 100
_print_width_1d = 1500
    def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
                subok=True, ndmin=0, fill_value=None, keep_mask=True,
                hard_mask=None, shrink=True, order=None):
        """
        Create a new masked array from scratch.

        See the class docstring for the meaning of the constructor
        parameters (`data`, `mask`, `dtype`, `copy`, `subok`, `ndmin`,
        `fill_value`, `keep_mask`, `hard_mask`, `shrink`, `order`).

        Notes
        -----
        A masked array can also be created by taking a .view(MaskedArray).
        """
        # Process data.
        _data = np.array(data, dtype=dtype, copy=copy,
                         order=order, subok=True, ndmin=ndmin)
        _baseclass = getattr(data, '_baseclass', type(_data))
        # Check that we're not erasing the mask.
        # (a shape change means the input's mask cannot simply be reused)
        if isinstance(data, MaskedArray) and (data.shape != _data.shape):
            copy = True

        # Here, we copy the _view_, so that we can attach new properties to it
        # we must never do .view(MaskedConstant), as that would create a new
        # instance of np.ma.masked, which make identity comparison fail
        if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant):
            _data = ndarray.view(_data, type(data))
        else:
            _data = ndarray.view(_data, cls)

        # Backwards compatibility w/ numpy.core.ma.
        if hasattr(data, '_mask') and not isinstance(data, ndarray):
            _data._mask = data._mask
            # FIXME _sharedmask is never used.
            _sharedmask = True

        # Process mask.
        # Type of the mask
        mdtype = make_mask_descr(_data.dtype)
        if mask is nomask:
            # Case 1. : no mask in input.
            # Erase the current mask ?
            if not keep_mask:
                # With a reduced version
                if shrink:
                    _data._mask = nomask
                # With full version
                else:
                    _data._mask = np.zeros(_data.shape, dtype=mdtype)
            # Check whether we missed something
            elif isinstance(data, (tuple, list)):
                try:
                    # If data is a sequence of masked array
                    mask = np.array([getmaskarray(np.asanyarray(m, dtype=mdtype))
                                     for m in data], dtype=mdtype)
                except ValueError:
                    # If data is nested
                    mask = nomask
                # Force shrinking of the mask if needed (and possible)
                if (mdtype == MaskType) and mask.any():
                    _data._mask = mask
                    _data._sharedmask = False
            else:
                _data._sharedmask = not copy
                if copy:
                    _data._mask = _data._mask.copy()
                    # Reset the shape of the original mask
                    if getmask(data) is not nomask:
                        data._mask.shape = data.shape
        else:
            # Case 2. : With a mask in input.
            # If mask is boolean, create an array of True or False
            if mask is True and mdtype == MaskType:
                mask = np.ones(_data.shape, dtype=mdtype)
            elif mask is False and mdtype == MaskType:
                mask = np.zeros(_data.shape, dtype=mdtype)
            else:
                # Read the mask with the current mdtype
                try:
                    mask = np.array(mask, copy=copy, dtype=mdtype)
                # Or assume it's a sequence of bool/int
                except TypeError:
                    mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                    dtype=mdtype)
            # Make sure the mask and the data have the same shape
            if mask.shape != _data.shape:
                (nd, nm) = (_data.size, mask.size)
                if nm == 1:
                    mask = np.resize(mask, _data.shape)
                elif nm == nd:
                    mask = np.reshape(mask, _data.shape)
                else:
                    msg = "Mask and data not compatible: data size is %i, " + \
                          "mask size is %i."
                    raise MaskError(msg % (nd, nm))
                copy = True
            # Set the mask to the new value
            if _data._mask is nomask:
                _data._mask = mask
                _data._sharedmask = not copy
            else:
                if not keep_mask:
                    _data._mask = mask
                    _data._sharedmask = not copy
                else:
                    if _data.dtype.names is not None:
                        def _recursive_or(a, b):
                            "do a|=b on each field of a, recursively"
                            for name in a.dtype.names:
                                (af, bf) = (a[name], b[name])
                                if af.dtype.names is not None:
                                    _recursive_or(af, bf)
                                else:
                                    af |= bf

                        _recursive_or(_data._mask, mask)
                    else:
                        _data._mask = np.logical_or(mask, _data._mask)
                    _data._sharedmask = False

        # Update fill_value.
        if fill_value is None:
            fill_value = getattr(data, '_fill_value', None)
        # But don't run the check unless we have something to check.
        if fill_value is not None:
            _data._fill_value = _check_fill_value(fill_value, _data.dtype)
        # Process extra options ..
        if hard_mask is None:
            # Inherit mask hardness from the input when not specified.
            _data._hardmask = getattr(data, '_hardmask', False)
        else:
            _data._hardmask = hard_mask
        _data._baseclass = _baseclass
        return _data
def _update_from(self, obj):
"""
Copies some attributes of obj to self.
"""
if isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
    def __array_finalize__(self, obj):
        """
        Finalizes the masked array.

        Called by numpy whenever an instance is created from template
        object `obj` (view, slice, ufunc output, ``empty_like``, ...).
        Copies attributes via `_update_from` and decides what mask the new
        instance gets, then re-validates the fill value for the new dtype.
        """
        # Get main attributes.
        self._update_from(obj)

        # We have to decide how to initialize self.mask, based on
        # obj.mask. This is very difficult.  There might be some
        # correspondence between the elements in the array we are being
        # created from (= obj) and us. Or there might not. This method can
        # be called in all kinds of places for all kinds of reasons -- could
        # be empty_like, could be slicing, could be a ufunc, could be a view.
        # The numpy subclassing interface simply doesn't give us any way
        # to know, which means that at best this method will be based on
        # guesswork and heuristics. To make things worse, there isn't even any
        # clear consensus about what the desired behavior is. For instance,
        # most users think that np.empty_like(marr) -- which goes via this
        # method -- should return a masked array with an empty mask (see
        # gh-3404 and linked discussions), but others disagree, and they have
        # existing code which depends on empty_like returning an array that
        # matches the input mask.
        #
        # Historically our algorithm was: if the template object mask had the
        # same *number of elements* as us, then we used *it's mask object
        # itself* as our mask, so that writes to us would also write to the
        # original array. This is horribly broken in multiple ways.
        #
        # Now what we do instead is, if the template object mask has the same
        # number of elements as us, and we do not have the same base pointer
        # as the template object (b/c views like arr[...] should keep the same
        # mask), then we make a copy of the template object mask and use
        # that. This is also horribly broken but somewhat less so. Maybe.
        if isinstance(obj, ndarray):
            # XX: This looks like a bug -- shouldn't it check self.dtype
            # instead?
            if obj.dtype.names is not None:
                _mask = getmaskarray(obj)
            else:
                _mask = getmask(obj)

            # If self and obj point to exactly the same data, then probably
            # self is a simple view of obj (e.g., self = obj[...]), so they
            # should share the same mask. (This isn't 100% reliable, e.g. self
            # could be the first row of obj, or have strange strides, but as a
            # heuristic it's not bad.) In all other cases, we make a copy of
            # the mask, so that future modifications to 'self' do not end up
            # side-effecting 'obj' as well.
            if (_mask is not nomask and obj.__array_interface__["data"][0]
                    != self.__array_interface__["data"][0]):
                # We should make a copy. But we could get here via astype,
                # in which case the mask might need a new dtype as well
                # (e.g., changing to or from a structured dtype), and the
                # order could have changed. So, change the mask type if
                # needed and use astype instead of copy.
                if self.dtype == obj.dtype:
                    _mask_dtype = _mask.dtype
                else:
                    _mask_dtype = make_mask_descr(self.dtype)

                if self.flags.c_contiguous:
                    order = "C"
                elif self.flags.f_contiguous:
                    order = "F"
                else:
                    order = "K"

                _mask = _mask.astype(_mask_dtype, order)
            else:
                # Take a view so shape changes, etc., do not propagate back.
                _mask = _mask.view()
        else:
            _mask = nomask

        self._mask = _mask
        # Finalize the mask
        if self._mask is not nomask:
            try:
                # Force the mask's shape to track the data's.
                self._mask.shape = self.shape
            except ValueError:
                self._mask = nomask
            except (TypeError, AttributeError):
                # When _mask.shape is not writable (because it's a void)
                pass

        # Finalize the fill_value
        if self._fill_value is not None:
            self._fill_value = _check_fill_value(self._fill_value, self.dtype)
        elif self.dtype.names is not None:
            # Finalize the default fill_value for structured arrays
            self._fill_value = _check_fill_value(None, self.dtype)
    def __array_wrap__(self, obj, context=None):
        """
        Special hook for ufuncs.

        Wraps the numpy array and sets the mask according to context.

        Parameters
        ----------
        obj : ndarray
            The raw ufunc output to be wrapped.
        context : tuple, optional
            ``(func, args, out_index)`` as supplied by numpy; used to
            rebuild the output mask from the inputs' masks and the ufunc's
            validity domain.
        """
        if obj is self:  # for in-place operations
            result = obj
        else:
            result = obj.view(type(self))
            result._update_from(self)

        if context is not None:
            result._mask = result._mask.copy()
            func, args, out_i = context
            # args sometimes contains outputs (gh-10459), which we don't want
            input_args = args[:func.nin]
            m = reduce(mask_or, [getmaskarray(arg) for arg in input_args])
            # Get the domain mask
            domain = ufunc_domain.get(func, None)
            if domain is not None:
                # Take the domain, and make sure it's a ndarray
                with np.errstate(divide='ignore', invalid='ignore'):
                    d = filled(domain(*input_args), True)

                if d.any():
                    # Fill the result where the domain is wrong
                    try:
                        # Binary domain: take the last value
                        fill_value = ufunc_fills[func][-1]
                    except TypeError:
                        # Unary domain: just use this one
                        fill_value = ufunc_fills[func]
                    except KeyError:
                        # Domain not recognized, use fill_value instead
                        fill_value = self.fill_value

                    np.copyto(result, fill_value, where=d)

                    # Update the mask
                    if m is nomask:
                        m = d
                    else:
                        # Don't modify inplace, we risk back-propagation
                        m = (m | d)

            # Make sure the mask has the proper size
            if result is not self and result.shape == () and m:
                return masked
            else:
                result._mask = m
                result._sharedmask = False

        return result
    def view(self, dtype=None, type=None, fill_value=None):
        """
        Return a view of the MaskedArray data.

        Parameters
        ----------
        dtype : data-type or ndarray sub-class, optional
            Data-type descriptor of the returned view, e.g., float32 or int16.
            The default, None, results in the view having the same data-type
            as `a`. As with ``ndarray.view``, dtype can also be specified as
            an ndarray sub-class, which then specifies the type of the
            returned object (this is equivalent to setting the ``type``
            parameter).
        type : Python type, optional
            Type of the returned view, either ndarray or a subclass.  The
            default None results in type preservation.
        fill_value : scalar, optional
            The value to use for invalid entries (None by default).
            If None, then this argument is inferred from the passed `dtype`, or
            in its absence the original array, as discussed in the notes below.

        See Also
        --------
        numpy.ndarray.view : Equivalent method on ndarray object.

        Notes
        -----
        ``a.view()`` is used two different ways:

        ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
        of the array's memory with a different data-type.  This can cause a
        reinterpretation of the bytes of memory.

        ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
        returns an instance of `ndarray_subclass` that looks at the same array
        (same shape, dtype, etc.)  This does not cause a reinterpretation of the
        memory.

        If `fill_value` is not specified, but `dtype` is specified (and is not
        an ndarray sub-class), the `fill_value` of the MaskedArray will be
        reset. If neither `fill_value` nor `dtype` are specified (or if
        `dtype` is an ndarray sub-class), then the fill value is preserved.
        Finally, if `fill_value` is specified, but `dtype` is not, the fill
        value is set to the specified value.

        For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
        bytes per entry than the previous dtype (for example, converting a
        regular array to a structured array), then the behavior of the view
        cannot be predicted just from the superficial appearance of ``a`` (shown
        by ``print(a)``). It also depends on exactly how ``a`` is stored in
        memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
        defined as a slice or transpose, etc., the view may give different
        results.
        """
        if dtype is None:
            if type is None:
                output = ndarray.view(self)
            else:
                output = ndarray.view(self, type)
        elif type is None:
            try:
                if issubclass(dtype, ndarray):
                    # `dtype` is actually an ndarray subclass: treat it as
                    # the `type` argument instead.
                    output = ndarray.view(self, dtype)
                    dtype = None
                else:
                    output = ndarray.view(self, dtype)
            except TypeError:
                # `issubclass` raised because `dtype` is a genuine data-type
                # descriptor, not a class: view by dtype.
                output = ndarray.view(self, dtype)
        else:
            output = ndarray.view(self, dtype, type)

        # also make the mask be a view (so attr changes to the view's
        # mask do no affect original object's mask)
        # (especially important to avoid affecting np.masked singleton)
        if getmask(output) is not nomask:
            output._mask = output._mask.view()

        # Make sure to reset the _fill_value if needed
        if getattr(output, '_fill_value', None) is not None:
            if fill_value is None:
                if dtype is None:
                    pass  # leave _fill_value as is
                else:
                    output._fill_value = None
            else:
                output.fill_value = fill_value
        return output
    def __getitem__(self, indx):
        """
        x.__getitem__(y) <==> x[y]

        Return the item described by i, as a masked array.  Scalar results
        come back as `masked` (if masked), `mvoid` (structured records), or
        a plain scalar; array results come back as masked arrays carrying
        the corresponding slice of the mask.
        """
        # We could directly use ndarray.__getitem__ on self.
        # But then we would have to modify __array_finalize__ to prevent the
        # mask of being reshaped if it hasn't been set up properly yet
        # So it's easier to stick to the current version
        dout = self.data[indx]
        _mask = self._mask

        def _is_scalar(m):
            return not isinstance(m, np.ndarray)

        def _scalar_heuristic(arr, elem):
            """
            Return whether `elem` is a scalar result of indexing `arr`, or None
            if undecidable without promoting nomask to a full mask
            """
            # obviously a scalar
            if not isinstance(elem, np.ndarray):
                return True

            # object array scalar indexing can return anything
            elif arr.dtype.type is np.object_:
                if arr.dtype is not elem.dtype:
                    # elem is an array, but dtypes do not match, so must be
                    # an element
                    return True

            # well-behaved subclass that only returns 0d arrays when
            # expected - this is not a scalar
            elif type(arr).__getitem__ == ndarray.__getitem__:
                return False

            return None

        if _mask is not nomask:
            # _mask cannot be a subclass, so it tells us whether we should
            # expect a scalar. It also cannot be of dtype object.
            mout = _mask[indx]
            scalar_expected = _is_scalar(mout)
        else:
            # attempt to apply the heuristic to avoid constructing a full mask
            mout = nomask
            scalar_expected = _scalar_heuristic(self.data, dout)
            if scalar_expected is None:
                # heuristics have failed
                # construct a full array, so we can be certain. This is costly.
                # we could also fall back on ndarray.__getitem__(self.data, indx)
                scalar_expected = _is_scalar(getmaskarray(self)[indx])

        # Did we extract a single item?
        if scalar_expected:
            # A record
            if isinstance(dout, np.void):
                # We should always re-cast to mvoid, otherwise users can
                # change masks on rows that already have masked values, but not
                # on rows that have no masked values, which is inconsistent.
                return mvoid(dout, mask=mout, hardmask=self._hardmask)

            # special case introduced in gh-5962
            elif (self.dtype.type is np.object_ and
                  isinstance(dout, np.ndarray) and
                  dout is not masked):
                # If masked, turn into a MaskedArray, with everything masked.
                if mout:
                    return MaskedArray(dout, mask=True)
                else:
                    return dout

            # Just a scalar
            else:
                if mout:
                    return masked
                else:
                    return dout
        else:
            # Force dout to MA
            dout = dout.view(type(self))
            # Inherit attributes from self
            dout._update_from(self)
            # Check the fill_value
            if is_string_or_list_of_strings(indx):
                if self._fill_value is not None:
                    dout._fill_value = self._fill_value[indx]

                    # Something like gh-15895 has happened if this check fails.
                    # _fill_value should always be an ndarray.
                    if not isinstance(dout._fill_value, np.ndarray):
                        raise RuntimeError('Internal NumPy error.')
                    # If we're indexing a multidimensional field in a
                    # structured array (such as dtype("(2,)i2,(2,)i1")),
                    # dimensionality goes up (M[field].ndim == M.ndim +
                    # M.dtype[field].ndim).  That's fine for
                    # M[field] but problematic for M[field].fill_value
                    # which should have shape () to avoid breaking several
                    # methods. There is no great way out, so set to
                    # first element. See issue #6723.
                    if dout._fill_value.ndim > 0:
                        if not (dout._fill_value ==
                                dout._fill_value.flat[0]).all():
                            warnings.warn(
                                "Upon accessing multidimensional field "
                                f"{indx!s}, need to keep dimensionality "
                                "of fill_value at 0. Discarding "
                                "heterogeneous fill_value and setting "
                                f"all to {dout._fill_value[0]!s}.",
                                stacklevel=2)
                        # Need to use `.flat[0:1].squeeze(...)` instead of just
                        # `.flat[0]` to ensure the result is a 0d array and not
                        # a scalar.
                        dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0)
                dout._isfield = True
            # Update the mask if needed
            if mout is not nomask:
                # set shape to match that of data; this is needed for matrices
                dout._mask = reshape(mout, dout.shape)
                dout._sharedmask = True
                # Note: Don't try to check for m.any(), that'll take too long
        return dout
    def __setitem__(self, indx, value):
        """
        x.__setitem__(i, y) <==> x[i]=y

        Set item described by index. If value is masked, masks those
        locations.  With a hard mask, masked locations are protected from
        being overwritten.
        """
        if self is masked:
            raise MaskError('Cannot alter the masked element.')
        _data = self._data
        _mask = self._mask
        if isinstance(indx, str):
            # Field assignment: set the field's data and the field's mask.
            _data[indx] = value
            if _mask is nomask:
                self._mask = _mask = make_mask_none(self.shape, self.dtype)
            _mask[indx] = getmask(value)
            return

        _dtype = _data.dtype

        if value is masked:
            # The mask wasn't set: create a full version.
            if _mask is nomask:
                _mask = self._mask = make_mask_none(self.shape, _dtype)
            # Now, set the mask to its value.
            if _dtype.names is not None:
                _mask[indx] = tuple([True] * len(_dtype.names))
            else:
                _mask[indx] = True
            return

        # Get the _data part of the new value
        dval = getattr(value, '_data', value)
        # Get the _mask part of the new value
        mval = getmask(value)
        if _dtype.names is not None and mval is nomask:
            mval = tuple([False] * len(_dtype.names))
        if _mask is nomask:
            # Set the data, then the mask
            _data[indx] = dval
            if mval is not nomask:
                _mask = self._mask = make_mask_none(self.shape, _dtype)
                _mask[indx] = mval
        elif not self._hardmask:
            # Set the data, then the mask
            _data[indx] = dval
            _mask[indx] = mval
        elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
            # Hard mask + boolean index: clear the index at masked
            # positions so their data is left untouched.
            indx = indx * umath.logical_not(_mask)
            _data[indx] = dval
        else:
            if _dtype.names is not None:
                err_msg = "Flexible 'hard' masks are not yet supported."
                raise NotImplementedError(err_msg)
            # Hard mask, generic index: merge old and new masks and only
            # write data where the merged mask is False.
            mindx = mask_or(_mask[indx], mval, copy=True)
            dindx = self._data[indx]
            if dindx.size > 1:
                np.copyto(dindx, dval, where=~mindx)
            elif mindx is nomask:
                dindx = dval
            _data[indx] = dindx
            _mask[indx] = mindx
        return
    # Define so that we can overwrite the setter.
    @property
    def dtype(self):
        """Data-type of the array's elements (plain ndarray passthrough)."""
        return super(MaskedArray, self).dtype

    @dtype.setter
    def dtype(self, dtype):
        # Changing the data dtype also re-interprets the mask with the
        # matching mask dtype.
        super(MaskedArray, type(self)).dtype.__set__(self, dtype)
        if self._mask is not nomask:
            self._mask = self._mask.view(make_mask_descr(dtype), ndarray)
            # Try to reset the shape of the mask (if we don't have a void).
            # This raises a ValueError if the dtype change won't work.
            try:
                self._mask.shape = self.shape
            except (AttributeError, TypeError):
                pass
    @property
    def shape(self):
        """Tuple of array dimensions (plain ndarray passthrough)."""
        return super(MaskedArray, self).shape

    @shape.setter
    def shape(self, shape):
        # Reshape the data, then keep the mask's shape in sync.
        super(MaskedArray, type(self)).shape.__set__(self, shape)
        # Cannot use self._mask, since it may not (yet) exist when a
        # masked matrix sets the shape.
        if getmask(self) is not nomask:
            self._mask.shape = self.shape
    def __setmask__(self, mask, copy=False):
        """
        Set the mask.

        Parameters
        ----------
        mask : array_like, bool, or `masked`
            New mask value; `masked` is treated as True.  For structured
            dtypes, scalar booleans are replicated across all fields.
        copy : bool, optional
            Whether the mask may be copied when converted to the structured
            mask dtype. Default is False.
        """
        idtype = self.dtype
        current_mask = self._mask
        if mask is masked:
            mask = True

        if current_mask is nomask:
            # Make sure the mask is set
            # Just don't do anything if there's nothing to do.
            if mask is nomask:
                return
            current_mask = self._mask = make_mask_none(self.shape, idtype)

        if idtype.names is None:
            # No named fields.
            # Hardmask: don't unmask the data
            if self._hardmask:
                current_mask |= mask
            # Softmask: set everything to False
            # If it's obviously a compatible scalar, use a quick update
            # method.
            elif isinstance(mask, (int, float, np.bool_, np.number)):
                current_mask[...] = mask
            # Otherwise fall back to the slower, general purpose way.
            else:
                current_mask.flat = mask
        else:
            # Named fields w/
            mdtype = current_mask.dtype
            mask = np.array(mask, copy=False)
            # Mask is a singleton
            if not mask.ndim:
                # It's a boolean : make a record
                if mask.dtype.kind == 'b':
                    mask = np.array(tuple([mask.item()] * len(mdtype)),
                                    dtype=mdtype)
                # It's a record: make sure the dtype is correct
                else:
                    mask = mask.astype(mdtype)
            # Mask is a sequence
            else:
                # Make sure the new mask is a ndarray with the proper dtype
                try:
                    mask = np.array(mask, copy=copy, dtype=mdtype)
                # Or assume it's a sequence of bool/int
                except TypeError:
                    mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                    dtype=mdtype)
            # Hardmask: don't unmask the data
            if self._hardmask:
                for n in idtype.names:
                    current_mask[n] |= mask[n]
            # Softmask: set everything to False
            # If it's obviously a compatible scalar, use a quick update
            # method.
            elif isinstance(mask, (int, float, np.bool_, np.number)):
                current_mask[...] = mask
            # Otherwise fall back to the slower, general purpose way.
            else:
                current_mask.flat = mask
        # Reshape if needed
        if current_mask.shape:
            current_mask.shape = self.shape
        return

    # Alias for __setmask__.
    _set_mask = __setmask__
    @property
    def mask(self):
        """ Current mask. """
        # We could try to force a reshape, but that wouldn't work in some
        # cases.
        # Return a view so that the dtype and shape cannot be changed in place
        # This still preserves nomask by identity
        return self._mask.view()

    @mask.setter
    def mask(self, value):
        # Delegate to __setmask__, which handles hard/soft masks and
        # structured dtypes.
        self.__setmask__(value)
    @property
    def recordmask(self):
        """
        Get or set the mask of the array if it has no named fields. For
        structured arrays, returns a ndarray of booleans where entries are
        ``True`` if **all** the fields are masked, ``False`` otherwise:

        >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)],
        ...         mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
        ...        dtype=[('a', int), ('b', int)])
        >>> x.recordmask
        array([False, False,  True, False, False])
        """
        _mask = self._mask.view(ndarray)
        if _mask.dtype.names is None:
            # Unstructured dtype: the record mask is just the element mask.
            return _mask
        # Structured dtype: a record is masked only if every field is masked.
        return np.all(flatten_structured_array(_mask), axis=-1)

    @recordmask.setter
    def recordmask(self, mask):
        # Setting per-record masks is not implemented.
        raise NotImplementedError("Coming soon: setting the mask per records!")
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `~ma.MaskedArray.hardmask` property. `harden_mask` sets
`~ma.MaskedArray.hardmask` to ``True``.
See Also
--------
ma.MaskedArray.hardmask
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `~ma.MaskedArray.hardmask` property. `soften_mask` sets
`~ma.MaskedArray.hardmask` to ``False``.
See Also
--------
ma.MaskedArray.hardmask
"""
self._hardmask = False
return self
    @property
    def hardmask(self):
        """ Hardness of the mask (read-only); see `harden_mask`/`soften_mask`. """
        return self._hardmask
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
    @property
    def sharedmask(self):
        """ Share status of the mask (read-only); cleared by `unshare_mask`. """
        return self._sharedmask
    def shrink_mask(self):
        """
        Reduce a mask to nomask when possible.

        Parameters
        ----------
        None

        Returns
        -------
        None

        Examples
        --------
        >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
        >>> x.mask
        array([[False, False],
               [False, False]])
        >>> x.shrink_mask()
        masked_array(
          data=[[1, 2],
                [3, 4]],
          mask=False,
          fill_value=999999)
        >>> x.mask
        False
        """
        # _shrink_mask collapses an all-False mask to the nomask sentinel.
        self._mask = _shrink_mask(self._mask)
        return self
    @property
    def baseclass(self):
        """ Class of the underlying data (read-only); see `_get_data`. """
        return self._baseclass
    def _get_data(self):
        """
        Returns the underlying data, as a view of the masked array.

        If the underlying data is a subclass of :class:`numpy.ndarray`, it is
        returned as such.

        >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
        >>> x.data
        matrix([[1, 2],
                [3, 4]])

        The type of the data can be accessed through the :attr:`baseclass`
        attribute.
        """
        # View through the stored base class so subclass behaviour survives.
        return ndarray.view(self, self._baseclass)
    # Expose the data both as `_data` (internal convention) and `data`.
    _data = property(fget=_get_data)
    data = property(fget=_get_data)
@property
def flat(self):
""" Return a flat iterator, or set a flattened version of self to value. """
return MaskedIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
    @property
    def fill_value(self):
        """
        The filling value of the masked array is a scalar. When setting, None
        will set to a default based on the data type.

        Examples
        --------
        >>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
        ...     np.ma.array([0, 1], dtype=dt).get_fill_value()
        ...
        999999
        999999
        1e+20
        (1e+20+0j)
        >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
        >>> x.fill_value
        -inf
        >>> x.fill_value = np.pi
        >>> x.fill_value
        3.1415926535897931 # may vary

        Reset to default:

        >>> x.fill_value = None
        >>> x.fill_value
        1e+20
        """
        # Lazily create the default fill value for this dtype on first access.
        if self._fill_value is None:
            self._fill_value = _check_fill_value(None, self.dtype)
        # Temporary workaround to account for the fact that str and bytes
        # scalars cannot be indexed with (), whereas all other numpy
        # scalars can. See issues #7259 and #7267.
        # The if-block can be removed after #7267 has been fixed.
        if isinstance(self._fill_value, ndarray):
            return self._fill_value[()]
        return self._fill_value
    @fill_value.setter
    def fill_value(self, value=None):
        # Validate/convert `value` for this dtype (None -> dtype default).
        target = _check_fill_value(value, self.dtype)
        if not target.ndim == 0:
            # 2019-11-12, 1.18.0
            warnings.warn(
                "Non-scalar arrays for the fill value are deprecated. Use "
                "arrays with scalar values instead. The filled function "
                "still supports any array as `fill_value`.",
                DeprecationWarning, stacklevel=2)
        _fill_value = self._fill_value
        if _fill_value is None:
            # Create the attribute if it was undefined
            self._fill_value = target
        else:
            # Don't overwrite the attribute, just fill it (for propagation)
            _fill_value[()] = target
    # kept for compatibility
    get_fill_value = fill_value.fget
    set_fill_value = fill_value.fset
    def filled(self, fill_value=None):
        """
        Return a copy of self, with masked values filled with a given value.
        **However**, if there are no masked values to fill, self will be
        returned instead as an ndarray.

        Parameters
        ----------
        fill_value : array_like, optional
            The value to use for invalid entries. Can be scalar or non-scalar.
            If non-scalar, the resulting ndarray must be broadcastable over
            input array. Default is None, in which case, the `fill_value`
            attribute of the array is used instead.

        Returns
        -------
        filled_array : ndarray
            A copy of ``self`` with invalid entries replaced by *fill_value*
            (be it the function argument or the attribute of ``self``), or
            ``self`` itself as an ndarray if there are no invalid entries to
            be replaced.

        Notes
        -----
        The result is **not** a MaskedArray!

        Examples
        --------
        >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
        >>> x.filled()
        array([   1,    2, -999,    4, -999])
        >>> x.filled(fill_value=1000)
        array([   1,    2, 1000,    4, 1000])
        >>> type(x.filled())
        <class 'numpy.ndarray'>

        Subclassing is preserved. This means that if, e.g., the data part of
        the masked array is a recarray, `filled` returns a recarray:

        >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
        >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
        >>> m.filled()
        rec.array([(999999,      2), (    -3, 999999)],
                  dtype=[('f0', '<i8'), ('f1', '<i8')])
        """
        m = self._mask
        if m is nomask:
            # Nothing masked: hand back the data itself (no copy).
            return self._data
        if fill_value is None:
            fill_value = self.fill_value
        else:
            fill_value = _check_fill_value(fill_value, self.dtype)
        if self is masked_singleton:
            return np.asanyarray(fill_value)
        if m.dtype.names is not None:
            # Structured dtype: fill each field recursively.
            result = self._data.copy('K')
            _recursive_filled(result, self._mask, fill_value)
        elif not m.any():
            # Mask array exists but nothing is actually masked.
            return self._data
        else:
            result = self._data.copy('K')
            try:
                np.copyto(result, fill_value, where=m)
            except (TypeError, AttributeError):
                # fill_value not directly assignable: fall back to an
                # object-dtype selection via np.choose.
                fill_value = narray(fill_value, dtype=object)
                d = result.astype(object)
                result = np.choose(m, (d, fill_value))
            except IndexError:
                # ok, if scalar
                if self._data.shape:
                    raise
                elif m:
                    result = np.array(fill_value, dtype=self.dtype)
                else:
                    result = self._data
        return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<class 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `~ma.MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
condition : var
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of a along the axis, then output is truncated
to length of condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`~ma.MaskedArray` object.
Notes
-----
Please note the difference with :meth:`compressed` !
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.compress([1, 0, 1])
masked_array(data=[1, 3],
mask=[False, False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(
data=[[1, 3],
[--, --],
[7, 9]],
mask=[[False, False],
[ True, True],
[False, False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray and forget the missing
# values.
condition = np.array(condition, copy=False, subok=False)
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
    def _insert_masked_print(self):
        """
        Replace masked values with masked_print_option, casting all innermost
        dtypes to object.
        """
        if masked_print_option.enabled():
            mask = self._mask
            if mask is nomask:
                res = self._data
            else:
                # convert to object array to make filled work
                data = self._data
                # For big arrays, to avoid a costly conversion to the
                # object dtype, extract the corners before the conversion.
                print_width = (self._print_width if self.ndim > 1
                               else self._print_width_1d)
                for axis in range(self.ndim):
                    if data.shape[axis] > print_width:
                        # Keep only the leading and trailing halves along
                        # this axis; the middle is elided in the repr anyway.
                        ind = print_width // 2
                        arr = np.split(data, (ind, -ind), axis=axis)
                        data = np.concatenate((arr[0], arr[2]), axis=axis)
                        arr = np.split(mask, (ind, -ind), axis=axis)
                        mask = np.concatenate((arr[0], arr[2]), axis=axis)
                # Cast innermost fields to object so the print marker fits.
                rdtype = _replace_dtype_fields(self.dtype, "O")
                res = data.astype(rdtype)
                _recursive_printoption(res, mask, masked_print_option)
        else:
            # Printing of masked entries disabled: just fill them.
            res = self.filled(self.fill_value)
        return res
    def __str__(self):
        # Render with masked entries replaced by the print marker.
        return str(self._insert_masked_print())
    def __repr__(self):
        """
        Literal string representation.
        """
        if self._baseclass is np.ndarray:
            name = 'array'
        else:
            name = self._baseclass.__name__
        # 2016-11-19: Demoted to legacy format
        if np.get_printoptions()['legacy'] == '1.13':
            is_long = self.ndim > 1
            parameters = dict(
                name=name,
                nlen=" " * len(name),
                data=str(self),
                mask=str(self._mask),
                fill=str(self.fill_value),
                dtype=str(self.dtype)
            )
            is_structured = bool(self.dtype.names)
            key = '{}_{}'.format(
                'long' if is_long else 'short',
                'flx' if is_structured else 'std'
            )
            return _legacy_print_templates[key] % parameters
        prefix = f"masked_{name}("
        # Show the dtype when it would otherwise be ambiguous.
        dtype_needed = (
            not np.core.arrayprint.dtype_is_implied(self.dtype) or
            np.all(self.mask) or
            self.size == 0
        )
        # determine which keyword args need to be shown
        keys = ['data', 'mask', 'fill_value']
        if dtype_needed:
            keys.append('dtype')
        # array has only one row (non-column)
        is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1])
        # choose what to indent each keyword with
        min_indent = 2
        if is_one_row:
            # first key on the same line as the type, remaining keys
            # aligned by equals
            indents = {}
            indents[keys[0]] = prefix
            for k in keys[1:]:
                n = builtins.max(min_indent, len(prefix + keys[0]) - len(k))
                indents[k] = ' ' * n
            prefix = ''  # absorbed into the first indent
        else:
            # each key on its own line, indented by two spaces
            indents = {k: ' ' * min_indent for k in keys}
            prefix = prefix + '\n'  # first key on the next line
        # format the field values
        reprs = {}
        reprs['data'] = np.array2string(
            self._insert_masked_print(),
            separator=", ",
            prefix=indents['data'] + 'data=',
            suffix=',')
        reprs['mask'] = np.array2string(
            self._mask,
            separator=", ",
            prefix=indents['mask'] + 'mask=',
            suffix=',')
        reprs['fill_value'] = repr(self.fill_value)
        if dtype_needed:
            reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype)
        # join keys with values and indentations
        result = ',\n'.join(
            '{}{}={}'.format(indents[k], k, reprs[k])
            for k in keys
        )
        return prefix + result + ')'
def _delegate_binop(self, other):
# This emulates the logic in
# private/binop_override.h:forward_binop_should_defer
if isinstance(other, type(self)):
return False
array_ufunc = getattr(other, "__array_ufunc__", False)
if array_ufunc is False:
other_priority = getattr(other, "__array_priority__", -1000000)
return self.__array_priority__ < other_priority
else:
# If array_ufunc is not None, it will be called inside the ufunc;
# None explicitly tells us to not call the ufunc, i.e., defer.
return array_ufunc is None
    def _comparison(self, other, compare):
        """Compare self with other using operator.eq or operator.ne.

        When either of the elements is masked, the result is masked as well,
        but the underlying boolean data are still set, with self and other
        considered equal if both are masked, and unequal otherwise.

        For structured arrays, all fields are combined, with masked values
        ignored. The result is masked if all fields were masked, with self
        and other considered equal only if both were fully masked.
        """
        omask = getmask(other)
        smask = self.mask
        mask = mask_or(smask, omask, copy=True)
        odata = getdata(other)
        if mask.dtype.names is not None:
            # For possibly masked structured arrays we need to be careful,
            # since the standard structured array comparison will use all
            # fields, masked or not. To avoid masked fields influencing the
            # outcome, we set all masked fields in self to other, so they'll
            # count as equal. To prepare, we ensure we have the right shape.
            broadcast_shape = np.broadcast(self, odata).shape
            sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
            sbroadcast._mask = mask
            sdata = sbroadcast.filled(odata)
            # Now take care of the mask; the merged mask should have an item
            # masked if all fields were masked (in one and/or other).
            mask = (mask == np.ones((), mask.dtype))
        else:
            # For regular arrays, just use the data as they come.
            sdata = self.data
        check = compare(sdata, odata)
        if isinstance(check, (np.bool_, bool)):
            # Scalar result: masked wins over the boolean outcome.
            return masked if mask else check
        if mask is not nomask:
            # Adjust elements that were masked, which should be treated
            # as equal if masked in both, unequal if masked in one.
            # Note that this works automatically for structured arrays too.
            check = np.where(mask, compare(smask, omask), check)
            if mask.shape != check.shape:
                # Guarantee consistency of the shape, making a copy since the
                # the mask may need to get written to later.
                mask = np.broadcast_to(mask, check.shape).copy()
        check = check.view(type(self))
        check._update_from(self)
        check._mask = mask
        # Cast fill value to bool_ if needed. If it cannot be cast, the
        # default boolean fill value is used.
        if check._fill_value is not None:
            try:
                fill = _check_fill_value(check._fill_value, np.bool_)
            except (TypeError, ValueError):
                fill = _check_fill_value(None, np.bool_)
            check._fill_value = fill
        return check
    def __eq__(self, other):
        """Check whether other equals self elementwise.

        When either of the elements is masked, the result is masked as well,
        but the underlying boolean data are still set, with self and other
        considered equal if both are masked, and unequal otherwise.

        For structured arrays, all fields are combined, with masked values
        ignored. The result is masked if all fields were masked, with self
        and other considered equal only if both were fully masked.
        """
        # Delegate to the shared helper with operator.eq.
        return self._comparison(other, operator.eq)
    def __ne__(self, other):
        """Check whether other does not equal self elementwise.

        When either of the elements is masked, the result is masked as well,
        but the underlying boolean data are still set, with self and other
        considered equal if both are masked, and unequal otherwise.

        For structured arrays, all fields are combined, with masked values
        ignored. The result is masked if all fields were masked, with self
        and other considered equal only if both were fully masked.
        """
        # Delegate to the shared helper with operator.ne.
        return self._comparison(other, operator.ne)
def __add__(self, other):
"""
Add self to other, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return add(self, other)
def __radd__(self, other):
"""
Add other to self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other + self`.
return add(other, self)
def __sub__(self, other):
"""
Subtract other from self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return subtract(self, other)
def __rsub__(self, other):
"""
Subtract self from other, and return a new masked array.
"""
return subtract(other, self)
def __mul__(self, other):
"Multiply self by other, and return a new masked array."
if self._delegate_binop(other):
return NotImplemented
return multiply(self, other)
def __rmul__(self, other):
"""
Multiply other by self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other * self`.
return multiply(other, self)
def __div__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return divide(self, other)
def __truediv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return true_divide(self, other)
def __rtruediv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return true_divide(other, self)
def __floordiv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return floor_divide(self, other)
def __rfloordiv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return floor_divide(other, self)
def __pow__(self, other):
"""
Raise self to the power other, masking the potential NaNs/Infs
"""
if self._delegate_binop(other):
return NotImplemented
return power(self, other)
def __rpow__(self, other):
"""
Raise other to the power self, masking the potential NaNs/Infs
"""
return power(other, self)
    def __iadd__(self, other):
        """
        Add other to self in-place.
        """
        m = getmask(other)
        if self._mask is nomask:
            # Only materialise a full mask if `other` actually masks anything.
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        else:
            if m is not nomask:
                self._mask += m
        # Masked slots receive the additive identity so the data stays put.
        self._data.__iadd__(np.where(self._mask, self.dtype.type(0),
                                     getdata(other)))
        return self
    def __isub__(self, other):
        """
        Subtract other from self in-place.
        """
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        elif m is not nomask:
            self._mask += m
        self._data.__isub__(np.where(self._mask, self.dtype.type(0),
                                     getdata(other)))
        return self
    def __imul__(self, other):
        """
        Multiply self by other in-place.
        """
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        elif m is not nomask:
            self._mask += m
        # Multiplicative identity (1) keeps masked data unchanged.
        self._data.__imul__(np.where(self._mask, self.dtype.type(1),
                                     getdata(other)))
        return self
    def __idiv__(self, other):
        """
        Divide self by other in-place.
        """
        other_data = getdata(other)
        # Mask entries where the division leaves the safe domain (e.g. /0).
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.divide]
            other_data = np.where(dom_mask, fval, other_data)
        self._mask |= new_mask
        # Masked slots divide by 1 so the data stays put.
        self._data.__idiv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
        return self
    def __ifloordiv__(self, other):
        """
        Floor divide self by other in-place.
        """
        other_data = getdata(other)
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.floor_divide]
            other_data = np.where(dom_mask, fval, other_data)
        self._mask |= new_mask
        self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
                                          other_data))
        return self
    def __itruediv__(self, other):
        """
        True divide self by other in-place.
        """
        other_data = getdata(other)
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.true_divide]
            other_data = np.where(dom_mask, fval, other_data)
        self._mask |= new_mask
        self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
                                         other_data))
        return self
    def __ipow__(self, other):
        """
        Raise self to the power other, in place.
        """
        other_data = getdata(other)
        other_mask = getmask(other)
        # Compute with warnings silenced; invalid results are masked below.
        with np.errstate(divide='ignore', invalid='ignore'):
            self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                         other_data))
        # Any NaN/Inf produced by the power is folded into the mask.
        invalid = np.logical_not(np.isfinite(self._data))
        if invalid.any():
            if self._mask is not nomask:
                self._mask |= invalid
            else:
                self._mask = invalid
            # Replace invalid data with the fill value so it stays finite.
            np.copyto(self._data, self.fill_value, where=invalid)
        new_mask = mask_or(other_mask, invalid)
        self._mask = mask_or(self._mask, new_mask)
        return self
    def __float__(self):
        """
        Convert to float.
        """
        if self.size > 1:
            raise TypeError("Only length-1 arrays can be converted "
                            "to Python scalars")
        elif self._mask:
            # A masked scalar has no defined value: warn and yield NaN.
            warnings.warn("Warning: converting a masked element to nan.", stacklevel=2)
            return np.nan
        return float(self.item())
    def __int__(self):
        """
        Convert to int.
        """
        if self.size > 1:
            raise TypeError("Only length-1 arrays can be converted "
                            "to Python scalars")
        elif self._mask:
            # No integer analogue of NaN exists, so this must fail.
            raise MaskError('Cannot convert masked element to a Python int.')
        return int(self.item())
@property
def imag(self):
"""
The imaginary part of the masked array.
This property is a view on the imaginary part of this `MaskedArray`.
See Also
--------
real
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.imag
masked_array(data=[1.0, --, 1.6],
mask=[False, True, False],
fill_value=1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
# kept for compatibility
get_imag = imag.fget
@property
def real(self):
"""
The real part of the masked array.
This property is a view on the real part of this `MaskedArray`.
See Also
--------
imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.real
masked_array(data=[1.0, --, 3.45],
mask=[False, True, False],
fill_value=1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
# kept for compatibility
get_real = real.fget
    def count(self, axis=None, keepdims=np._NoValue):
        """
        Count the non-masked elements of the array along the given axis.

        Parameters
        ----------
        axis : None or int or tuple of ints, optional
            Axis or axes along which the count is performed.
            The default, None, performs the count over all
            the dimensions of the input array. `axis` may be negative, in
            which case it counts from the last to the first axis.

            .. versionadded:: 1.10.0

            If this is a tuple of ints, the count is performed on multiple
            axes, instead of a single axis or all the axes as before.
        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left
            in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the array.

        Returns
        -------
        result : ndarray or scalar
            An array with the same shape as the input array, with the specified
            axis removed. If the array is a 0-d array, or if `axis` is None, a
            scalar is returned.

        See Also
        --------
        ma.count_masked : Count masked elements in array or along a given axis.

        Examples
        --------
        >>> import numpy.ma as ma
        >>> a = ma.arange(6).reshape((2, 3))
        >>> a[1, :] = ma.masked
        >>> a.count()
        3

        When the `axis` keyword is specified an array of appropriate size is
        returned.

        >>> a.count(axis=0)
        array([1, 1, 1])
        >>> a.count(axis=1)
        array([3, 0])
        """
        # Only forward keepdims if the caller supplied it explicitly.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        m = self._mask
        # special case for matrices (we assume no other subclasses modify
        # their dimensions)
        if isinstance(self.data, np.matrix):
            if m is nomask:
                m = np.zeros(self.shape, dtype=np.bool_)
            m = m.view(type(self.data))
        if m is nomask:
            # Unmasked: the count is just the number of elements reduced.
            # compare to _count_reduce_items in _methods.py
            if self.shape == ():
                if axis not in (None, 0):
                    raise np.AxisError(axis=axis, ndim=self.ndim)
                return 1
            elif axis is None:
                if kwargs.get('keepdims', False):
                    return np.array(self.size, dtype=np.intp, ndmin=self.ndim)
                return self.size
            axes = normalize_axis_tuple(axis, self.ndim)
            items = 1
            for ax in axes:
                items *= self.shape[ax]
            if kwargs.get('keepdims', False):
                out_dims = list(self.shape)
                for a in axes:
                    out_dims[a] = 1
            else:
                out_dims = [d for n, d in enumerate(self.shape)
                            if n not in axes]
            # make sure to return a 0-d array if axis is supplied
            return np.full(out_dims, items, dtype=np.intp)
        # take care of the masked singleton
        if self is masked:
            return 0
        # Count unmasked entries by summing the inverted mask.
        return (~m).sum(axis=axis, dtype=np.intp, **kwargs)
def ravel(self, order='C'):
"""
Returns a 1D version of self, as a view.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.ravel()
masked_array(data=[1, --, 3, --, 5, --, 7, --, 9],
mask=[False, True, False, True, False, True, False, True,
False],
fill_value=999999)
"""
r = ndarray.ravel(self._data, order=order).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape)
else:
r._mask = nomask
return r
def reshape(self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
The reshaping operation cannot guarantee that a copy will not be made,
to modify the shape in place, use ``a.shape = s``
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> x
masked_array(
data=[[--, 2],
[3, --]],
mask=[[ True, False],
[False, True]],
fill_value=999999)
>>> x = x.reshape((4,1))
>>> x
masked_array(
data=[[--],
[2],
[3],
[--]],
mask=[[ True],
[False],
[False],
[ True]],
fill_value=999999)
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
    def put(self, indices, values, mode='raise'):
        """
        Set storage-indexed locations to corresponding values.

        Sets self._data.flat[n] = values[n] for each n in indices.
        If `values` is shorter than `indices` then it will repeat.
        If `values` has some masked values, the initial mask is updated
        in consequence, else the corresponding values are unmasked.

        Parameters
        ----------
        indices : 1-D array_like
            Target indices, interpreted as integers.
        values : array_like
            Values to place in self._data copy at target indices.
        mode : {'raise', 'wrap', 'clip'}, optional
            Specifies how out-of-bounds indices will behave.
            'raise' : raise an error.
            'wrap' : wrap around.
            'clip' : clip to the range.

        Notes
        -----
        `values` can be a scalar or length 1 array.

        Examples
        --------
        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
        >>> x.put([0,4,8],[10,20,30])
        >>> x
        masked_array(
          data=[[10, --, 3],
                [--, 20, --],
                [7, --, 30]],
          mask=[[False,  True, False],
                [ True, False,  True],
                [False,  True, False]],
          fill_value=999999)
        """
        # Hard mask: Get rid of the values/indices that fall on masked data
        if self._hardmask and self._mask is not nomask:
            mask = self._mask[indices]
            indices = narray(indices, copy=False)
            values = narray(values, copy=False, subok=True)
            # Broadcast values to the indices' shape before filtering, so
            # the two stay aligned when masked positions are dropped.
            values.resize(indices.shape)
            indices = indices[~mask]
            values = values[~mask]
        self._data.put(indices, values, mode=mode)
        # short circuit if neither self nor values are masked
        if self._mask is nomask and getmask(values) is nomask:
            return
        # Otherwise update the mask: unmask where plain values were written,
        # copy the values' mask where masked values were written.
        m = getmaskarray(self)
        if getmask(values) is nomask:
            m.put(indices, False, mode=mode)
        else:
            m.put(indices, values._mask, mode=mode)
        m = make_mask(m, copy=False, shrink=True)
        self._mask = m
        return
def ids(self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832) # may vary
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284) # may vary
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
    def iscontiguous(self):
        """
        Return a boolean indicating whether the data is contiguous.

        Parameters
        ----------
        None

        Examples
        --------
        >>> x = np.ma.array([1, 2, 3])
        >>> x.iscontiguous()
        True

        `iscontiguous` returns one of the flags of the masked array:

        >>> x.flags
          C_CONTIGUOUS : True
          F_CONTIGUOUS : True
          OWNDATA : False
          WRITEABLE : True
          ALIGNED : True
          WRITEBACKIFCOPY : False
          UPDATEIFCOPY : False
        """
        # Delegated to the underlying ndarray's flags.
        return self.flags['CONTIGUOUS']
def all(self, axis=None, out=None, keepdims=np._NoValue):
"""
Returns True if all elements evaluate to True.
The output array is masked where all the values along the given axis
are masked: if the output would have been a scalar and that all the
values are masked, then the output is `masked`.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.ndarray.all : corresponding function for ndarrays
numpy.all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
mask = _check_mask_axis(self._mask, axis, **kwargs)
if out is None:
d = self.filled(True).all(axis=axis, **kwargs).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
    def any(self, axis=None, out=None, keepdims=np._NoValue):
        """
        Returns True if any of the elements of `a` evaluate to True.
        Masked values are considered as False during computation.
        Refer to `numpy.any` for full documentation.
        See Also
        --------
        numpy.ndarray.any : corresponding function for ndarrays
        numpy.any : equivalent function
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        # Result is masked only where *all* values along `axis` are masked.
        mask = _check_mask_axis(self._mask, axis, **kwargs)
        if out is None:
            # Masked entries count as False (filled with False) so they
            # never make the reduction True.
            d = self.filled(False).any(axis=axis, **kwargs).view(type(self))
            if d.ndim:
                d.__setmask__(mask)
            elif mask:
                # Scalar result with everything masked: the singleton.
                d = masked
            return d
        # Explicit output: reduce into `out`, then propagate the mask if
        # `out` can carry one.
        self.filled(False).any(axis=axis, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            if out.ndim or mask:
                out.__setmask__(mask)
        return out
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
numpy.ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(
data=[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
mask=False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(
data=[[1.0, 0.0, 0.0],
[0.0, --, 0.0],
[0.0, 0.0, 1.0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(
data=[[False, False, False],
[ True, True, True],
[ True, True, True]],
mask=False,
fill_value=True)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
    def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
        """
        (this docstring should be overwritten)
        """
        #!!!: implement out + test!
        m = self._mask
        if m is nomask:
            # No mask: defer to the plain ndarray trace, then cast.
            result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
                                                    axis2=axis2, out=out)
            return result.astype(dtype)
        else:
            # With a mask, sum the diagonal with masked entries treated
            # as 0 so they do not contribute to the trace.
            D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
            return D.astype(dtype).filled(0).sum(axis=-1, out=out)
    # The placeholder docstring above is replaced by ndarray's.
    trace.__doc__ = ndarray.trace.__doc__
def dot(self, b, out=None, strict=False):
"""
a.dot(b, out=None)
Masked dot product of two arrays. Note that `out` and `strict` are
located in different positions than in `ma.dot`. In order to
maintain compatibility with the functional version, it is
recommended that the optional arguments be treated as keyword only.
At some point that may be mandatory.
.. versionadded:: 1.10.0
Parameters
----------
b : masked_array_like
Inputs array.
out : masked_array, optional
Output argument. This must have the exact kind that would be
returned if it was not used. In particular, it must have the
right type, must be C-contiguous, and its dtype must be the
dtype that would be returned for `ma.dot(a,b)`. This is a
performance feature. Therefore, if these conditions are not
met, an exception is raised, instead of attempting to be
flexible.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False)
for the computation. Default is False. Propagating the mask
means that if a masked value appears in a row or column, the
whole row or column is considered masked.
.. versionadded:: 1.10.2
See Also
--------
numpy.ma.dot : equivalent function
"""
return dot(self, b, out=out, strict=strict)
    def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
        """
        Return the sum of the array elements over the given axis.
        Masked elements are set to 0 internally.
        Refer to `numpy.sum` for full documentation.
        See Also
        --------
        numpy.ndarray.sum : corresponding function for ndarrays
        numpy.sum : equivalent function
        Examples
        --------
        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
        >>> x
        masked_array(
          data=[[1, --, 3],
                [--, 5, --],
                [7, --, 9]],
          mask=[[False,  True, False],
                [ True, False,  True],
                [False,  True, False]],
          fill_value=999999)
        >>> x.sum()
        25
        >>> x.sum(axis=1)
        masked_array(data=[4, 5, 16],
                     mask=[False, False, False],
               fill_value=999999)
        >>> x.sum(axis=0)
        masked_array(data=[8, 5, 12],
                     mask=[False, False, False],
               fill_value=999999)
        >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
        <class 'numpy.int64'>
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        _mask = self._mask
        # Result is masked only where everything along `axis` is masked.
        newmask = _check_mask_axis(_mask, axis, **kwargs)
        # No explicit output
        if out is None:
            # Masked entries are replaced by the additive identity 0.
            result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
            rndim = getattr(result, 'ndim', 0)
            if rndim:
                result = result.view(type(self))
                result.__setmask__(newmask)
            elif newmask:
                # Fully masked scalar result: the `masked` singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            outmask = getmask(out)
            if outmask is nomask:
                # `out` had no mask yet; give it a full-size one so the
                # new mask can be written in place.
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the array elements over the given axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumsum` for full documentation.
Notes
-----
The mask is lost if `out` is not a valid :class:`ma.MaskedArray` !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
numpy.ndarray.cumsum : corresponding function for ndarrays
numpy.cumsum : equivalent function
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> marr.cumsum()
masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
mask=[False, False, False, True, True, True, False, False,
False, False],
fill_value=999999)
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
    def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
        """
        Return the product of the array elements over the given axis.
        Masked elements are set to 1 internally for computation.
        Refer to `numpy.prod` for full documentation.
        Notes
        -----
        Arithmetic is modular when using integer types, and no error is raised
        on overflow.
        See Also
        --------
        numpy.ndarray.prod : corresponding function for ndarrays
        numpy.prod : equivalent function
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        _mask = self._mask
        # Result is masked only where everything along `axis` is masked.
        newmask = _check_mask_axis(_mask, axis, **kwargs)
        # No explicit output
        if out is None:
            # Masked entries are replaced by the multiplicative identity 1.
            result = self.filled(1).prod(axis, dtype=dtype, **kwargs)
            rndim = getattr(result, 'ndim', 0)
            if rndim:
                result = result.view(type(self))
                result.__setmask__(newmask)
            elif newmask:
                # Fully masked scalar result: the `masked` singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            outmask = getmask(out)
            if outmask is nomask:
                # `out` had no mask yet; give it a full-size one so the
                # new mask can be written in place.
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        return out
    # Alias kept for backward compatibility with `numpy.product`.
    product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the array elements over the given axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumprod` for full documentation.
Notes
-----
The mask is lost if `out` is not a valid MaskedArray !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
numpy.ndarray.cumprod : corresponding function for ndarrays
numpy.cumprod : equivalent function
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
    def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
        """
        Returns the average of the array elements along given axis.
        Masked entries are ignored, and result elements which are not
        finite will be masked.
        Refer to `numpy.mean` for full documentation.
        See Also
        --------
        numpy.ndarray.mean : corresponding function for ndarrays
        numpy.mean : Equivalent function
        numpy.ma.average: Weighted average.
        Examples
        --------
        >>> a = np.ma.array([1,2,3], mask=[False, False, True])
        >>> a
        masked_array(data=[1, 2, --],
                     mask=[False, False,  True],
               fill_value=999999)
        >>> a.mean()
        1.5
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        if self._mask is nomask:
            # No mask: plain ndarray mean; `[()]` demotes 0-d to scalar.
            result = super(MaskedArray, self).mean(axis=axis,
                                                   dtype=dtype, **kwargs)[()]
        else:
            # Mean over unmasked entries only: masked sum / unmasked count.
            dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
            cnt = self.count(axis=axis, **kwargs)
            if cnt.shape == () and (cnt == 0):
                # Everything masked: scalar result is the singleton.
                result = masked
            else:
                result = dsum * 1. / cnt
        if out is not None:
            out.flat = result
            if isinstance(out, MaskedArray):
                outmask = getmask(out)
                if outmask is nomask:
                    # `out` had no mask yet; give it a full-size one.
                    outmask = out._mask = make_mask_none(out.shape)
                outmask.flat = getmask(result)
            return out
        return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data=[-1., 0., 1.],
mask=False,
fill_value=1e+20)
"""
m = self.mean(axis, dtype)
if m is masked:
return m
if not axis:
return self - m
else:
return self - expand_dims(m, axis)
    def var(self, axis=None, dtype=None, out=None, ddof=0,
            keepdims=np._NoValue):
        """
        Returns the variance of the array elements along given axis.
        Masked entries are ignored, and result elements which are not
        finite will be masked.
        Refer to `numpy.var` for full documentation.
        See Also
        --------
        numpy.ndarray.var : corresponding function for ndarrays
        numpy.var : Equivalent function
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        # Easy case: nomask, business as usual
        if self._mask is nomask:
            # `[()]` demotes a 0-d result to a scalar.
            ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out,
                                               ddof=ddof, **kwargs)[()]
            if out is not None:
                if isinstance(out, MaskedArray):
                    out.__setmask__(nomask)
                return out
            return ret
        # Some data are masked, yay!
        # Degrees of freedom: unmasked count minus ddof (may go <= 0).
        cnt = self.count(axis=axis, **kwargs) - ddof
        danom = self - self.mean(axis, dtype, keepdims=True)
        if iscomplexobj(self):
            # |x - mean|^2 for complex data.
            danom = umath.absolute(danom) ** 2
        else:
            danom *= danom
        dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
        # Apply the mask if it's not a scalar
        if dvar.ndim:
            # Masked where all inputs are masked or dof is non-positive.
            dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
            dvar._update_from(self)
        elif getmask(dvar):
            # Make sure that masked is returned when the scalar is masked.
            dvar = masked
            if out is not None:
                if isinstance(out, MaskedArray):
                    out.flat = 0
                    out.__setmask__(True)
                elif out.dtype.kind in 'biu':
                    # Integer/boolean `out` cannot represent a masked
                    # (NaN-like) result; refuse rather than lose info.
                    errmsg = "Masked data information would be lost in one or "\
                             "more location."
                    raise MaskError(errmsg)
                else:
                    out.flat = np.nan
                return out
        # In case with have an explicit output
        if out is not None:
            # Set the data
            out.flat = dvar
            # Set the mask if needed
            if isinstance(out, MaskedArray):
                out.__setmask__(dvar.mask)
            return out
        return dvar
    # Keep the rich `numpy.var` docstring on the masked version.
    var.__doc__ = np.var.__doc__
    def std(self, axis=None, dtype=None, out=None, ddof=0,
            keepdims=np._NoValue):
        """
        Returns the standard deviation of the array elements along given axis.
        Masked entries are ignored.
        Refer to `numpy.std` for full documentation.
        See Also
        --------
        numpy.ndarray.std : corresponding function for ndarrays
        numpy.std : Equivalent function
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        # std = sqrt(var); the variance call also handles `out`.
        dvar = self.var(axis, dtype, out, ddof, **kwargs)
        if dvar is not masked:
            if out is not None:
                # Take the square root in place on `out` (which already
                # holds the variance); 'unsafe' allows the dtype cast.
                np.power(out, 0.5, out=out, casting='unsafe')
                return out
            dvar = sqrt(dvar)
        return dvar
def round(self, decimals=0, out=None):
"""
Return each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.ndarray.round : corresponding function for ndarrays
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
if result.ndim > 0:
result._mask = self._mask
result._update_from(self)
elif self._mask:
# Return masked when the scalar is masked
result = masked
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
def argsort(self, axis=np._NoValue, kind=None, order=None,
endwith=True, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. If None, the default, the flattened array
is used.
.. versionchanged:: 1.13.0
Previously, the default was documented to be -1, but that was
in error. At some future date, the default will change to -1, as
originally intended.
Until then, the axis should be given explicitly when
``arr.ndim > 1``, to avoid a FutureWarning.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
The sorting algorithm used.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
When the array contains unmasked values at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
ma.MaskedArray.sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
numpy.ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data=[3, 2, --],
mask=[False, False, True],
fill_value=999999)
>>> a.argsort()
array([1, 0, 2])
"""
# 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
if axis is np._NoValue:
axis = _deprecate_argsort_axis(self)
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(self.dtype, np.floating):
fill_value = np.nan
else:
fill_value = minimum_fill_value(self)
else:
fill_value = maximum_fill_value(self)
filled = self.filled(fill_value)
return filled.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
ndarray or scalar
If multi-dimension input, returns a new ndarray of indices to the
minimum values along the given axis. Otherwise, returns a scalar
of index to the minimum values along the given axis.
Examples
--------
>>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> x
masked_array(
data=[[--, --],
[2, 3]],
mask=[[ True, True],
[False, False]],
fill_value=999999)
>>> x.argmin(axis=0, fill_value=-1)
array([0, 0])
>>> x.argmin(axis=0, fill_value=9)
array([1, 1])
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
def sort(self, axis=-1, kind=None, order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
Parameters
----------
a : array_like
Array to be sorted.
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
The sorting algorithm used.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
When the array contains unmasked values sorting at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
numpy.ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> a
masked_array(data=[1, 3, 5, --, --],
mask=[False, False, False, True, True],
fill_value=999999)
>>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> a
masked_array(data=[--, --, 1, 3, 5],
mask=[ True, True, False, False, False],
fill_value=999999)
>>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> a
masked_array(data=[1, --, --, 3, 5],
mask=[False, True, True, False, False],
fill_value=999999)
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
return
if self is masked:
return
sidx = self.argsort(axis=axis, kind=kind, order=order,
fill_value=fill_value, endwith=endwith)
self[...] = np.take_along_axis(self, sidx, axis=axis)
    def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
        """
        Return the minimum along a given axis.
        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to operate.  By default, ``axis`` is None and the
            flattened input is used.
        out : array_like, optional
            Alternative output array in which to place the result.  Must be of
            the same shape and buffer length as the expected output.
        fill_value : {var}, optional
            Value used to fill in the masked values.
            If None, use the output of `minimum_fill_value`.
        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left
            in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the array.
        Returns
        -------
        amin : array_like
            New array holding the result.
            If ``out`` was specified, ``out`` is returned.
        See Also
        --------
        ma.minimum_fill_value
            Returns the minimum filling value for a given datatype.
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        _mask = self._mask
        # Result is masked only where everything along `axis` is masked.
        newmask = _check_mask_axis(_mask, axis, **kwargs)
        if fill_value is None:
            # Default fill keeps masked entries from winning the minimum.
            fill_value = minimum_fill_value(self)
        # No explicit output
        if out is None:
            result = self.filled(fill_value).min(
                axis=axis, out=out, **kwargs).view(type(self))
            if result.ndim:
                # Set the mask
                result.__setmask__(newmask)
                # Get rid of Infs
                if newmask.ndim:
                    # Fully masked slots carry the fill value, not the
                    # (possibly infinite) filled minimum.
                    np.copyto(result, result.fill_value, where=newmask)
            elif newmask:
                # Fully masked scalar result: the `masked` singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            outmask = getmask(out)
            if outmask is nomask:
                # `out` had no mask yet; give it a full-size one.
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        else:
            if out.dtype.kind in 'biu':
                # Integer/boolean `out` cannot represent masked (NaN)
                # results; refuse rather than silently lose the mask.
                errmsg = "Masked data information would be lost in one or more"\
                         " location."
                raise MaskError(errmsg)
            np.copyto(out, np.nan, where=newmask)
        return out
    # unique to masked arrays
    def mini(self, axis=None):
        """
        Return the array minimum along the specified axis.
        .. deprecated:: 1.13.0
            This function is identical to both:
            * ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)``
            * ``np.ma.minimum.reduce(self, axis=axis)``
            Typically though, ``self.min(axis=axis)`` is sufficient.
        Parameters
        ----------
        axis : int, optional
            The axis along which to find the minima. Default is None, in which case
            the minimum value in the whole array is returned.
        Returns
        -------
        min : scalar or MaskedArray
            If `axis` is None, the result is a scalar. Otherwise, if `axis` is
            given and the array is at least 2-D, the result is a masked array with
            dimension one smaller than the array on which `mini` is called.
        Examples
        --------
        >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
        >>> x
        masked_array(
          data=[[0, --],
                [2, 3],
                [4, --]],
          mask=[[False,  True],
                [False, False],
                [False,  True]],
          fill_value=999999)
        >>> x.mini()
        masked_array(data=0,
                     mask=False,
               fill_value=999999)
        >>> x.mini(axis=0)
        masked_array(data=[0, 3],
                     mask=[False, False],
               fill_value=999999)
        >>> x.mini(axis=1)
        masked_array(data=[0, 2, 4],
                     mask=[False, False, False],
               fill_value=999999)
        There is a small difference between `mini` and `min`:
        >>> x[:,1].mini(axis=0)
        masked_array(data=3,
                     mask=False,
               fill_value=999999)
        >>> x[:,1].min(axis=0)
        3
        """
        # 2016-04-13, 1.13.0, gh-8764
        # Deprecated: emit the warning, then delegate to the masked
        # `minimum` ufunc's reduce.
        warnings.warn(
            "`mini` is deprecated; use the `min` method or "
            "`np.ma.minimum.reduce instead.",
            DeprecationWarning, stacklevel=2)
        return minimum.reduce(self, axis)
    def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
        """
        Return the maximum along a given axis.
        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to operate.  By default, ``axis`` is None and the
            flattened input is used.
        out : array_like, optional
            Alternative output array in which to place the result.  Must
            be of the same shape and buffer length as the expected output.
        fill_value : {var}, optional
            Value used to fill in the masked values.
            If None, use the output of maximum_fill_value().
        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left
            in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the array.
        Returns
        -------
        amax : array_like
            New array holding the result.
            If ``out`` was specified, ``out`` is returned.
        See Also
        --------
        ma.maximum_fill_value
            Returns the maximum filling value for a given datatype.
        """
        # Only forward `keepdims` when the caller supplied it.
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
        _mask = self._mask
        # Result is masked only where everything along `axis` is masked.
        newmask = _check_mask_axis(_mask, axis, **kwargs)
        if fill_value is None:
            # Default fill keeps masked entries from winning the maximum.
            fill_value = maximum_fill_value(self)
        # No explicit output
        if out is None:
            result = self.filled(fill_value).max(
                axis=axis, out=out, **kwargs).view(type(self))
            if result.ndim:
                # Set the mask
                result.__setmask__(newmask)
                # Get rid of Infs
                if newmask.ndim:
                    # Fully masked slots carry the fill value, not the
                    # (possibly infinite) filled maximum.
                    np.copyto(result, result.fill_value, where=newmask)
            elif newmask:
                # Fully masked scalar result: the `masked` singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            outmask = getmask(out)
            if outmask is nomask:
                # `out` had no mask yet; give it a full-size one.
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        else:
            if out.dtype.kind in 'biu':
                # Integer/boolean `out` cannot represent masked (NaN)
                # results; refuse rather than silently lose the mask.
                errmsg = "Masked data information would be lost in one or more"\
                         " location."
                raise MaskError(errmsg)
            np.copyto(out, np.nan, where=newmask)
        return out
    def ptp(self, axis=None, out=None, fill_value=None, keepdims=False):
        """
        Return (maximum - minimum) along the given dimension
        (i.e. peak-to-peak value).
        .. warning::
            `ptp` preserves the data type of the array. This means the
            return value for an input of signed integers with n bits
            (e.g. `np.int8`, `np.int16`, etc) is also a signed integer
            with n bits.  In that case, peak-to-peak values greater than
            ``2**(n-1)-1`` will be returned as negative values. An example
            with a work-around is shown below.
        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to find the peaks.  If None (default) the
            flattened array is used.
        out : {None, array_like}, optional
            Alternative output array in which to place the result. It must
            have the same shape and buffer length as the expected output
            but the type will be cast if necessary.
        fill_value : {var}, optional
            Value used to fill in the masked values.
        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left
            in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the array.
        Returns
        -------
        ptp : ndarray.
            A new array holding the result, unless ``out`` was
            specified, in which case a reference to ``out`` is returned.
        Examples
        --------
        >>> x = np.ma.MaskedArray([[4, 9, 2, 10],
        ...                        [6, 9, 7, 12]])
        >>> x.ptp(axis=1)
        masked_array(data=[8, 6],
                     mask=False,
               fill_value=999999)
        >>> x.ptp(axis=0)
        masked_array(data=[2, 0, 5, 2],
                     mask=False,
               fill_value=999999)
        >>> x.ptp()
        10
        This example shows that a negative value can be returned when
        the input is an array of signed integers.
        >>> y = np.ma.MaskedArray([[1, 127],
        ...                        [0, 127],
        ...                        [-1, 127],
        ...                        [-2, 127]], dtype=np.int8)
        >>> y.ptp(axis=1)
        masked_array(data=[ 126,  127, -128, -127],
                     mask=False,
               fill_value=999999,
                    dtype=int8)
        A work-around is to use the `view()` method to view the result as
        unsigned integers with the same bit width:
        >>> y.ptp(axis=1).view(np.uint8)
        masked_array(data=[126, 127, 128, 129],
                     mask=False,
               fill_value=999999,
                    dtype=uint8)
        """
        if out is None:
            # Compute max, then subtract min in place to get the range.
            result = self.max(axis=axis, fill_value=fill_value,
                              keepdims=keepdims)
            result -= self.min(axis=axis, fill_value=fill_value,
                               keepdims=keepdims)
            return result
        # Explicit output: write the max into `out`, then subtract the
        # min; 'unsafe' casting mirrors ndarray.ptp's behavior with out.
        out.flat = self.max(axis=axis, out=out, fill_value=fill_value,
                            keepdims=keepdims)
        min_value = self.min(axis=axis, fill_value=fill_value,
                             keepdims=keepdims)
        np.subtract(out, min_value, out=out, casting='unsafe')
        return out
def partition(self, *args, **kwargs):
warnings.warn("Warning: 'partition' will ignore the 'mask' "
f"of the {self.__class__.__name__}.",
stacklevel=2)
return super(MaskedArray, self).partition(*args, **kwargs)
def argpartition(self, *args, **kwargs):
warnings.warn("Warning: 'argpartition' will ignore the 'mask' "
f"of the {self.__class__.__name__}.",
stacklevel=2)
return super(MaskedArray, self).argpartition(*args, **kwargs)
    def take(self, indices, axis=None, out=None, mode='raise'):
        """
        Take elements from the masked array along an axis, carrying the
        mask along with the data.  Indices that are themselves masked
        select position 0 and are masked in the output.
        """
        (_data, _mask) = (self._data, self._mask)
        cls = type(self)
        # Make sure the indices are not masked
        maskindices = getmask(indices)
        if maskindices is not nomask:
            # Masked index positions read element 0; they are re-masked
            # in the output via `maskindices` below.
            indices = indices.filled(0)
        # Get the data, promoting scalars to 0d arrays with [...] so that
        # .view works correctly
        if out is None:
            out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)
        else:
            np.take(_data, indices, axis=axis, mode=mode, out=out)
        # Get the mask
        if isinstance(out, MaskedArray):
            if _mask is nomask:
                outmask = maskindices
            else:
                # Take the mask the same way as the data, then mask any
                # positions selected by a masked index.
                outmask = _mask.take(indices, axis=axis, mode=mode)
                outmask |= maskindices
            out.__setmask__(outmask)
        # demote 0d arrays back to scalars, for consistency with ndarray.take
        return out[()]
# Array methods
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
flatten = _arraymethod('flatten')
repeat = _arraymethod('repeat')
squeeze = _arraymethod('squeeze')
swapaxes = _arraymethod('swapaxes')
T = property(fget=lambda self: self.transpose())
transpose = _arraymethod('transpose')
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
# No mask ? Just return .data.tolist ?
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array.
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays.
if _mask is nomask:
return [None]
# Set temps to save time when dealing w/ marrays.
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
def tostring(self, fill_value=None, order='C'):
r"""
A compatibility alias for `tobytes`, with exactly the same behavior.
Despite its name, it returns `bytes` not `str`\ s.
.. deprecated:: 1.19.0
"""
# 2020-03-30, Numpy 1.19.0
warnings.warn(
"tostring() is deprecated. Use tobytes() instead.",
DeprecationWarning, stacklevel=2)
return self.tobytes(fill_value, order=order)
def tobytes(self, fill_value=None, order='C'):
    """
    Return the array data as a string containing the raw bytes in the array.
    Masked entries are replaced by a fill value before conversion, so the
    output has no holes.
    .. versionadded:: 1.9.0
    Parameters
    ----------
    fill_value : scalar, optional
        Value used to fill in the masked values. Default is None, in which
        case `MaskedArray.fill_value` is used.
    order : {'C','F','A'}, optional
        Memory order of the items in the copy ('C' row major, 'F' column
        major, 'A' current order). Default is 'C'.
    See Also
    --------
    numpy.ndarray.tobytes
    tolist, tofile
    Notes
    -----
    As for `ndarray.tobytes`, information about the shape, dtype, etc.,
    but also about `fill_value`, will be lost.
    Examples
    --------
    >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
    >>> x.tobytes()
    b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
    """
    filled_data = self.filled(fill_value)
    return filled_data.tobytes(order=order)
def tofile(self, fid, sep="", format="%s"):
    """
    Save a masked array to a file in binary format.
    .. warning::
        This function is not implemented yet.
    Parameters
    ----------
    fid : file-like
        Target file object (unused; kept for ndarray signature parity).
    sep : str, optional
        Separator (unused).
    format : str, optional
        Item format (unused).
    Raises
    ------
    NotImplementedError
        When `tofile` is called.
    """
    # Deliberately unimplemented: there is no agreed-on on-disk
    # representation for the mask alongside the data.
    raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
def toflex(self):
    """
    Transforms a masked array into a flexible-type array.
    The flexible type array that is returned will have two fields:
    * the ``_data`` field stores the ``_data`` part of the array.
    * the ``_mask`` field stores the ``_mask`` part of the array.
    Parameters
    ----------
    None
    Returns
    -------
    record : ndarray
        A new flexible-type `ndarray` with two fields: the first element
        containing a value, the second element containing the corresponding
        mask boolean. The returned record shape matches self.shape.
    Notes
    -----
    A side-effect of transforming a masked array into a flexible `ndarray` is
    that meta information (``fill_value``, ...) will be lost.
    Examples
    --------
    >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
    >>> x.toflex()
    array([[(1, False), (2,  True), (3, False)],
           [(4,  True), (5, False), (6,  True)],
           [(7, False), (8,  True), (9, False)]],
          dtype=[('_data', '<i8'), ('_mask', '?')])
    """
    # Get the basic dtype.
    ddtype = self.dtype
    # Make sure we have a mask: synthesize a full-False mask array when the
    # array carries none (self._mask can be None).
    _mask = self._mask
    if _mask is None:
        _mask = make_mask_none(self.shape, ddtype)
    # BUGFIX: use the (possibly synthesized) local mask below.  The original
    # code read self._mask again, which raised AttributeError on None.
    mdtype = _mask.dtype
    record = np.ndarray(shape=self.shape,
                        dtype=[('_data', ddtype), ('_mask', mdtype)])
    record['_data'] = self._data
    record['_mask'] = _mask
    return record
torecords = toflex
# Pickling
def __getstate__(self):
    """Return the internal state of the masked array, for pickling
    purposes.
    """
    # 'F' when the array is Fortran-contiguous but not C-contiguous
    # (flags.fnc), else 'C'; the mask bytes are serialized in that order.
    cf = 'CF'[self.flags.fnc]
    data_state = super(MaskedArray, self).__reduce__()[2]
    # ndarray state + raw mask bytes + fill value.
    return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
def __setstate__(self, state):
    """Restore the internal state of the masked array, for
    pickling purposes. ``state`` is typically the output of the
    ``__getstate__`` output, and is a 7-tuple:
    - class/version marker (ignored)
    - a tuple giving the shape of the data
    - a typecode for the data
    - an isfortran flag
    - a binary string for the data
    - a binary string for the mask
    - the fill value
    """
    (_, shp, typ, isf, raw, msk, flv) = state
    super(MaskedArray, self).__setstate__((shp, typ, isf, raw))
    # Rebuild the mask with the mask dtype matching the data dtype
    # (boolean per field for structured arrays).
    self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
    self.fill_value = flv
def __reduce__(self):
    """Return a 3-tuple for pickling a MaskedArray.
    """
    # _mareconstruct builds an empty shell; __setstate__ then fills in
    # data, mask and fill_value from __getstate__'s tuple.
    return (_mareconstruct,
            (self.__class__, self._baseclass, (0,), 'b',),
            self.__getstate__())
def __deepcopy__(self, memo=None):
    """Deep-copy the array data and mask, then every extra attribute."""
    from copy import deepcopy
    duplicate = MaskedArray.__new__(type(self), self, copy=True)
    memo = {} if memo is None else memo
    # Register the copy first so self-referencing attributes resolve.
    memo[id(self)] = duplicate
    for attr, value in self.__dict__.items():
        duplicate.__dict__[attr] = deepcopy(value, memo)
    return duplicate
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
    """Internal function that rebuilds a new MaskedArray from the
    information stored in a pickle (the callable used by __reduce__).
    """
    data_shell = ndarray.__new__(baseclass, baseshape, basetype)
    mask_shell = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
    return subtype.__new__(subtype, data_shell, mask=mask_shell, dtype=basetype,)
class mvoid(MaskedArray):
    """
    Fake a 'void' object to use for masked array with structured dtypes.
    """
    def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
                hardmask=False, copy=False, subok=True):
        _data = np.array(data, copy=copy, subok=subok, dtype=dtype)
        _data = _data.view(self)
        _data._hardmask = hardmask
        if mask is not nomask:
            if isinstance(mask, np.void):
                _data._mask = mask
            else:
                try:
                    # Mask is already a 0D array
                    _data._mask = np.void(mask)
                except TypeError:
                    # Transform the mask to a void
                    mdtype = make_mask_descr(dtype)
                    _data._mask = np.array(mask, dtype=mdtype)[()]
        if fill_value is not None:
            _data.fill_value = fill_value
        return _data
    @property
    def _data(self):
        # Make sure that the _data part is a np.void
        return super(mvoid, self)._data[()]
    def __getitem__(self, indx):
        """
        Get the index.
        """
        m = self._mask
        if isinstance(m[indx], ndarray):
            # Can happen when indx is a multi-dimensional field:
            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
            #     False],)], dtype=[("A", ">i2", (2,))])
            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
            # and we can not say masked/unmasked.
            # The result is no longer mvoid!
            # See also issue #6724.
            return masked_array(
                data=self._data[indx], mask=m[indx],
                fill_value=self._fill_value[indx],
                hard_mask=self._hardmask)
        if m is not nomask and m[indx]:
            return masked
        return self._data[indx]
    def __setitem__(self, indx, value):
        self._data[indx] = value
        if self._hardmask:
            # Hard mask: masked entries stay masked, new masks accumulate.
            self._mask[indx] |= getattr(value, "_mask", False)
        else:
            self._mask[indx] = getattr(value, "_mask", False)
    def __str__(self):
        m = self._mask
        if m is nomask:
            return str(self._data)
        # Print through an object-dtype copy so masked fields can show the
        # masked-print marker ('--' by default).
        rdtype = _replace_dtype_fields(self._data.dtype, "O")
        data_arr = super(mvoid, self)._data
        res = data_arr.astype(rdtype)
        _recursive_printoption(res, self._mask, masked_print_option)
        return str(res)
    __repr__ = __str__
    def __iter__(self):
        "Defines an iterator for mvoid"
        (_data, _mask) = (self._data, self._mask)
        if _mask is nomask:
            yield from _data
        else:
            # Yield the masked singleton for masked fields, the data
            # otherwise.
            for (d, m) in zip(_data, _mask):
                if m:
                    yield masked
                else:
                    yield d
    def __len__(self):
        return self._data.__len__()
    def filled(self, fill_value=None):
        """
        Return a copy with masked fields filled with a given value.
        Parameters
        ----------
        fill_value : array_like, optional
            The value to use for invalid entries. Can be scalar or
            non-scalar. If latter is the case, the filled array should
            be broadcastable over input array. Default is None, in
            which case the `fill_value` attribute is used instead.
        Returns
        -------
        filled_void
            A `np.void` object
        See Also
        --------
        MaskedArray.filled
        """
        return asarray(self).filled(fill_value)[()]
    def tolist(self):
        """
        Transforms the mvoid object into a tuple.
        Masked fields are replaced by None.
        Returns
        -------
        returned_tuple
            Tuple of fields
        """
        _mask = self._mask
        if _mask is nomask:
            return self._data.tolist()
        result = []
        for (d, m) in zip(self._data, self._mask):
            if m:
                result.append(None)
            else:
                # .item() makes sure we return a standard Python object
                result.append(d.item())
        return tuple(result)
##############################################################################
# Shortcuts #
##############################################################################
def isMaskedArray(x):
    """
    Test whether input is an instance of MaskedArray.
    This function returns True if `x` is an instance of MaskedArray
    and returns False otherwise. Any object is accepted as input.
    Parameters
    ----------
    x : object
        Object to test.
    Returns
    -------
    result : bool
        True if `x` is a MaskedArray.
    See Also
    --------
    isMA : Alias to isMaskedArray.
    isarray : Alias to isMaskedArray.
    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = ma.masked_values(np.eye(3, 3), 0)
    >>> ma.isMaskedArray(m)
    True
    >>> ma.isMaskedArray(np.eye(3, 3))
    False
    >>> ma.isMaskedArray([0, 1, 2])
    False
    """
    # A plain isinstance check: subclasses of MaskedArray count as well.
    return isinstance(x, MaskedArray)
# Short aliases kept for backward compatibility with older numpy.ma APIs.
isarray = isMaskedArray
isMA = isMaskedArray  # backward compatibility
class MaskedConstant(MaskedArray):
    # the lone np.ma.masked instance
    # Note: the double-underscore names below are name-mangled on purpose so
    # subclasses get their own singleton slot instead of sharing this one.
    __singleton = None
    @classmethod
    def __has_singleton(cls):
        # second case ensures `cls.__singleton` is not just a view on the
        # superclass singleton
        return cls.__singleton is not None and type(cls.__singleton) is cls
    def __new__(cls):
        if not cls.__has_singleton():
            # We define the masked singleton as a float for higher precedence.
            # Note that it can be tricky sometimes w/ type comparison
            data = np.array(0.)
            mask = np.array(True)
            # prevent any modifications
            data.flags.writeable = False
            mask.flags.writeable = False
            # don't fall back on MaskedArray.__new__(MaskedConstant), since
            # that might confuse it - this way, the construction is entirely
            # within our control
            cls.__singleton = MaskedArray(data, mask=mask).view(cls)
        return cls.__singleton
    def __array_finalize__(self, obj):
        if not self.__has_singleton():
            # this handles the `.view` in __new__, which we want to copy across
            # properties normally
            return super(MaskedConstant, self).__array_finalize__(obj)
        elif self is self.__singleton:
            # not clear how this can happen, play it safe
            pass
        else:
            # everywhere else, we want to downcast to MaskedArray, to prevent a
            # duplicate maskedconstant.
            self.__class__ = MaskedArray
            MaskedArray.__array_finalize__(self, obj)
    def __array_prepare__(self, obj, context=None):
        # Delegate ufunc handling to MaskedArray so results are ordinary
        # masked arrays, never new constants.
        return self.view(MaskedArray).__array_prepare__(obj, context)
    def __array_wrap__(self, obj, context=None):
        return self.view(MaskedArray).__array_wrap__(obj, context)
    def __str__(self):
        return str(masked_print_option._display)
    def __repr__(self):
        if self is MaskedConstant.__singleton:
            return 'masked'
        else:
            # it's a subclass, or something is wrong, make it obvious
            return object.__repr__(self)
    def __format__(self, format_spec):
        # Replace ndarray.__format__ with the default, which supports no format characters.
        # Supporting format characters is unwise here, because we do not know what type
        # the user was expecting - better to not guess.
        try:
            return object.__format__(self, format_spec)
        except TypeError:
            # 2020-03-23, NumPy 1.19.0
            warnings.warn(
                "Format strings passed to MaskedConstant are ignored, but in future may "
                "error or produce different behavior",
                FutureWarning, stacklevel=2
            )
            return object.__format__(self, "")
    def __reduce__(self):
        """Override of MaskedArray's __reduce__.
        """
        # Unpickling calls cls() which returns the singleton.
        return (self.__class__, ())
    # inplace operations have no effect. We have to override them to avoid
    # trying to modify the readonly data and mask arrays
    def __iop__(self, other):
        return self
    __iadd__ = \
        __isub__ = \
        __imul__ = \
        __ifloordiv__ = \
        __itruediv__ = \
        __ipow__ = \
        __iop__
    del __iop__  # don't leave this around
    def copy(self, *args, **kwargs):
        """ Copy is a no-op on the maskedconstant, as it is a scalar """
        # maskedconstant is a scalar, so copy doesn't need to copy. There's
        # precedent for this with `np.bool_` scalars.
        return self
    def __copy__(self):
        return self
    def __deepcopy__(self, memo):
        return self
    def __setattr__(self, attr, value):
        if not self.__has_singleton():
            # allow the singleton to be initialized
            return super(MaskedConstant, self).__setattr__(attr, value)
        elif self is self.__singleton:
            raise AttributeError(
                f"attributes of {self!r} are not writeable")
        else:
            # duplicate instance - we can end up here from __array_finalize__,
            # where we set the __class__ attribute
            return super(MaskedConstant, self).__setattr__(attr, value)
# The unique masked-value singleton, and the historical class alias.
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=None,
          mask=nomask, fill_value=None, keep_mask=True,
          hard_mask=False, shrink=True, subok=True, ndmin=0):
    """
    Shortcut to MaskedArray.
    The options are in a different order for convenience and backwards
    compatibility.
    """
    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,
                       subok=subok, keep_mask=keep_mask,
                       hard_mask=hard_mask, fill_value=fill_value,
                       ndmin=ndmin, shrink=shrink, order=order)
# Expose the full MaskedArray documentation on the shortcut.
array.__doc__ = masked_array.__doc__
def is_masked(x):
    """
    Determine whether input has masked values.
    Accepts any object as input, but always returns False unless the
    input is a MaskedArray containing masked values.
    Parameters
    ----------
    x : array_like
        Array to check for masked values.
    Returns
    -------
    result : bool
        True if `x` is a MaskedArray with masked values, False otherwise.
    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
    >>> ma.is_masked(x)
    True
    >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
    >>> ma.is_masked(x)
    False
    Always returns False if `x` isn't a MaskedArray.
    >>> ma.is_masked([False, True, False])
    False
    >>> ma.is_masked('a string')
    False
    """
    mask = getmask(x)
    # No mask at all means nothing is masked; otherwise any set entry counts.
    if mask is nomask:
        return False
    return bool(mask.any())
##############################################################################
# Extrema functions #
##############################################################################
class _extrema_operation(_MaskedUFunc):
    """
    Generic class for maximum/minimum functions.
    .. note::
        This is the base class for `_maximum_operation` and
        `_minimum_operation`.
    """
    def __init__(self, ufunc, compare, fill_value):
        # `compare` is the elementwise comparison ufunc (greater/less);
        # `fill_value` is a callable returning the neutral fill value for a
        # given target (so masked entries can never win the reduction).
        super(_extrema_operation, self).__init__(ufunc)
        self.compare = compare
        self.fill_value_func = fill_value
    def __call__(self, a, b=None):
        "Executes the call behavior."
        if b is None:
            # 2016-04-13, 1.13.0
            warnings.warn(
                f"Single-argument form of np.ma.{self.__name__} is deprecated. Use "
                f"np.ma.{self.__name__}.reduce instead.",
                DeprecationWarning, stacklevel=2)
            return self.reduce(a)
        return where(self.compare(a, b), a, b)
    def reduce(self, target, axis=np._NoValue):
        "Reduce target along the given axis."
        target = narray(target, copy=False, subok=True)
        m = getmask(target)
        if axis is np._NoValue and target.ndim > 1:
            # 2017-05-06, Numpy 1.13.0: warn on axis default
            warnings.warn(
                f"In the future the default for ma.{self.__name__}.reduce will be axis=0, "
                f"not the current None, to match np.{self.__name__}.reduce. "
                "Explicitly pass 0 or None to silence this warning.",
                MaskedArrayFutureWarning, stacklevel=2)
            axis = None
        if axis is not np._NoValue:
            kwargs = dict(axis=axis)
        else:
            kwargs = dict()
        if m is nomask:
            t = self.f.reduce(target, **kwargs)
        else:
            # Fill masked entries with the neutral value so they cannot
            # affect the result; an output entry is masked only when every
            # input entry contributing to it was masked.
            target = target.filled(
                self.fill_value_func(target)).view(type(target))
            t = self.f.reduce(target, **kwargs)
            m = umath.logical_and.reduce(m, **kwargs)
            if hasattr(t, '_mask'):
                t._mask = m
            elif m:
                t = masked
        return t
    def outer(self, a, b):
        "Return the function applied to the outer product of a and b."
        ma = getmask(a)
        mb = getmask(b)
        if ma is nomask and mb is nomask:
            m = nomask
        else:
            ma = getmaskarray(a)
            mb = getmaskarray(b)
            # An output entry is masked if either operand entry is masked.
            m = logical_or.outer(ma, mb)
        result = self.f.outer(filled(a), filled(b))
        if not isinstance(result, MaskedArray):
            result = result.view(MaskedArray)
        result._mask = m
        return result
def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
    # Only forward `keepdims` when the caller supplied it, to keep old
    # objects lacking that parameter working.
    kwargs = {}
    if keepdims is not np._NoValue:
        kwargs['keepdims'] = keepdims
    try:
        return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs)
    except (AttributeError, TypeError):
        # obj has no min method, or its min does not accept fill_value:
        # convert and use the MaskedArray implementation.
        return asanyarray(obj).min(axis=axis, fill_value=fill_value,
                                   out=out, **kwargs)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
    # Only forward `keepdims` when the caller supplied it, to keep old
    # objects lacking that parameter working.
    kwargs = {}
    if keepdims is not np._NoValue:
        kwargs['keepdims'] = keepdims
    try:
        return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs)
    except (AttributeError, TypeError):
        # obj has no max method, or its max does not accept fill_value:
        # convert and use the MaskedArray implementation.
        return asanyarray(obj).max(axis=axis, fill_value=fill_value,
                                   out=out, **kwargs)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
    # Only forward `keepdims` when the caller supplied it.
    kwargs = {}
    if keepdims is not np._NoValue:
        kwargs['keepdims'] = keepdims
    try:
        return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs)
    except (AttributeError, TypeError):
        # obj has no ptp method, or its ptp does not accept fill_value:
        # convert and use the MaskedArray implementation.
        return asanyarray(obj).ptp(axis=axis, fill_value=fill_value,
                                   out=out, **kwargs)
ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
# Definition of functions from the corresponding methods #
##############################################################################
class _frommethod:
    """
    Define functions from existing MaskedArray methods.
    Parameters
    ----------
    methodname : str
        Name of the method to transform.
    reversed : bool, optional
        If True, swap the array argument with the first positional argument
        before dispatching (used e.g. for `compress`).
    """
    def __init__(self, methodname, reversed=False):
        self.__name__ = methodname
        self.__doc__ = self.getdoc()
        self.reversed = reversed
    def getdoc(self):
        "Return the doc of the function (from the doc of the method)."
        # Prefer the MaskedArray method's doc; fall back on numpy's.
        meth = getattr(MaskedArray, self.__name__, None) or\
            getattr(np, self.__name__, None)
        signature = self.__name__ + get_object_signature(meth)
        if meth is not None:
            doc = """ %s\n%s""" % (
                signature, getattr(meth, '__doc__', None))
            return doc
    def __call__(self, a, *args, **params):
        if self.reversed:
            args = list(args)
            a, args[0] = args[0], a
        marr = asanyarray(a)
        method_name = self.__name__
        method = getattr(type(marr), method_name, None)
        if method is None:
            # use the corresponding np function
            method = getattr(np, method_name)
        return method(marr, *args, **params)
# Module-level function versions of the corresponding MaskedArray methods,
# plus the mask-aware extrema operations.
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)
mean = _frommethod('mean')
minimum = _extrema_operation(umath.minimum, less, minimum_fill_value)
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
count = _frommethod('count')
def take(a, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from a masked array along an axis.

    Masked-array version of `numpy.take`: the input is converted to a
    `MaskedArray` first, so masked entries stay masked in the result.
    See `MaskedArray.take` for details on the parameters.
    """
    a = masked_array(a)
    return a.take(indices, axis=axis, out=out, mode=mode)
def power(a, b, third=None):
    """
    Returns element-wise base array raised to power from second array.
    This is the masked array version of `numpy.power`. For details see
    `numpy.power`.
    See Also
    --------
    numpy.power
    Notes
    -----
    The *out* argument to `numpy.power` is not supported, `third` has to be
    None.
    """
    if third is not None:
        raise MaskError("3-argument power not supported.")
    # Get the masks
    ma = getmask(a)
    mb = getmask(b)
    m = mask_or(ma, mb)
    # Get the rawdata
    fa = getdata(a)
    fb = getdata(b)
    # Get the type of the result (so that we preserve subclasses)
    if isinstance(a, MaskedArray):
        basetype = type(a)
    else:
        basetype = MaskedArray
    # Get the result and view it as a (subclass of) MaskedArray
    with np.errstate(divide='ignore', invalid='ignore'):
        # Masked positions keep the base value `fa` untouched; they are
        # masked out below anyway.
        result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
    result._update_from(a)
    # Find where we're in trouble w/ NaNs and Infs
    invalid = np.logical_not(np.isfinite(result.view(ndarray)))
    # Add the initial mask
    if m is not nomask:
        if not result.ndim:
            # 0d masked result collapses to the masked singleton
            return masked
        result._mask = np.logical_or(m, invalid)
    # Fix the invalid parts
    if invalid.any():
        if not result.ndim:
            return masked
        elif result._mask is nomask:
            result._mask = invalid
        result._data[invalid] = result.fill_value
    return result
# Function versions of the eponymous MaskedArray methods.
argmin = _frommethod('argmin')
argmax = _frommethod('argmax')
def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None):
    "Function version of the eponymous method."
    a = np.asanyarray(a)
    # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
    if axis is np._NoValue:
        axis = _deprecate_argsort_axis(a)
    # Plain ndarrays do not understand the masked-specific keywords.
    if not isinstance(a, MaskedArray):
        return a.argsort(axis=axis, kind=kind, order=order)
    return a.argsort(axis=axis, kind=kind, order=order,
                     endwith=endwith, fill_value=fill_value)
argsort.__doc__ = MaskedArray.argsort.__doc__
def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None):
    """
    Return a sorted copy of the masked array.
    Equivalent to creating a copy of the array
    and applying the MaskedArray ``sort()`` method.
    Refer to ``MaskedArray.sort`` for the full documentation
    See Also
    --------
    MaskedArray.sort : equivalent method
    """
    a = np.array(a, copy=True, subok=True)
    if axis is None:
        # axis=None sorts the flattened array.
        a = a.flatten()
        axis = 0
    # Plain ndarrays do not understand the masked-specific keywords.
    if not isinstance(a, MaskedArray):
        a.sort(axis=axis, kind=kind, order=order)
    else:
        a.sort(axis=axis, kind=kind, order=order,
               endwith=endwith, fill_value=fill_value)
    return a
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.
    This function is equivalent to calling the "compressed" method of a
    `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details.
    See Also
    --------
    ma.MaskedArray.compressed
        Equivalent method.
    """
    marr = asanyarray(x)
    return marr.compressed()
def concatenate(arrays, axis=0):
    """
    Concatenate a sequence of arrays along the given axis.
    Parameters
    ----------
    arrays : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.
    Returns
    -------
    result : MaskedArray
        The concatenated array with any masked entries preserved.
    See Also
    --------
    numpy.concatenate : Equivalent function in the top-level NumPy module.
    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.arange(3)
    >>> a[1] = ma.masked
    >>> b = ma.arange(2, 5)
    >>> ma.concatenate([a, b])
    masked_array(data=[0, --, 2, 2, 3, 4],
                 mask=[False,  True, False, False, False, False],
           fill_value=999999)
    """
    # Concatenate the data parts, then view the result as the most derived
    # masked subclass among the inputs.
    joined = np.concatenate([getdata(a) for a in arrays], axis)
    result_cls = get_masked_subclass(*arrays)
    data = joined.view(result_cls)
    # If no input carries a mask, the data view is already the answer.
    if builtins.all if False else all(getmask(x) is nomask for x in arrays):
        return data
    # Otherwise concatenate the (full) masks as well.
    joined_mask = np.concatenate([getmaskarray(a) for a in arrays], axis)
    joined_mask = joined_mask.reshape(joined.shape)
    # If we decide to keep a '_shrinkmask' option, we want to check that
    # all of them are True, and then check for joined_mask.any()
    data._mask = _shrink_mask(joined_mask)
    return data
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.
    This function is the equivalent of `numpy.diag` that takes masked
    values into account, see `numpy.diag` for details.
    See Also
    --------
    numpy.diag : Equivalent function for ndarrays.
    """
    result = np.diag(v, k).view(MaskedArray)
    vmask = getmask(v)
    # Propagate the mask through the same diagonal extraction/construction.
    if vmask is not nomask:
        result._mask = np.diag(vmask, k)
    return result
def left_shift(a, n):
    """
    Shift the bits of an integer to the left.
    This is the masked array version of `numpy.left_shift`, for details
    see that function.
    See Also
    --------
    numpy.left_shift
    """
    m = getmask(a)
    if m is not nomask:
        # Fill masked entries with 0 so the shift is well-defined there,
        # then reapply the mask.
        shifted = umath.left_shift(filled(a, 0), n)
        return masked_array(shifted, mask=m)
    return masked_array(umath.left_shift(filled(a), n))
def right_shift(a, n):
    """
    Shift the bits of an integer to the right.
    This is the masked array version of `numpy.right_shift`, for details
    see that function.
    See Also
    --------
    numpy.right_shift
    """
    m = getmask(a)
    if m is not nomask:
        # Fill masked entries with 0 so the shift is well-defined there,
        # then reapply the mask.
        shifted = umath.right_shift(filled(a, 0), n)
        return masked_array(shifted, mask=m)
    return masked_array(umath.right_shift(filled(a), n))
def put(a, indices, values, mode='raise'):
    """
    Set storage-indexed locations to corresponding values.
    This function is equivalent to `MaskedArray.put`, see that method
    for details.
    See Also
    --------
    MaskedArray.put
    """
    # We can't use 'frommethod', the order of arguments is different
    try:
        return a.put(indices, values, mode=mode)
    except AttributeError:
        # `a` has no put method (not an ndarray): convert first.
        return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values):  # , mode='raise'):
    """
    Changes elements of an array based on conditional and input values.
    This is the masked array version of `numpy.putmask`, for details see
    `numpy.putmask`.
    See Also
    --------
    numpy.putmask
    Notes
    -----
    Using a masked array as `values` will **not** transform a `ndarray` into
    a `MaskedArray`.
    """
    # We can't use 'frommethod', the order of arguments is different
    if not isinstance(a, MaskedArray):
        a = a.view(MaskedArray)
    (valdata, valmask) = (getdata(values), getmask(values))
    if getmask(a) is nomask:
        # `a` has no mask yet: create one only if the values carry a mask.
        if valmask is not nomask:
            a._sharedmask = True
            a._mask = make_mask_none(a.shape, a.dtype)
            np.copyto(a._mask, valmask, where=mask)
    elif a._hardmask:
        # Hard mask: masked entries may only become more masked, never less.
        if valmask is not nomask:
            m = a._mask.copy()
            np.copyto(m, valmask, where=mask)
            a.mask |= m
    else:
        # Soft mask: the values' mask replaces a's mask where `mask` is set.
        if valmask is nomask:
            valmask = getmaskarray(values)
        np.copyto(a._mask, valmask, where=mask)
    np.copyto(a._data, valdata, where=mask)
    return
def transpose(a, axes=None):
    """
    Permute the dimensions of an array.
    This function is exactly equivalent to `numpy.transpose`.
    Parameters
    ----------
    a : array_like
        Input array.
    axes : sequence of ints, optional
        Permutation of the axes; by default the axes are reversed.
    See Also
    --------
    numpy.transpose : Equivalent function in top-level NumPy module.
    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = ma.arange(4).reshape((2,2))
    >>> x[1, 1] = ma.masked
    >>> ma.transpose(x)
    masked_array(
      data=[[0, 2],
            [1, --]],
      mask=[[False, False],
            [False,  True]],
      fill_value=999999)
    """
    # We can't use 'frommethod', as 'transpose' doesn't take keywords
    try:
        return a.transpose(axes)
    except AttributeError:
        # `a` is not array-like enough: convert, transpose, and view as
        # a MaskedArray for a consistent return type.
        return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.
    Refer to `MaskedArray.reshape` for full documentation.
    See Also
    --------
    MaskedArray.reshape : equivalent function
    """
    # 'frommethod' chokes on the extra parameters, so dispatch by hand.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        # Not array-like: convert, reshape, and present as a MaskedArray.
        reshaped = narray(a, copy=False).reshape(new_shape, order=order)
        return reshaped.view(MaskedArray)
def resize(x, new_shape):
    """
    Return a new masked array with the specified size and shape.
    This is the masked equivalent of the `numpy.resize` function. The new
    array is filled with repeated copies of `x` (in the order that the
    data are stored in memory). If `x` is masked, the new array will be
    masked, and the new mask will be a repetition of the old one.
    See Also
    --------
    numpy.resize : Equivalent function in the top level NumPy module.
    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.array([[1, 2] ,[3, 4]])
    >>> a[0, 1] = ma.masked
    >>> ma.resize(a, (3, 3))
    masked_array(
      data=[[1, --, 3],
            [4, 1, --],
            [3, 4, 1]],
      mask=[[False,  True, False],
            [False, False,  True],
            [False, False, False]],
      fill_value=999999)
    A MaskedArray is always returned, regardless of the input type.
    >>> a = np.array([[1, 2] ,[3, 4]])
    >>> ma.resize(a, (3, 3))
    masked_array(
      data=[[1, 2, 3],
            [4, 1, 2],
            [3, 4, 1]],
      mask=False,
      fill_value=999999)
    """
    # We can't use _frommethods here, as N.resize is notoriously whiny.
    result = np.resize(x, new_shape).view(get_masked_subclass(x))
    if result.ndim:
        # Repeat the mask the same way the data was repeated.
        mask = getmask(x)
        if mask is not nomask:
            mask = np.resize(mask, new_shape)
        result._mask = mask
    return result
def ndim(obj):
    """
    maskedarray version of the numpy function.
    """
    # Operate on the data part only; the mask has the same dimensionality.
    data = getdata(obj)
    return np.ndim(data)
ndim.__doc__ = np.ndim.__doc__
def shape(obj):
    "maskedarray version of the numpy function."
    # Operate on the data part only; the mask has the same shape.
    data = getdata(obj)
    return np.shape(data)
shape.__doc__ = np.shape.__doc__
def size(obj, axis=None):
    "maskedarray version of the numpy function."
    # Operate on the data part only; the mask has the same size.
    data = getdata(obj)
    return np.size(data, axis)
size.__doc__ = np.size.__doc__
##############################################################################
# Extra functions #
##############################################################################
def where(condition, x=_NoValue, y=_NoValue):
    """
    Return a masked array with elements from `x` or `y`, depending on condition.
    .. note::
        When only `condition` is provided, this function is identical to
        `nonzero`. The rest of this documentation covers only the case where
        all three arguments are provided.
    Parameters
    ----------
    condition : array_like, bool
        Where True, yield `x`, otherwise yield `y`.
    x, y : array_like, optional
        Values from which to choose. `x`, `y` and `condition` need to be
        broadcastable to some shape.
    Returns
    -------
    out : MaskedArray
        An masked array with `masked` elements where the condition is masked,
        elements from `x` where `condition` is True, and elements from `y`
        elsewhere.
    See Also
    --------
    numpy.where : Equivalent function in the top-level NumPy module.
    nonzero : The function that is called when x and y are omitted
    Examples
    --------
    >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
    ...                                                    [1, 0, 1],
    ...                                                    [0, 1, 0]])
    >>> np.ma.where(x > 5, x, -3.1416)
    masked_array(
      data=[[-3.1416, --, -3.1416],
            [--, -3.1416, --],
            [6.0, --, 8.0]],
      mask=[[False,  True, False],
            [ True, False,  True],
            [False,  True, False]],
      fill_value=1e+20)
    """
    # handle the single-argument case
    missing = (x is _NoValue, y is _NoValue).count(True)
    if missing == 1:
        raise ValueError("Must provide both 'x' and 'y' or neither.")
    if missing == 2:
        return nonzero(condition)
    # we only care if the condition is true - false or masked pick y
    cf = filled(condition, False)
    xd = getdata(x)
    yd = getdata(y)
    # we need the full arrays here for correct final dimensions
    cm = getmaskarray(condition)
    xm = getmaskarray(x)
    ym = getmaskarray(y)
    # deal with the fact that masked.dtype == float64, but we don't actually
    # want to treat it as that.
    if x is masked and y is not masked:
        # Substitute a 0-d zero of y's dtype, fully masked.
        xd = np.zeros((), dtype=yd.dtype)
        xm = np.ones((), dtype=ym.dtype)
    elif y is masked and x is not masked:
        yd = np.zeros((), dtype=xd.dtype)
        ym = np.ones((), dtype=xm.dtype)
    # Select data and mask with the same condition; where the condition
    # itself is masked, the output is masked.
    data = np.where(cf, xd, yd)
    mask = np.where(cf, xm, ym)
    mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)
    # collapse the mask, for backwards compatibility
    mask = _shrink_mask(mask)
    return masked_array(data, mask=mask)
def choose(indices, choices, out=None, mode='raise'):
    """
    Use an index array to construct a new array from a set of choices.
    Given an array of integers and a set of n choice arrays, this method
    will create a new array that merges each of the choice arrays. Where a
    value in `a` is i, the new array will have the value that choices[i]
    contains in the same place.
    Parameters
    ----------
    a : ndarray of ints
        This array must contain integers in ``[0, n-1]``, where n is the
        number of choices.
    choices : sequence of arrays
        Choice arrays. The index array and all of the choices should be
        broadcastable to the same shape.
    out : array, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and `dtype`.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.
        * 'raise' : raise an error
        * 'wrap' : wrap around
        * 'clip' : clip to the range
    Returns
    -------
    merged_array : array
    See Also
    --------
    choose : equivalent function
    Examples
    --------
    >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
    >>> a = np.array([2, 1, 0])
    >>> np.ma.choose(a, choice)
    masked_array(data=[3, 2, 1],
                 mask=False,
           fill_value=999999)
    """
    def fmask(x):
        "Returns the filled array, or True if masked."
        if x is masked:
            return True
        return filled(x)
    def nmask(x):
        "Returns the mask, True if ``masked``, False if ``nomask``."
        if x is masked:
            return True
        return getmask(x)
    # Get the indices.
    c = filled(indices, 0)
    # Get the masks.
    masks = [nmask(x) for x in choices]
    data = [fmask(x) for x in choices]
    # Construct the mask: an output entry is masked when the selected
    # choice entry is masked or the index itself is masked.
    outputmask = np.choose(c, masks, mode=mode)
    outputmask = make_mask(mask_or(outputmask, getmask(indices)),
                           copy=False, shrink=True)
    # Get the choices.
    d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
    if out is not None:
        if isinstance(out, MaskedArray):
            out.__setmask__(outputmask)
        return out
    d.__setmask__(outputmask)
    return d
def round_(a, decimals=0, out=None):
    """
    Return a copy of a, rounded to 'decimals' places.

    When 'decimals' is negative, it specifies the number of positions
    to the left of the decimal point. The real and imaginary parts of
    complex numbers are rounded separately. Nothing is done if the
    array is not of float type and 'decimals' is greater than or equal
    to 0.

    Parameters
    ----------
    a : array_like
        Input data.
    decimals : int
        Number of decimals to round to. May be negative.
    out : array_like
        Existing array to use for output.
        If not given, returns a default copy of a.

    Notes
    -----
    If out is given and does not have a mask attribute, the mask of a
    is lost!
    """
    # `np.round_` was a deprecated alias of `np.round` and is removed in
    # NumPy 2.0; `np.round` is the supported, behaviorally identical call.
    if out is None:
        return np.round(a, decimals, out)
    else:
        np.round(getdata(a), decimals, out)
        # Propagate the mask only when the output can actually carry one.
        if hasattr(out, '_mask'):
            out._mask = getmask(a)
        return out
round = round_
# Needed by dot, so move here from extras.py. It will still be exported
# from extras.py for compatibility.
def mask_rowcols(a, axis=None):
    """
    Mask rows and/or columns of a 2D array that contain masked values.

    The masking behavior is selected using the `axis` parameter:

    - If `axis` is None, rows *and* columns are masked.
    - If `axis` is 0, only rows are masked.
    - If `axis` is 1 or -1, only columns are masked.

    Parameters
    ----------
    a : array_like, MaskedArray
        The array to mask. If not a MaskedArray instance (or if no array
        elements are masked), the result is a MaskedArray with `mask` set
        to `nomask` (False). Must be a 2D array.
    axis : int, optional
        Axis along which to perform the operation. If None, applies to a
        flattened version of the array.

    Returns
    -------
    a : MaskedArray
        A modified version of the input array, masked depending on the
        value of the `axis` parameter.

    Raises
    ------
    NotImplementedError
        If input array `a` is not 2D.

    See Also
    --------
    mask_rows : Mask rows of a 2D array that contain masked values.
    mask_cols : Mask cols of a 2D array that contain masked values.
    masked_where : Mask where a condition is met.

    Notes
    -----
    The input array's mask is modified by this function.
    """
    a = array(a, subok=False)
    if a.ndim != 2:
        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
    m = getmask(a)
    if m is nomask or not m.any():
        # Nothing is masked: return unchanged.
        return a
    rows, cols = m.nonzero()
    # Copy before widening so a shared input mask is not clobbered.
    a._mask = a._mask.copy()
    if not axis:
        # axis is None or 0: blank every row containing a masked value.
        a[np.unique(rows)] = masked
    if axis in [None, 1, -1]:
        # axis is None, 1 or -1: blank every such column as well.
        a[:, np.unique(cols)] = masked
    return a
# Include masked dot here to avoid import problems in getting it from
# extras.py. Note that it is not included in __all__, but rather exported
# from extras in order to avoid backward compatibility problems.
def dot(a, b, strict=False, out=None):
    """
    Return the dot product of two arrays, taking masked values into account.

    This is the masked counterpart of `numpy.dot`. Note that `strict` and
    `out` are in different position than in the method version; for
    compatibility, treat them as keyword-only arguments.

    .. note::
      Works only with 2-D arrays at the moment.

    Parameters
    ----------
    a, b : masked_array_like
        Inputs arrays.
    strict : bool, optional
        Whether masked data are propagated (True) or set to 0 (False) for
        the computation. Default is False. Propagating the mask means that
        if a masked value appears in a row or column, the whole row or
        column is considered masked.
    out : masked_array, optional
        Output argument. This must have the exact kind that would be
        returned if it was not used: right type, C-contiguous, and the
        dtype that would be returned for `dot(a,b)`. Otherwise an
        exception is raised.

        .. versionadded:: 1.10.2

    See Also
    --------
    numpy.dot : Equivalent function for ndarrays.

    Examples
    --------
    >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
    >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
    >>> np.ma.dot(a, b)
    masked_array(
      data=[[21, 26],
            [45, 64]],
      mask=[[False, False],
            [False, False]],
      fill_value=999999)
    """
    # !!!: Works only with 2D arrays. There should be a way to get it to run
    # with higher dimension
    if strict and (a.ndim == 2) and (b.ndim == 2):
        # Propagate masks over whole rows of `a` and whole columns of `b`.
        a = mask_rowcols(a, 0)
        b = mask_rowcols(b, 1)
    valid_a = ~getmaskarray(a)
    valid_b = ~getmaskarray(b)
    if out is None:
        data = np.dot(filled(a, 0), filled(b, 0))
        # A result cell is valid iff at least one valid pair contributed.
        result_mask = ~np.dot(valid_a, valid_b)
        if data.ndim == 0:
            data = np.asarray(data)
        result = data.view(get_masked_subclass(a, b))
        result.__setmask__(result_mask)
        return result
    data = np.dot(filled(a, 0), filled(b, 0), out._data)
    if out.mask.shape != data.shape:
        out._mask = np.empty(data.shape, MaskType)
    np.dot(valid_a, valid_b, out._mask)
    np.logical_not(out._mask, out._mask)
    return out
def inner(a, b):
    """
    Returns the inner product of a and b for arrays of floating point types.

    Like the generic NumPy equivalent the product sum is over the last
    dimension of a and b. The first argument is not conjugated.
    """
    # Masked entries are replaced by 0 so they do not contribute.
    fa, fb = filled(a, 0), filled(b, 0)
    # np.inner requires at least 1-D operands.
    if fa.ndim == 0:
        fa.shape = (1,)
    if fb.ndim == 0:
        fb.shape = (1,)
    return np.inner(fa, fb).view(MaskedArray)
# Reuse np.inner's docstring, with a note about the masked-fill behavior.
inner.__doc__ = doc_note(np.inner.__doc__,
                         "Masked values are replaced by 0.")
# Alias kept for backward compatibility.
innerproduct = inner
def outer(a, b):
    "maskedarray version of the numpy function."
    # Work on flattened copies with masked entries replaced by 0.
    left = filled(a, 0).ravel()
    right = filled(b, 0).ravel()
    product = np.outer(left, right)
    if getmask(a) is nomask and getmask(b) is nomask:
        # Neither input carries a mask: return an unmasked result.
        return masked_array(product)
    mask_a = getmaskarray(a)
    mask_b = getmaskarray(b)
    # A cell is masked when either contributing element is masked.
    m = make_mask(1 - np.outer(1 - mask_a, 1 - mask_b), copy=False)
    return masked_array(product, mask=m)
# Reuse np.outer's docstring, with a note about the masked-fill behavior.
outer.__doc__ = doc_note(np.outer.__doc__,
                         "Masked values are replaced by 0.")
# Alias kept for backward compatibility.
outerproduct = outer
def _convolve_or_correlate(f, a, v, mode, propagate_mask):
"""
Helper function for ma.correlate and ma.convolve
"""
if propagate_mask:
# results which are contributed to by either item in any pair being invalid
mask = (
f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
| f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
)
data = f(getdata(a), getdata(v), mode=mode)
else:
# results which are not contributed to by any pair of valid elements
mask = ~f(~getmaskarray(a), ~getmaskarray(v))
data = f(filled(a, 0), filled(v, 0), mode=mode)
return masked_array(data, mask=mask)
def correlate(a, v, mode='valid', propagate_mask=True):
    """
    Cross-correlation of two 1-dimensional sequences.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `np.convolve` docstring.  Note that the default
        is 'valid', unlike `convolve`, which uses 'full'.
    propagate_mask : bool
        If True, then a result element is masked if any masked element
        contributes towards it.
        If False, then a result element is only masked if no non-masked
        element contribute towards it.

    Returns
    -------
    out : MaskedArray
        Discrete cross-correlation of `a` and `v`.

    See Also
    --------
    numpy.correlate : Equivalent function in the top-level NumPy module.
    """
    # Delegate to the shared convolve/correlate implementation.
    return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
def convolve(a, v, mode='full', propagate_mask=True):
    """
    Returns the discrete, linear convolution of two one-dimensional sequences.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `np.convolve` docstring.
    propagate_mask : bool
        If True, then if any masked element is included in the sum for a
        result element, then the result is masked.
        If False, then the result element is only masked if no non-masked
        cells contribute towards it.

    Returns
    -------
    out : MaskedArray
        Discrete, linear convolution of `a` and `v`.

    See Also
    --------
    numpy.convolve : Equivalent function in the top-level NumPy module.
    """
    # Delegate to the shared convolve/correlate implementation.
    return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
def allequal(a, b, fill_value=True):
    """
    Return True if all entries of a and b are equal, using
    fill_value as a truth value where either or both are masked.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    fill_value : bool, optional
        Whether masked values in a or b are considered equal (True) or not
        (False).

    Returns
    -------
    y : bool
        Returns True if the two arrays are equal within the given
        tolerance, False otherwise. If either array contains NaN,
        then False is returned.

    See Also
    --------
    all, any
    numpy.ma.allclose

    Examples
    --------
    >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
    >>> b = np.array([1e10, 1e-7, -42.0])
    >>> np.ma.allequal(a, b, fill_value=False)
    False
    >>> np.ma.allequal(a, b)
    True
    """
    m = mask_or(getmask(a), getmask(b))
    if m is nomask:
        # No masked entries anywhere: plain element-wise comparison.
        # (np.equal is the same ufunc as umath.equal.)
        return np.equal(getdata(a), getdata(b)).all()
    if fill_value:
        # Masked positions count as equal: fill the comparison with True.
        eq = array(np.equal(getdata(a), getdata(b)), mask=m, copy=False)
        return eq.filled(True).all(None)
    # Masked positions count as unequal, so overall equality is impossible.
    return False
def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
    """
    Returns True if two arrays are element-wise equal within a tolerance.
    This function is equivalent to `allclose` except that masked values
    are treated as equal (default) or unequal, depending on the `masked_equal`
    argument.
    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    masked_equal : bool, optional
        Whether masked values in `a` and `b` are considered equal (True) or not
        (False). They are considered equal by default.
    rtol : float, optional
        Relative tolerance. The relative difference is equal to ``rtol * b``.
        Default is 1e-5.
    atol : float, optional
        Absolute tolerance. The absolute difference is equal to `atol`.
        Default is 1e-8.
    Returns
    -------
    y : bool
        Returns True if the two arrays are equal within the given
        tolerance, False otherwise. If either array contains NaN, then
        False is returned.
    See Also
    --------
    all, any
    numpy.allclose : the non-masked `allclose`.
    Notes
    -----
    If the following equation is element-wise True, then `allclose` returns
    True::
      absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
    Return True if all elements of `a` and `b` are equal subject to
    given tolerances.
    Examples
    --------
    >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
    >>> a
    masked_array(data=[10000000000.0, 1e-07, --],
                 mask=[False, False,  True],
           fill_value=1e+20)
    >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
    >>> np.ma.allclose(a, b)
    False
    >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
    >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
    >>> np.ma.allclose(a, b)
    True
    >>> np.ma.allclose(a, b, masked_equal=False)
    False
    Masked values are not compared directly.
    >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
    >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
    >>> np.ma.allclose(a, b)
    True
    >>> np.ma.allclose(a, b, masked_equal=False)
    False
    """
    x = masked_array(a, copy=False)
    y = masked_array(b, copy=False)
    # make sure y is an inexact type to avoid abs(MIN_INT); will cause
    # casting of x later.
    # NOTE: We explicitly allow timedelta, which used to work. This could
    # possibly be deprecated. See also gh-18286.
    # timedelta works if `atol` is an integer or also a timedelta.
    # Although, the default tolerances are unlikely to be useful
    if y.dtype.kind != "m":
        dtype = np.result_type(y, 1.)
        if y.dtype != dtype:
            y = masked_array(y, dtype=dtype, copy=False)
    # Masked positions of either operand are excluded from the inf check.
    m = mask_or(getmask(x), getmask(y))
    xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
    # If we have some infs, they should fall at the same place.
    if not np.all(xinf == filled(np.isinf(y), False)):
        return False
    # No infs at all
    if not np.any(xinf):
        # Tolerance test on everything; masked cells resolve to
        # `masked_equal`.
        d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
                   masked_equal)
        return np.all(d)
    # Infs are aligned: the infinite entries must match exactly...
    if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
        return False
    x = x[~xinf]
    y = y[~xinf]
    # ...and the finite remainder must satisfy the tolerance test.
    d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
               masked_equal)
    return np.all(d)
def asarray(a, dtype=None, order=None):
    """
    Convert the input to a masked array of the given data-type.

    No copy is performed if the input is already an `ndarray`. If `a` is
    a subclass of `MaskedArray`, a base class `MaskedArray` is returned.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to a masked array.
        This includes lists, lists of tuples, tuples, tuples of tuples,
        tuples of lists, ndarrays and masked arrays.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN') memory
        representation. Default is 'C'.

    Returns
    -------
    out : MaskedArray
        Masked array interpretation of `a`.

    See Also
    --------
    asanyarray : Similar to `asarray`, but conserves subclasses.

    Examples
    --------
    >>> x = np.arange(10.).reshape(2, 5)
    >>> type(np.ma.asarray(x))
    <class 'numpy.ma.core.MaskedArray'>
    """
    # `keep_mask` preserves an existing mask; `subok=False` forces the
    # result down to the plain MaskedArray base class.
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                        subok=False, order=order or 'C')
def asanyarray(a, dtype=None):
    """
    Convert the input to a masked array, conserving subclasses.

    If `a` is a subclass of `MaskedArray`, its class is conserved.
    No copy is performed if the input is already an `ndarray`.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : MaskedArray
        MaskedArray interpretation of `a`.

    See Also
    --------
    asarray : Similar to `asanyarray`, but does not conserve subclass.

    Examples
    --------
    >>> x = np.arange(10.).reshape(2, 5)
    >>> type(np.ma.asanyarray(x))
    <class 'numpy.ma.core.MaskedArray'>
    """
    # Fast path (workaround for gh-8666): preserve the identity of an
    # input that is already a suitable MaskedArray.
    if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
        return a
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
##############################################################################
# Pickling #
##############################################################################
def _pickle_warn(method):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
f"np.ma.{method} is deprecated, use pickle.{method} instead",
DeprecationWarning, stacklevel=3)
def fromfile(file, dtype=float, count=-1, sep=''):
    """Placeholder: reading a MaskedArray from file is not implemented."""
    raise NotImplementedError(
        "fromfile() not yet implemented for a MaskedArray.")
def fromflex(fxarray):
    """
    Build a masked array from a suitable flexible-type array.

    The input array has to have a data-type with ``_data`` and ``_mask``
    fields. This type of array is output by `MaskedArray.toflex`.

    Parameters
    ----------
    fxarray : ndarray
        The structured input array, containing ``_data`` and ``_mask``
        fields. If present, other fields are discarded.

    Returns
    -------
    result : MaskedArray
        The constructed masked array.

    See Also
    --------
    MaskedArray.toflex : Build a flexible-type array from a masked array.

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
    >>> rec = x.toflex()
    >>> np.ma.fromflex(rec)
    masked_array(
      data=[[0, --, 2],
            [--, 4, --],
            [6, --, 8]],
      mask=[[False,  True, False],
            [ True, False,  True],
            [False,  True, False]],
      fill_value=999999)

    Extra fields can be present in the structured array but are
    discarded.
    """
    # Only the two canonical fields participate; any extra field in the
    # structured dtype is ignored.
    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
_methodname : string
Name of the method to transform.
"""
__doc__ = None
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
def __call__(self, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(*args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
result._hardmask = bool(_extras.get("hard_mask", False))
return result
# ma-aware wrappers around the corresponding numpy constructors.  The
# `params` dict declares the extra ma-only keywords each wrapper accepts.
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
# Plain numpy functions re-exported unchanged under the ma namespace.
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma(
    'identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
def append(a, b, axis=None):
    """Append values to the end of an array.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Values are appended to a copy of this array.
    b : array_like
        These values are appended to a copy of `a`.  It must be of the
        correct shape (the same shape as `a`, excluding `axis`).  If `axis`
        is not specified, `b` can be any shape and will be flattened
        before use.
    axis : int, optional
        The axis along which `v` are appended.  If `axis` is not given,
        both `a` and `b` are flattened before use.

    Returns
    -------
    append : MaskedArray
        A copy of `a` with `b` appended to `axis`.  Note that `append`
        does not occur in-place: a new array is allocated and filled.  If
        `axis` is None, the result is a flattened array.

    See Also
    --------
    numpy.append : Equivalent function in the top-level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_values([1, 2, 3], 2)
    >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
    >>> ma.append(a, b)
    masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9],
                 mask=[False,  True, False, False, False, False,  True, False,
                       False],
           fill_value=999999)
    """
    # Appending is just concatenation of the two operands along `axis`.
    return concatenate((a, b), axis)
| [
"numpy.core.umath.power",
"numpy.core.umath.less",
"numpy.split",
"numpy.void",
"numpy.resize",
"numpy.empty",
"numpy.ones",
"builtins.all",
"numpy.AxisError",
"numpy.shape",
"numpy.isclose",
"numpy.core.umath.less_equal",
"numpy.inner",
"numpy.diag",
"numpy.core.arrayprint.dtype_is_impl... | [((6685, 6708), 'numpy.datetime64', 'np.datetime64', (['"""NaT"""', 'v'], {}), "('NaT', v)\n", (6698, 6708), True, 'import numpy as np\n'), ((6748, 6772), 'numpy.timedelta64', 'np.timedelta64', (['"""NaT"""', 'v'], {}), "('NaT', v)\n", (6762, 6772), True, 'import numpy as np\n'), ((13864, 13880), 'numpy.dtype', 'np.dtype', (['ndtype'], {}), '(ndtype)\n', (13872, 13880), True, 'import numpy as np\n'), ((15477, 15497), 'numpy.array', 'np.array', (['fill_value'], {}), '(fill_value)\n', (15485, 15497), True, 'import numpy as np\n'), ((42267, 42282), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (42275, 42282), True, 'import numpy as np\n'), ((42306, 42331), 'numpy.dtype', 'np.dtype', (['primitive_dtype'], {}), '(primitive_dtype)\n', (42314, 42331), True, 'import numpy as np\n'), ((56314, 56330), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (56324, 56330), True, 'import numpy as np\n'), ((56391, 56435), 'numpy.array', 'np.array', (['[_ for _ in flattened]'], {'dtype': 'bool'}), '([_ for _ in flattened], dtype=bool)\n', (56399, 56435), True, 'import numpy as np\n'), ((60298, 60332), 'numpy.array', 'np.array', (['a'], {'copy': 'copy', 'subok': '(True)'}), '(a, copy=copy, subok=True)\n', (60306, 60332), True, 'import numpy as np\n'), ((71505, 71543), 'numpy.issubdtype', 'np.issubdtype', (['xnew.dtype', 'np.floating'], {}), '(xnew.dtype, np.floating)\n', (71518, 71543), True, 'import numpy as np\n'), ((72638, 72672), 'numpy.array', 'np.array', (['a'], {'copy': 'copy', 'subok': '(True)'}), '(a, copy=copy, subok=True)\n', (72646, 72672), True, 'import numpy as np\n'), ((77295, 77311), 'numpy.asanyarray', 'np.asanyarray', (['a'], {}), '(a)\n', (77308, 77311), True, 'import numpy as np\n'), ((211280, 211327), 'numpy.ndarray.__new__', 'ndarray.__new__', (['baseclass', 'baseshape', 'basetype'], {}), '(baseclass, baseshape, basetype)\n', (211295, 211327), False, 'from numpy import ndarray, amax, amin, iscomplexobj, 
bool_, _NoValue\n'), ((232331, 232347), 'numpy.asanyarray', 'np.asanyarray', (['a'], {}), '(a)\n', (232344, 232347), True, 'import numpy as np\n'), ((233162, 233196), 'numpy.array', 'np.array', (['a'], {'copy': '(True)', 'subok': '(True)'}), '(a, copy=True, subok=True)\n', (233170, 233196), True, 'import numpy as np\n'), ((238671, 238710), 'numpy.copyto', 'np.copyto', (['a._data', 'valdata'], {'where': 'mask'}), '(a._data, valdata, where=mask)\n', (238680, 238710), True, 'import numpy as np\n'), ((245427, 245447), 'numpy.where', 'np.where', (['cf', 'xd', 'yd'], {}), '(cf, xd, yd)\n', (245435, 245447), True, 'import numpy as np\n'), ((245460, 245480), 'numpy.where', 'np.where', (['cf', 'xm', 'ym'], {}), '(cf, xm, ym)\n', (245468, 245480), True, 'import numpy as np\n'), ((247646, 247676), 'numpy.choose', 'np.choose', (['c', 'masks'], {'mode': 'mode'}), '(c, masks, mode=mode)\n', (247655, 247676), True, 'import numpy as np\n'), ((255505, 255521), 'numpy.outer', 'np.outer', (['fa', 'fb'], {}), '(fa, fb)\n', (255513, 255521), True, 'import numpy as np\n'), ((263514, 263523), 'numpy.all', 'np.all', (['d'], {}), '(d)\n', (263520, 263523), True, 'import numpy as np\n'), ((266836, 266949), 'warnings.warn', 'warnings.warn', (['f"""np.ma.{method} is deprecated, use pickle.{method} instead"""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(f'np.ma.{method} is deprecated, use pickle.{method} instead',\n DeprecationWarning, stacklevel=3)\n", (266849, 266949), False, 'import warnings\n'), ((4498, 4736), 'warnings.warn', 'warnings.warn', (['"""In the future the default for argsort will be axis=-1, not the current None, to match its documentation and np.argsort. Explicitly pass -1 or None to silence this warning."""', 'MaskedArrayFutureWarning'], {'stacklevel': '(3)'}), "(\n 'In the future the default for argsort will be axis=-1, not the current None, to match its documentation and np.argsort. 
Explicitly pass -1 or None to silence this warning.'\n , MaskedArrayFutureWarning, stacklevel=3)\n", (4511, 4736), False, 'import warnings\n'), ((5064, 5092), 'inspect.cleandoc', 'inspect.cleandoc', (['initialdoc'], {}), '(initialdoc)\n', (5080, 5092), False, 'import inspect\n'), ((5136, 5158), 'inspect.cleandoc', 'inspect.cleandoc', (['note'], {}), '(note)\n', (5152, 5158), False, 'import inspect\n'), ((23571, 23591), 'numpy.isfinite', 'np.isfinite', (['a._data'], {}), '(a._data)\n', (23582, 23591), True, 'import numpy as np\n'), ((41472, 41487), 'numpy.dtype', 'np.dtype', (['descr'], {}), '(descr)\n', (41480, 41487), True, 'import numpy as np\n'), ((50650, 50679), 'numpy.ones', 'np.ones', (['m.shape'], {'dtype': 'dtype'}), '(m.shape, dtype=dtype)\n', (50657, 50679), True, 'import numpy as np\n'), ((52394, 52428), 'numpy.zeros', 'np.zeros', (['newshape'], {'dtype': 'MaskType'}), '(newshape, dtype=MaskType)\n', (52402, 52428), True, 'import numpy as np\n'), ((54728, 54752), 'numpy.core.umath.logical_or', 'umath.logical_or', (['m1', 'm2'], {}), '(m1, m2)\n', (54744, 54752), True, 'import numpy.core.umath as umath\n'), ((69045, 69072), 'numpy.core.umath.equal', 'umath.equal', (['x._data', 'value'], {}), '(x._data, value)\n', (69056, 69072), True, 'import numpy.core.umath as umath\n'), ((71561, 71606), 'numpy.isclose', 'np.isclose', (['xnew', 'value'], {'atol': 'atol', 'rtol': 'rtol'}), '(xnew, value, atol=atol, rtol=rtol)\n', (71571, 71606), True, 'import numpy as np\n'), ((71634, 71658), 'numpy.core.umath.equal', 'umath.equal', (['xnew', 'value'], {}), '(xnew, value)\n', (71645, 71658), True, 'import numpy.core.umath as umath\n'), ((74746, 74785), 'numpy.copyto', 'np.copyto', (['result', 'printopt'], {'where': 'mask'}), '(result, printopt, where=mask)\n', (74755, 74785), True, 'import numpy as np\n'), ((74894, 75076), 'textwrap.dedent', 'textwrap.dedent', (['""" masked_%(name)s(data =\n %(data)s,\n %(nlen)s mask =\n %(mask)s,\n %(nlen)s fill_value = %(fill)s)\n 
"""'], {}), '(\n """ masked_%(name)s(data =\n %(data)s,\n %(nlen)s mask =\n %(mask)s,\n %(nlen)s fill_value = %(fill)s)\n """\n )\n', (74909, 75076), False, 'import textwrap\n'), ((75090, 75314), 'textwrap.dedent', 'textwrap.dedent', (['""" masked_%(name)s(data =\n %(data)s,\n %(nlen)s mask =\n %(mask)s,\n %(nlen)s fill_value = %(fill)s,\n %(nlen)s dtype = %(dtype)s)\n """'], {}), '(\n """ masked_%(name)s(data =\n %(data)s,\n %(nlen)s mask =\n %(mask)s,\n %(nlen)s fill_value = %(fill)s,\n %(nlen)s dtype = %(dtype)s)\n """\n )\n', (75105, 75314), False, 'import textwrap\n'), ((75330, 75494), 'textwrap.dedent', 'textwrap.dedent', (['""" masked_%(name)s(data = %(data)s,\n %(nlen)s mask = %(mask)s,\n %(nlen)s fill_value = %(fill)s)\n """'], {}), '(\n """ masked_%(name)s(data = %(data)s,\n %(nlen)s mask = %(mask)s,\n %(nlen)s fill_value = %(fill)s)\n """\n )\n', (75345, 75494), False, 'import textwrap\n'), ((75507, 75713), 'textwrap.dedent', 'textwrap.dedent', (['""" masked_%(name)s(data = %(data)s,\n %(nlen)s mask = %(mask)s,\n %(nlen)s fill_value = %(fill)s,\n %(nlen)s dtype = %(dtype)s)\n """'], {}), '(\n """ masked_%(name)s(data = %(data)s,\n %(nlen)s mask = %(mask)s,\n %(nlen)s fill_value = %(fill)s,\n %(nlen)s dtype = %(dtype)s)\n """\n )\n', (75522, 75713), False, 'import textwrap\n'), ((87192, 87268), 'numpy.array', 'np.array', (['data'], {'dtype': 'dtype', 'copy': 'copy', 'order': 'order', 'subok': '(True)', 'ndmin': 'ndmin'}), '(data, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin)\n', (87200, 87268), True, 'import numpy as np\n'), ((120553, 120588), 'numpy.ndarray.view', 'ndarray.view', (['self', 'self._baseclass'], {}), '(self, self._baseclass)\n', (120565, 120588), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((126621, 126646), 'numpy.ndarray.ravel', 'ndarray.ravel', (['self._data'], {}), '(self._data)\n', (126634, 126646), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), 
((128954, 128998), 'numpy.array', 'np.array', (['condition'], {'copy': '(False)', 'subok': '(False)'}), '(condition, copy=False, subok=False)\n', (128962, 128998), True, 'import numpy as np\n'), ((132061, 132110), 'builtins.all', 'builtins.all', (['(dim == 1 for dim in self.shape[:-1])'], {}), '(dim == 1 for dim in self.shape[:-1])\n', (132073, 132110), False, 'import builtins\n'), ((133058, 133151), 'numpy.array2string', 'np.array2string', (['self._mask'], {'separator': '""", """', 'prefix': "(indents['mask'] + 'mask=')", 'suffix': '""","""'}), "(self._mask, separator=', ', prefix=indents['mask'] +\n 'mask=', suffix=',')\n", (133073, 133151), True, 'import numpy as np\n'), ((178405, 178423), 'numpy.iscomplexobj', 'iscomplexobj', (['self'], {}), '(self)\n', (178417, 178423), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((190425, 190466), 'numpy.take_along_axis', 'np.take_along_axis', (['self', 'sidx'], {'axis': 'axis'}), '(self, sidx, axis=axis)\n', (190443, 190466), True, 'import numpy as np\n'), ((195117, 195254), 'warnings.warn', 'warnings.warn', (['"""`mini` is deprecated; use the `min` method or `np.ma.minimum.reduce instead."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n '`mini` is deprecated; use the `min` method or `np.ma.minimum.reduce instead.'\n , DeprecationWarning, stacklevel=2)\n", (195130, 195254), False, 'import warnings\n'), ((201364, 201418), 'numpy.subtract', 'np.subtract', (['out', 'min_value'], {'out': 'out', 'casting': '"""unsafe"""'}), "(out, min_value, out=out, casting='unsafe')\n", (201375, 201418), True, 'import numpy as np\n'), ((201493, 201612), 'warnings.warn', 'warnings.warn', (['f"""Warning: \'partition\' will ignore the \'mask\' of the {self.__class__.__name__}."""'], {'stacklevel': '(2)'}), '(\n f"Warning: \'partition\' will ignore the \'mask\' of the {self.__class__.__name__}."\n , stacklevel=2)\n', (201506, 201612), False, 'import warnings\n'), ((201777, 201899), 'warnings.warn', 
'warnings.warn', (['f"""Warning: \'argpartition\' will ignore the \'mask\' of the {self.__class__.__name__}."""'], {'stacklevel': '(2)'}), '(\n f"Warning: \'argpartition\' will ignore the \'mask\' of the {self.__class__.__name__}."\n , stacklevel=2)\n', (201790, 201899), False, 'import warnings\n'), ((205526, 205629), 'warnings.warn', 'warnings.warn', (['"""tostring() is deprecated. Use tobytes() instead."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "('tostring() is deprecated. Use tobytes() instead.',\n DeprecationWarning, stacklevel=2)\n", (205539, 205629), False, 'import warnings\n'), ((209296, 209370), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'self.shape', 'dtype': "[('_data', ddtype), ('_mask', mdtype)]"}), "(shape=self.shape, dtype=[('_data', ddtype), ('_mask', mdtype)])\n", (209306, 209370), True, 'import numpy as np\n'), ((211748, 211799), 'numpy.array', 'np.array', (['data'], {'copy': 'copy', 'subok': 'subok', 'dtype': 'dtype'}), '(data, copy=copy, subok=subok, dtype=dtype)\n', (211756, 211799), True, 'import numpy as np\n'), ((224696, 224734), 'numpy.array', 'narray', (['target'], {'copy': '(False)', 'subok': '(True)'}), '(target, copy=False, subok=True)\n', (224702, 224734), True, 'from numpy import array as narray\n'), ((231442, 231488), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (231453, 231488), True, 'import numpy as np\n'), ((231838, 231863), 'numpy.logical_or', 'np.logical_or', (['m', 'invalid'], {}), '(m, invalid)\n', (231851, 231863), True, 'import numpy as np\n'), ((236075, 236094), 'numpy.diag', 'np.diag', (['v._mask', 'k'], {}), '(v._mask, k)\n', (236082, 236094), True, 'import numpy as np\n'), ((241831, 241854), 'numpy.resize', 'np.resize', (['m', 'new_shape'], {}), '(m, new_shape)\n', (241840, 241854), True, 'import numpy as np\n'), ((245212, 245240), 'numpy.zeros', 'np.zeros', (['()'], {'dtype': 'yd.dtype'}), '((), dtype=yd.dtype)\n', 
(245220, 245240), True, 'import numpy as np\n'), ((245255, 245282), 'numpy.ones', 'np.ones', (['()'], {'dtype': 'ym.dtype'}), '((), dtype=ym.dtype)\n', (245262, 245282), True, 'import numpy as np\n'), ((245506, 245535), 'numpy.ones', 'np.ones', (['()'], {'dtype': 'mask.dtype'}), '((), dtype=mask.dtype)\n', (245513, 245535), True, 'import numpy as np\n'), ((248848, 248875), 'numpy.round_', 'np.round_', (['a', 'decimals', 'out'], {}), '(a, decimals, out)\n', (248857, 248875), True, 'import numpy as np\n'), ((254696, 254721), 'numpy.dot', 'np.dot', (['am', 'bm', 'out._mask'], {}), '(am, bm, out._mask)\n', (254702, 254721), True, 'import numpy as np\n'), ((254731, 254767), 'numpy.logical_not', 'np.logical_not', (['out._mask', 'out._mask'], {}), '(out._mask, out._mask)\n', (254745, 254767), True, 'import numpy as np\n'), ((259691, 259708), 'numpy.core.umath.equal', 'umath.equal', (['x', 'y'], {}), '(x, y)\n', (259702, 259708), True, 'import numpy.core.umath as umath\n'), ((262722, 262744), 'numpy.result_type', 'np.result_type', (['y', '(1.0)'], {}), '(y, 1.0)\n', (262736, 262744), True, 'import numpy as np\n'), ((263124, 263136), 'numpy.any', 'np.any', (['xinf'], {}), '(xinf)\n', (263130, 263136), True, 'import numpy as np\n'), ((263264, 263273), 'numpy.all', 'np.all', (['d'], {}), '(d)\n', (263270, 263273), True, 'import numpy as np\n'), ((7558, 7585), 'numpy.array', 'np.array', (['vals'], {'dtype': 'dtype'}), '(vals, dtype=dtype)\n', (7566, 7585), True, 'import numpy as np\n'), ((7757, 7779), 'numpy.full', 'np.full', (['shape', 'subval'], {}), '(shape, subval)\n', (7764, 7779), True, 'import numpy as np\n'), ((21865, 21901), 'numpy.array', 'np.array', (['a'], {'copy': '(False)', 'subok': 'subok'}), '(a, copy=False, subok=subok)\n', (21873, 21901), True, 'import numpy as np\n'), ((24833, 24862), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (24844, 24862), True, 'import numpy as np\n'), ((25352, 25381), 'numpy.errstate', 
'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (25363, 25381), True, 'import numpy as np\n'), ((26044, 26057), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (26054, 26057), True, 'import numpy as np\n'), ((26059, 26072), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (26069, 26072), True, 'import numpy as np\n'), ((26087, 26116), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (26098, 26116), True, 'import numpy as np\n'), ((26511, 26540), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (26522, 26540), True, 'import numpy as np\n'), ((26562, 26602), 'numpy.core.umath.less_equal', 'umath.less_equal', (['x', 'self.critical_value'], {}), '(x, self.critical_value)\n', (26578, 26602), True, 'import numpy.core.umath as umath\n'), ((26933, 26962), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (26944, 26962), True, 'import numpy as np\n'), ((26984, 27018), 'numpy.core.umath.less', 'umath.less', (['x', 'self.critical_value'], {}), '(x, self.critical_value)\n', (26994, 27018), True, 'import numpy.core.umath as umath\n'), ((31070, 31083), 'numpy.errstate', 'np.errstate', ([], {}), '()\n', (31081, 31083), True, 'import numpy as np\n'), ((31098, 31142), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (31107, 31142), True, 'import numpy as np\n'), ((33000, 33033), 'numpy.core.umath.logical_and.reduce', 'umath.logical_and.reduce', (['m', 'axis'], {}), '(m, axis)\n', (33024, 33033), True, 'import numpy.core.umath as umath\n'), ((33676, 33706), 'numpy.core.umath.logical_or.outer', 'umath.logical_or.outer', (['ma', 'mb'], {}), '(ma, mb)\n', (33698, 33706), True, 'import numpy.core.umath as umath\n'), ((33808, 33833), 'numpy.copyto', 'np.copyto', (['d', 'da'], {'where': 'm'}), '(d, da, where=m)\n', (33817, 33833), True, 'import 
numpy as np\n'), ((35625, 35671), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (35636, 35671), True, 'import numpy as np\n'), ((35814, 35836), 'numpy.core.umath.isfinite', 'umath.isfinite', (['result'], {}), '(result)\n', (35828, 35836), True, 'import numpy.core.umath as umath\n'), ((36357, 36404), 'numpy.copyto', 'np.copyto', (['result', '(0)'], {'casting': '"""unsafe"""', 'where': 'm'}), "(result, 0, casting='unsafe', where=m)\n", (36366, 36404), True, 'import numpy as np\n'), ((36488, 36509), 'numpy.core.umath.multiply', 'umath.multiply', (['m', 'da'], {}), '(m, da)\n', (36502, 36509), True, 'import numpy.core.umath as umath\n'), ((36580, 36638), 'numpy.can_cast', 'np.can_cast', (['masked_da.dtype', 'result.dtype'], {'casting': '"""safe"""'}), "(masked_da.dtype, result.dtype, casting='safe')\n", (36591, 36638), True, 'import numpy as np\n'), ((46018, 46031), 'numpy.shape', 'np.shape', (['arr'], {}), '(arr)\n', (46026, 46031), True, 'import numpy as np\n'), ((69141, 69154), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (69151, 69154), True, 'import numpy as np\n'), ((72905, 72919), 'numpy.isfinite', 'np.isfinite', (['a'], {}), '(a)\n', (72916, 72919), True, 'import numpy as np\n'), ((76294, 76348), 'numpy.copyto', 'np.copyto', (['current', 'fill_value[name]'], {'where': 'mask[name]'}), '(current, fill_value[name], where=mask[name])\n', (76303, 76348), True, 'import numpy as np\n'), ((87916, 87940), 'numpy.ndarray.view', 'ndarray.view', (['_data', 'cls'], {}), '(_data, cls)\n', (87928, 87940), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((114784, 114810), 'numpy.array', 'np.array', (['mask'], {'copy': '(False)'}), '(mask, copy=False)\n', (114792, 114810), True, 'import numpy as np\n'), ((122385, 122599), 'warnings.warn', 'warnings.warn', (['"""Non-scalar arrays for the fill value are deprecated. 
Use arrays with scalar values instead. The filled function still supports any array as `fill_value`."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'Non-scalar arrays for the fill value are deprecated. Use arrays with scalar values instead. The filled function still supports any array as `fill_value`.'\n , DeprecationWarning, stacklevel=2)\n", (122398, 122599), False, 'import warnings\n'), ((125180, 125205), 'numpy.asanyarray', 'np.asanyarray', (['fill_value'], {}), '(fill_value)\n', (125193, 125205), True, 'import numpy as np\n'), ((131764, 131781), 'numpy.all', 'np.all', (['self.mask'], {}), '(self.mask)\n', (131770, 131781), True, 'import numpy as np\n'), ((133310, 133357), 'numpy.core.arrayprint.dtype_short_repr', 'np.core.arrayprint.dtype_short_repr', (['self.dtype'], {}), '(self.dtype)\n', (133345, 133357), True, 'import numpy as np\n'), ((135466, 135516), 'numpy.broadcast_to', 'np.broadcast_to', (['self', 'broadcast_shape'], {'subok': '(True)'}), '(self, broadcast_shape, subok=True)\n', (135481, 135516), True, 'import numpy as np\n'), ((143279, 143315), 'numpy.where', 'np.where', (['dom_mask', 'fval', 'other_data'], {}), '(dom_mask, fval, other_data)\n', (143287, 143315), True, 'import numpy as np\n'), ((143968, 144004), 'numpy.where', 'np.where', (['dom_mask', 'fval', 'other_data'], {}), '(dom_mask, fval, other_data)\n', (143976, 144004), True, 'import numpy as np\n'), ((144664, 144700), 'numpy.where', 'np.where', (['dom_mask', 'fval', 'other_data'], {}), '(dom_mask, fval, other_data)\n', (144672, 144700), True, 'import numpy as np\n'), ((145083, 145129), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (145094, 145129), True, 'import numpy as np\n'), ((145294, 145317), 'numpy.isfinite', 'np.isfinite', (['self._data'], {}), '(self._data)\n', (145305, 145317), True, 'import numpy as np\n'), ((145497, 145550), 'numpy.copyto', 'np.copyto', (['self._data', 
'self.fill_value'], {'where': 'invalid'}), '(self._data, self.fill_value, where=invalid)\n', (145506, 145550), True, 'import numpy as np\n'), ((150611, 150648), 'numpy.core.numeric.normalize_axis_tuple', 'normalize_axis_tuple', (['axis', 'self.ndim'], {}), '(axis, self.ndim)\n', (150631, 150648), False, 'from numpy.core.numeric import normalize_axis_tuple\n'), ((151123, 151162), 'numpy.full', 'np.full', (['out_dims', 'items'], {'dtype': 'np.intp'}), '(out_dims, items, dtype=np.intp)\n', (151130, 151162), True, 'import numpy as np\n'), ((158594, 158621), 'numpy.array', 'narray', (['indices'], {'copy': '(False)'}), '(indices, copy=False)\n', (158600, 158621), True, 'from numpy import array as narray\n'), ((158644, 158682), 'numpy.array', 'narray', (['values'], {'copy': '(False)', 'subok': '(True)'}), '(values, copy=False, subok=True)\n', (158650, 158682), True, 'from numpy import array as narray\n'), ((190143, 190196), 'numpy.ndarray.sort', 'ndarray.sort', (['self'], {'axis': 'axis', 'kind': 'kind', 'order': 'order'}), '(self, axis=axis, kind=kind, order=order)\n', (190155, 190196), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((193018, 193055), 'numpy.copyto', 'np.copyto', (['out', 'np.nan'], {'where': 'newmask'}), '(out, np.nan, where=newmask)\n', (193027, 193055), True, 'import numpy as np\n'), ((197884, 197921), 'numpy.copyto', 'np.copyto', (['out', 'np.nan'], {'where': 'newmask'}), '(out, np.nan, where=newmask)\n', (197893, 197921), True, 'import numpy as np\n'), ((202585, 202639), 'numpy.take', 'np.take', (['_data', 'indices'], {'axis': 'axis', 'mode': 'mode', 'out': 'out'}), '(_data, indices, axis=axis, mode=mode, out=out)\n', (202592, 202639), True, 'import numpy as np\n'), ((211046, 211063), 'copy.deepcopy', 'deepcopy', (['v', 'memo'], {}), '(v, memo)\n', (211054, 211063), False, 'from copy import deepcopy\n'), ((217793, 217806), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (217801, 217806), True, 'import numpy 
as np\n'), ((217826, 217840), 'numpy.array', 'np.array', (['(True)'], {}), '(True)\n', (217834, 217840), True, 'import numpy as np\n'), ((224286, 224450), 'warnings.warn', 'warnings.warn', (['f"""Single-argument form of np.ma.{self.__name__} is deprecated. Use np.ma.{self.__name__}.reduce instead."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n f'Single-argument form of np.ma.{self.__name__} is deprecated. Use np.ma.{self.__name__}.reduce instead.'\n , DeprecationWarning, stacklevel=2)\n", (224299, 224450), False, 'import warnings\n'), ((224894, 225142), 'warnings.warn', 'warnings.warn', (['f"""In the future the default for ma.{self.__name__}.reduce will be axis=0, not the current None, to match np.{self.__name__}.reduce. Explicitly pass 0 or None to silence this warning."""', 'MaskedArrayFutureWarning'], {'stacklevel': '(2)'}), "(\n f'In the future the default for ma.{self.__name__}.reduce will be axis=0, not the current None, to match np.{self.__name__}.reduce. Explicitly pass 0 or None to silence this warning.'\n , MaskedArrayFutureWarning, stacklevel=2)\n", (224907, 225142), False, 'import warnings\n'), ((225615, 225652), 'numpy.core.umath.logical_and.reduce', 'umath.logical_and.reduce', (['m'], {}), '(m, **kwargs)\n', (225639, 225652), True, 'import numpy.core.umath as umath\n'), ((235985, 235998), 'numpy.diag', 'np.diag', (['v', 'k'], {}), '(v, k)\n', (235992, 235998), True, 'import numpy as np\n'), ((238329, 238368), 'numpy.copyto', 'np.copyto', (['a._mask', 'valmask'], {'where': 'mask'}), '(a._mask, valmask, where=mask)\n', (238338, 238368), True, 'import numpy as np\n'), ((238626, 238665), 'numpy.copyto', 'np.copyto', (['a._mask', 'valmask'], {'where': 'mask'}), '(a._mask, valmask, where=mask)\n', (238635, 238665), True, 'import numpy as np\n'), ((241869, 241892), 'numpy.resize', 'np.resize', (['x', 'new_shape'], {}), '(x, new_shape)\n', (241878, 241892), True, 'import numpy as np\n'), ((245341, 245369), 'numpy.zeros', 'np.zeros', (['()'], 
{'dtype': 'xd.dtype'}), '((), dtype=xd.dtype)\n', (245349, 245369), True, 'import numpy as np\n'), ((245384, 245411), 'numpy.ones', 'np.ones', (['()'], {'dtype': 'xm.dtype'}), '((), dtype=xm.dtype)\n', (245391, 245411), True, 'import numpy as np\n'), ((247830, 247868), 'numpy.choose', 'np.choose', (['c', 'data'], {'mode': 'mode', 'out': 'out'}), '(c, data, mode=mode, out=out)\n', (247839, 247868), True, 'import numpy as np\n'), ((251610, 251633), 'numpy.unique', 'np.unique', (['maskedval[0]'], {}), '(maskedval[0])\n', (251619, 251633), True, 'import numpy as np\n'), ((254363, 254377), 'numpy.dot', 'np.dot', (['am', 'bm'], {}), '(am, bm)\n', (254369, 254377), True, 'import numpy as np\n'), ((254420, 254433), 'numpy.asarray', 'np.asarray', (['d'], {}), '(d)\n', (254430, 254433), True, 'import numpy as np\n'), ((254659, 254686), 'numpy.empty', 'np.empty', (['d.shape', 'MaskType'], {}), '(d.shape, MaskType)\n', (254667, 254686), True, 'import numpy as np\n'), ((255199, 255215), 'numpy.inner', 'np.inner', (['fa', 'fb'], {}), '(fa, fb)\n', (255207, 255215), True, 'import numpy as np\n'), ((255710, 255734), 'numpy.outer', 'np.outer', (['(1 - ma)', '(1 - mb)'], {}), '(1 - ma, 1 - mb)\n', (255718, 255734), True, 'import numpy as np\n'), ((259816, 259833), 'numpy.core.umath.equal', 'umath.equal', (['x', 'y'], {}), '(x, y)\n', (259827, 259833), True, 'import numpy.core.umath as umath\n'), ((5352, 5367), 'numpy.compat.getargspec', 'getargspec', (['obj'], {}), '(obj)\n', (5362, 5367), False, 'from numpy.compat import getargspec, formatargspec, long, unicode, bytes\n'), ((8050, 8068), 'numpy.asanyarray', 'np.asanyarray', (['obj'], {}), '(obj)\n', (8063, 8068), True, 'import numpy as np\n'), ((14360, 14396), 'numpy.asarray', 'np.asarray', (['fill_value'], {'dtype': 'object'}), '(fill_value, dtype=object)\n', (14370, 14396), True, 'import numpy as np\n'), ((19771, 19787), 'numpy.array', 'np.array', (['a', '"""O"""'], {}), "(a, 'O')\n", (19779, 19787), True, 'import numpy as 
np\n'), ((19815, 19826), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (19823, 19826), True, 'import numpy as np\n'), ((24901, 24925), 'numpy.core.umath.greater', 'umath.greater', (['x', 'self.b'], {}), '(x, self.b)\n', (24914, 24925), True, 'import numpy.core.umath as umath\n'), ((24964, 24985), 'numpy.core.umath.less', 'umath.less', (['x', 'self.a'], {}), '(x, self.a)\n', (24974, 24985), True, 'import numpy.core.umath as umath\n'), ((25926, 25941), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (25934, 25941), True, 'import numpy as np\n'), ((26176, 26193), 'numpy.core.umath.absolute', 'umath.absolute', (['b'], {}), '(b)\n', (26190, 26193), True, 'import numpy.core.umath as umath\n'), ((28435, 28481), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (28446, 28481), True, 'import numpy as np\n'), ((28581, 28603), 'numpy.core.umath.isfinite', 'umath.isfinite', (['result'], {}), '(result)\n', (28595, 28603), True, 'import numpy.core.umath as umath\n'), ((28795, 28841), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (28806, 28841), True, 'import numpy as np\n'), ((29519, 29548), 'numpy.copyto', 'np.copyto', (['result', 'd'], {'where': 'm'}), '(result, d, where=m)\n', (29528, 29548), True, 'import numpy as np\n'), ((31558, 31582), 'numpy.core.umath.logical_or', 'umath.logical_or', (['ma', 'mb'], {}), '(ma, mb)\n', (31574, 31582), True, 'import numpy.core.umath as umath\n'), ((31945, 31993), 'numpy.copyto', 'np.copyto', (['result', 'da'], {'casting': '"""unsafe"""', 'where': 'm'}), "(result, da, casting='unsafe', where=m)\n", (31954, 31993), True, 'import numpy as np\n'), ((53838, 53889), 'numpy.core.umath.logical_or', 'umath.logical_or', (['current1', 'm2[name]', 'newmask[name]'], {}), '(current1, m2[name], newmask[name])\n', (53854, 53889), True, 'import numpy.core.umath as 
umath\n'), ((54601, 54621), 'numpy.broadcast', 'np.broadcast', (['m1', 'm2'], {}), '(m1, m2)\n', (54613, 54621), True, 'import numpy as np\n'), ((89866, 89900), 'numpy.ones', 'np.ones', (['_data.shape'], {'dtype': 'mdtype'}), '(_data.shape, dtype=mdtype)\n', (89873, 89900), True, 'import numpy as np\n'), ((103214, 103232), 'numpy.ndarray.view', 'ndarray.view', (['self'], {}), '(self)\n', (103226, 103232), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((103278, 103302), 'numpy.ndarray.view', 'ndarray.view', (['self', 'type'], {}), '(self, type)\n', (103290, 103302), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((103686, 103717), 'numpy.ndarray.view', 'ndarray.view', (['self', 'dtype', 'type'], {}), '(self, dtype, type)\n', (103698, 103717), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((131004, 131025), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (131023, 131025), True, 'import numpy as np\n'), ((131700, 131747), 'numpy.core.arrayprint.dtype_is_implied', 'np.core.arrayprint.dtype_is_implied', (['self.dtype'], {}), '(self.dtype)\n', (131735, 131747), True, 'import numpy as np\n'), ((135408, 135433), 'numpy.broadcast', 'np.broadcast', (['self', 'odata'], {}), '(self, odata)\n', (135420, 135433), True, 'import numpy as np\n'), ((135778, 135801), 'numpy.ones', 'np.ones', (['()', 'mask.dtype'], {}), '((), mask.dtype)\n', (135785, 135801), True, 'import numpy as np\n'), ((145942, 146017), 'warnings.warn', 'warnings.warn', (['"""Warning: converting a masked element to nan."""'], {'stacklevel': '(2)'}), "('Warning: converting a masked element to nan.', stacklevel=2)\n", (145955, 146017), False, 'import warnings\n'), ((150054, 150090), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'np.bool_'}), '(self.shape, dtype=np.bool_)\n', (150062, 150090), True, 'import numpy as np\n'), ((153246, 153284), 'numpy.ndarray.ravel', 'ndarray.ravel', 
(['self._data'], {'order': 'order'}), '(self._data, order=order)\n', (153259, 153284), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((177223, 177243), 'numpy.expand_dims', 'expand_dims', (['m', 'axis'], {}), '(m, axis)\n', (177234, 177243), False, 'from numpy import expand_dims\n'), ((178446, 178467), 'numpy.core.umath.absolute', 'umath.absolute', (['danom'], {}), '(danom)\n', (178460, 178467), True, 'import numpy.core.umath as umath\n'), ((180379, 180424), 'numpy.power', 'np.power', (['out', '(0.5)'], {'out': 'out', 'casting': '"""unsafe"""'}), "(out, 0.5, out=out, casting='unsafe')\n", (180387, 180424), True, 'import numpy as np\n'), ((184127, 184165), 'numpy.issubdtype', 'np.issubdtype', (['self.dtype', 'np.floating'], {}), '(self.dtype, np.floating)\n', (184140, 184165), True, 'import numpy as np\n'), ((219870, 220032), 'warnings.warn', 'warnings.warn', (['"""Format strings passed to MaskedConstant are ignored, but in future may error or produce different behavior"""', 'FutureWarning'], {'stacklevel': '(2)'}), "(\n 'Format strings passed to MaskedConstant are ignored, but in future may error or produce different behavior'\n , FutureWarning, stacklevel=2)\n", (219883, 220032), False, 'import warnings\n'), ((238472, 238505), 'numpy.copyto', 'np.copyto', (['m', 'valmask'], {'where': 'mask'}), '(m, valmask, where=mask)\n', (238481, 238505), True, 'import numpy as np\n'), ((251689, 251712), 'numpy.unique', 'np.unique', (['maskedval[1]'], {}), '(maskedval[1])\n', (251698, 251712), True, 'import numpy as np\n'), ((14098, 14144), 'numpy.array', 'np.array', (['fill_value'], {'copy': '(False)', 'dtype': 'ndtype'}), '(fill_value, copy=False, dtype=ndtype)\n', (14106, 14144), True, 'import numpy as np\n'), ((14988, 15034), 'numpy.array', 'np.array', (['fill_value'], {'copy': '(False)', 'dtype': 'ndtype'}), '(fill_value, copy=False, dtype=ndtype)\n', (14996, 15034), True, 'import numpy as np\n'), ((25429, 25441), 'numpy.core.umath.cos', 
'umath.cos', (['x'], {}), '(x)\n', (25438, 25441), True, 'import numpy.core.umath as umath\n'), ((26138, 26155), 'numpy.core.umath.absolute', 'umath.absolute', (['a'], {}), '(a)\n', (26152, 26155), True, 'import numpy.core.umath as umath\n'), ((88633, 88668), 'numpy.zeros', 'np.zeros', (['_data.shape'], {'dtype': 'mdtype'}), '(_data.shape, dtype=mdtype)\n', (88641, 88668), True, 'import numpy as np\n'), ((89981, 90016), 'numpy.zeros', 'np.zeros', (['_data.shape'], {'dtype': 'mdtype'}), '(_data.shape, dtype=mdtype)\n', (89989, 90016), True, 'import numpy as np\n'), ((90624, 90652), 'numpy.resize', 'np.resize', (['mask', '_data.shape'], {}), '(mask, _data.shape)\n', (90633, 90652), True, 'import numpy as np\n'), ((99133, 99179), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (99144, 99179), True, 'import numpy as np\n'), ((99826, 99864), 'numpy.copyto', 'np.copyto', (['result', 'fill_value'], {'where': 'd'}), '(result, fill_value, where=d)\n', (99835, 99864), True, 'import numpy as np\n'), ((115411, 115450), 'numpy.array', 'np.array', (['mask'], {'copy': 'copy', 'dtype': 'mdtype'}), '(mask, copy=copy, dtype=mdtype)\n', (115419, 115450), True, 'import numpy as np\n'), ((125504, 125542), 'numpy.copyto', 'np.copyto', (['result', 'fill_value'], {'where': 'm'}), '(result, fill_value, where=m)\n', (125513, 125542), True, 'import numpy as np\n'), ((126734, 126759), 'numpy.ndarray.ravel', 'ndarray.ravel', (['self._mask'], {}), '(self._mask)\n', (126747, 126759), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((150326, 150365), 'numpy.AxisError', 'np.AxisError', ([], {'axis': 'axis', 'ndim': 'self.ndim'}), '(axis=axis, ndim=self.ndim)\n', (150338, 150365), True, 'import numpy as np\n'), ((153393, 153431), 'numpy.ndarray.ravel', 'ndarray.ravel', (['self._mask'], {'order': 'order'}), '(self._mask, order=order)\n', (153406, 153431), False, 'from numpy import 
ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((192331, 192382), 'numpy.copyto', 'np.copyto', (['result', 'result.fill_value'], {'where': 'newmask'}), '(result, result.fill_value, where=newmask)\n', (192340, 192382), True, 'import numpy as np\n'), ((197195, 197246), 'numpy.copyto', 'np.copyto', (['result', 'result.fill_value'], {'where': 'newmask'}), '(result, result.fill_value, where=newmask)\n', (197204, 197246), True, 'import numpy as np\n'), ((212107, 212120), 'numpy.void', 'np.void', (['mask'], {}), '(mask)\n', (212114, 212120), True, 'import numpy as np\n'), ((231524, 231543), 'numpy.core.umath.power', 'umath.power', (['fa', 'fb'], {}), '(fa, fb)\n', (231535, 231543), True, 'import numpy.core.umath as umath\n'), ((237484, 237505), 'numpy.array', 'narray', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (237490, 237505), True, 'from numpy import array as narray\n'), ((240105, 240126), 'numpy.array', 'narray', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (240111, 240126), True, 'from numpy import array as narray\n'), ((256215, 256226), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (256223, 256226), True, 'import numpy as np\n'), ((256275, 256286), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (256283, 256286), True, 'import numpy as np\n'), ((263046, 263057), 'numpy.isinf', 'np.isinf', (['y'], {}), '(y)\n', (263054, 263057), True, 'import numpy as np\n'), ((13439, 13467), 'numpy.array', 'np.array', (['fval'], {'dtype': 'cdtype'}), '(fval, dtype=cdtype)\n', (13447, 13467), True, 'import numpy as np\n'), ((90143, 90182), 'numpy.array', 'np.array', (['mask'], {'copy': 'copy', 'dtype': 'mdtype'}), '(mask, copy=copy, dtype=mdtype)\n', (90151, 90182), True, 'import numpy as np\n'), ((90713, 90742), 'numpy.reshape', 'np.reshape', (['mask', '_data.shape'], {}), '(mask, _data.shape)\n', (90723, 90742), True, 'import numpy as np\n'), ((91937, 91969), 'numpy.logical_or', 'np.logical_or', (['mask', '_data._mask'], {}), '(mask, _data._mask)\n', 
(91950, 91969), True, 'import numpy as np\n'), ((103427, 103452), 'numpy.ndarray.view', 'ndarray.view', (['self', 'dtype'], {}), '(self, dtype)\n', (103439, 103452), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((103540, 103565), 'numpy.ndarray.view', 'ndarray.view', (['self', 'dtype'], {}), '(self, dtype)\n', (103552, 103565), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((103623, 103648), 'numpy.ndarray.view', 'ndarray.view', (['self', 'dtype'], {}), '(self, dtype)\n', (103635, 103648), False, 'from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\n'), ((111991, 112015), 'numpy.core.umath.logical_not', 'umath.logical_not', (['_mask'], {}), '(_mask)\n', (112008, 112015), True, 'import numpy.core.umath as umath\n'), ((112377, 112413), 'numpy.copyto', 'np.copyto', (['dindx', 'dval'], {'where': '(~mindx)'}), '(dindx, dval, where=~mindx)\n', (112386, 112413), True, 'import numpy as np\n'), ((125622, 125654), 'numpy.array', 'narray', (['fill_value'], {'dtype': 'object'}), '(fill_value, dtype=object)\n', (125628, 125654), True, 'from numpy import array as narray\n'), ((125724, 125753), 'numpy.choose', 'np.choose', (['m', '(d, fill_value)'], {}), '(m, (d, fill_value))\n', (125733, 125753), True, 'import numpy as np\n'), ((130113, 130151), 'numpy.split', 'np.split', (['data', '(ind, -ind)'], {'axis': 'axis'}), '(data, (ind, -ind), axis=axis)\n', (130121, 130151), True, 'import numpy as np\n'), ((130184, 130227), 'numpy.concatenate', 'np.concatenate', (['(arr[0], arr[2])'], {'axis': 'axis'}), '((arr[0], arr[2]), axis=axis)\n', (130198, 130227), True, 'import numpy as np\n'), ((130259, 130297), 'numpy.split', 'np.split', (['mask', '(ind, -ind)'], {'axis': 'axis'}), '(mask, (ind, -ind), axis=axis)\n', (130267, 130297), True, 'import numpy as np\n'), ((130330, 130373), 'numpy.concatenate', 'np.concatenate', (['(arr[0], arr[2])'], {'axis': 'axis'}), '((arr[0], arr[2]), axis=axis)\n', 
(130344, 130373), True, 'import numpy as np\n'), ((136583, 136617), 'numpy.broadcast_to', 'np.broadcast_to', (['mask', 'check.shape'], {}), '(mask, check.shape)\n', (136598, 136617), True, 'import numpy as np\n'), ((150503, 150554), 'numpy.array', 'np.array', (['self.size'], {'dtype': 'np.intp', 'ndmin': 'self.ndim'}), '(self.size, dtype=np.intp, ndmin=self.ndim)\n', (150511, 150554), True, 'import numpy as np\n'), ((109073, 109290), 'warnings.warn', 'warnings.warn', (['f"""Upon accessing multidimensional field {indx!s}, need to keep dimensionality of fill_value at 0. Discarding heterogeneous fill_value and setting all to {dout._fill_value[0]!s}."""'], {'stacklevel': '(2)'}), "(\n f'Upon accessing multidimensional field {indx!s}, need to keep dimensionality of fill_value at 0. Discarding heterogeneous fill_value and setting all to {dout._fill_value[0]!s}.'\n , stacklevel=2)\n", (109086, 109290), False, 'import warnings\n'), ((212296, 212324), 'numpy.array', 'np.array', (['mask'], {'dtype': 'mdtype'}), '(mask, dtype=mdtype)\n', (212304, 212324), True, 'import numpy as np\n'), ((239605, 239626), 'numpy.array', 'narray', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (239611, 239626), True, 'from numpy import array as narray\n'), ((125939, 125977), 'numpy.array', 'np.array', (['fill_value'], {'dtype': 'self.dtype'}), '(fill_value, dtype=self.dtype)\n', (125947, 125977), True, 'import numpy as np\n'), ((88903, 88933), 'numpy.asanyarray', 'np.asanyarray', (['m'], {'dtype': 'mdtype'}), '(m, dtype=mdtype)\n', (88916, 88933), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Various retriever utilities.
"""
import regex
import unicodedata
import numpy as np
import scipy.sparse as sp
from sklearn.utils import murmurhash3_32
try:
import torch
except ImportError:
raise ImportError('Need to install Pytorch: go to pytorch.org')
# ------------------------------------------------------------------------------
# Sparse matrix saving/loading helpers.
# ------------------------------------------------------------------------------
def save_sparse_csr(filename, matrix, metadata=None):
    """
    Serialize a scipy CSR matrix to a compressed ``.npz`` archive.

    Stores the three CSR component arrays plus the shape, together with an
    optional pickled ``metadata`` object, under the keys expected by
    ``load_sparse_csr``.
    """
    np.savez(
        filename,
        data=matrix.data,
        indices=matrix.indices,
        indptr=matrix.indptr,
        shape=matrix.shape,
        metadata=metadata,
    )
def save_sparse_tensor(filename, matrix, metadata=None):
    """
    Serialize a torch sparse tensor with ``torch.save``.

    Persists the COO indices, the values, and the dense size, plus an
    optional ``metadata`` object, under the keys expected by
    ``load_sparse_tensor``.
    """
    torch.save(
        {
            'indices': matrix._indices(),
            'values': matrix._values(),
            'size': matrix.size(),
            'metadata': metadata,
        },
        filename,
    )
def load_sparse_csr(filename):
    """
    Load a CSR matrix previously written by ``save_sparse_csr``.

    Returns a ``(matrix, metadata)`` pair; ``metadata`` is None when the
    archive has no such entry.
    """
    archive = np.load(filename + '.npz', allow_pickle=True)
    csr = sp.csr_matrix(
        (archive['data'], archive['indices'], archive['indptr']),
        shape=archive['shape'],
    )
    metadata = archive['metadata'].item(0) if 'metadata' in archive else None
    return csr, metadata
def load_sparse_tensor(filename):
    """
    Load a sparse tensor previously written by ``save_sparse_tensor``.

    Returns a ``(tensor, metadata)`` pair; ``metadata`` is None when the
    payload has no such entry.
    """
    payload = torch.load(filename)
    tensor = torch.sparse.FloatTensor(
        payload['indices'], payload['values'], payload['size']
    )
    metadata = payload['metadata'] if 'metadata' in payload else None
    return tensor, metadata
# ------------------------------------------------------------------------------
# Token hashing.
# ------------------------------------------------------------------------------
def hash(token, num_buckets):
    """
    Unsigned 32 bit murmurhash for feature hashing.

    NOTE: shadows the builtin ``hash`` within this module; the name is kept
    for backward compatibility with existing callers.
    """
    bucket = murmurhash3_32(token, positive=True)
    return bucket % num_buckets
# ------------------------------------------------------------------------------
# Text cleaning.
# ------------------------------------------------------------------------------
# NLTK-style English stopword list, extended with tokenizer artifacts
# (clitic fragments such as "'ll"/"n't" and quote tokens "''"/"``").
# Consumed by filter_word below to drop uninformative tokens.
STOPWORDS = {
    'i',
    'me',
    'my',
    'myself',
    'we',
    'our',
    'ours',
    'ourselves',
    'you',
    'your',
    'yours',
    'yourself',
    'yourselves',
    'he',
    'him',
    'his',
    'himself',
    'she',
    'her',
    'hers',
    'herself',
    'it',
    'its',
    'itself',
    'they',
    'them',
    'their',
    'theirs',
    'themselves',
    'what',
    'which',
    'who',
    'whom',
    'this',
    'that',
    'these',
    'those',
    'am',
    'is',
    'are',
    'was',
    'were',
    'be',
    'been',
    'being',
    'have',
    'has',
    'had',
    'having',
    'do',
    'does',
    'did',
    'doing',
    'a',
    'an',
    'the',
    'and',
    'but',
    'if',
    'or',
    'because',
    'as',
    'until',
    'while',
    'of',
    'at',
    'by',
    'for',
    'with',
    'about',
    'against',
    'between',
    'into',
    'through',
    'during',
    'before',
    'after',
    'above',
    'below',
    'to',
    'from',
    'up',
    'down',
    'in',
    'out',
    'on',
    'off',
    'over',
    'under',
    'again',
    'further',
    'then',
    'once',
    'here',
    'there',
    'when',
    'where',
    'why',
    'how',
    'all',
    'any',
    'both',
    'each',
    'few',
    'more',
    'most',
    'other',
    'some',
    'such',
    'no',
    'nor',
    'not',
    'only',
    'own',
    'same',
    'so',
    'than',
    'too',
    'very',
    's',
    't',
    'can',
    'will',
    'just',
    'don',
    'should',
    'now',
    'd',
    'll',
    'm',
    'o',
    're',
    've',
    'y',
    'ain',
    'aren',
    'couldn',
    'didn',
    'doesn',
    'hadn',
    'hasn',
    'haven',
    'isn',
    'ma',
    'mightn',
    'mustn',
    'needn',
    'shan',
    'shouldn',
    'wasn',
    'weren',
    'won',
    'wouldn',
    "'ll",
    "'re",
    "'ve",
    "n't",
    "'s",
    "'d",
    "'m",
    "''",
    "``",
    }
def normalize(text):
    """
    Resolve different types of unicode encodings.

    Strings are normalized to NFD (canonical decomposition) so that
    visually identical tokens compare equal; any non-string input is
    returned unchanged.
    """
    # isinstance (not `type(text) != str`) is the idiomatic check and also
    # handles str subclasses, which should be normalized like plain strings.
    if not isinstance(text, str):
        return text
    return unicodedata.normalize('NFD', text)
def filter_word(text):
    """
    Take out english stopwords, punctuation, and compound endings.

    Returns True when the (NFD-normalized) token should be discarded:
    either it is pure punctuation or a known stopword.
    """
    word = normalize(text)
    is_punctuation = regex.match(r'^\p{P}+$', word) is not None
    return is_punctuation or word.lower() in STOPWORDS
def filter_ngram(gram, mode='any'):
    """
    Decide whether to keep or discard an n-gram.

    Args:
        gram: list of tokens (length N)
        mode: Option to throw out ngram if
            'any': any single token passes filter_word
            'all': all tokens pass filter_word
            'ends': book-ended by filterable tokens
    """
    flags = [filter_word(token) for token in gram]
    if mode == 'any':
        return any(flags)
    if mode == 'all':
        return all(flags)
    if mode == 'ends':
        return flags[0] or flags[-1]
    raise ValueError('Invalid mode: %s' % mode)
| [
"unicodedata.normalize",
"numpy.load",
"torch.load",
"torch.save",
"scipy.sparse.csr_matrix",
"regex.match",
"numpy.savez",
"torch.sparse.FloatTensor",
"sklearn.utils.murmurhash3_32"
] | [((908, 934), 'numpy.savez', 'np.savez', (['filename'], {}), '(filename, **data)\n', (916, 934), True, 'import numpy as np\n'), ((1152, 1178), 'torch.save', 'torch.save', (['data', 'filename'], {}), '(data, filename)\n', (1162, 1178), False, 'import torch\n'), ((1225, 1270), 'numpy.load', 'np.load', (["(filename + '.npz')"], {'allow_pickle': '(True)'}), "(filename + '.npz', allow_pickle=True)\n", (1232, 1270), True, 'import numpy as np\n'), ((1284, 1380), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (["(loader['data'], loader['indices'], loader['indptr'])"], {'shape': "loader['shape']"}), "((loader['data'], loader['indices'], loader['indptr']), shape=\n loader['shape'])\n", (1297, 1380), True, 'import scipy.sparse as sp\n'), ((1519, 1539), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (1529, 1539), False, 'import torch\n'), ((1553, 1630), 'torch.sparse.FloatTensor', 'torch.sparse.FloatTensor', (["loader['indices']", "loader['values']", "loader['size']"], {}), "(loader['indices'], loader['values'], loader['size'])\n", (1577, 1630), False, 'import torch\n'), ((4312, 4346), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (4333, 4346), False, 'import unicodedata\n'), ((4489, 4519), 'regex.match', 'regex.match', (['"""^\\\\p{P}+$"""', 'text'], {}), "('^\\\\p{P}+$', text)\n", (4500, 4519), False, 'import regex\n'), ((2009, 2045), 'sklearn.utils.murmurhash3_32', 'murmurhash3_32', (['token'], {'positive': '(True)'}), '(token, positive=True)\n', (2023, 2045), False, 'from sklearn.utils import murmurhash3_32\n')] |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import xarray as xr
from scipy import optimize
from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d
def calc_vph_current(b_xyz, j_xyz):
    """Estimates the phase speed of the oscillating current sheet using
    oscillations of J_N.

    The normal current component J_N is fitted as a linear function of the
    time derivative of B_L; the slope of that fit is the phase-speed
    estimate.

    Parameters
    ----------
    b_xyz : xarray.DataArray
        Time series of the magnetic field.
    j_xyz : xarray.DataArray
        Time series of the current density.

    Returns
    -------
    disprel : xarray.Dataset
        Dataset with the fitted slope (``fit_db_dt_jn``), the 2d histogram
        of dB_L/dt vs J_N (``hist``), the correlation coefficient
        (``rho``), the fit standard deviation (``sigma``), and the
        predicted J_N with 1.92-sigma confidence bounds evaluated on a
        high-resolution dB_L/dt axis (``hires_dBdt``).

    """

    # Time derivative of the l-component of the field
    dbl_dt = gradient(b_xyz[:, 0])

    # 2d histogram of dBl/dt vs J_N with an optimized number of bins
    n_bins = optimize_nbins_2d(dbl_dt, j_xyz[:, 2])
    hist_dbl_dt_jn = histogram2d(dbl_dt, j_xyz[:, 2], bins=n_bins)

    # Linear model for jn vs dBdt; the slope `a` is the phase speed
    def model_jn(x, a):
        return a * x

    v_phase_j, sigma_dbl_dt_jn = optimize.curve_fit(model_jn, dbl_dt.data,
                                                    j_xyz[:, 2].data)
    v_phase_j = v_phase_j[0]

    # Pearson correlation between dBl/dt and J_N
    corr_coeffs = np.corrcoef(dbl_dt.data, j_xyz[:, 2].data)
    rho = corr_coeffs[0, 1]

    # curve_fit returns the covariance of the single fit parameter;
    # its square root is the 1-sigma uncertainty of the slope.
    sigma_dbl_dt_jn = np.sqrt(float(sigma_dbl_dt_jn))

    # High-resolution dBl/dt axis for the fitted line and its bounds,
    # computed once instead of rebuilding the same array for every entry.
    dbl_dt_min = -1.2 * np.max(dbl_dt)
    dbl_dt_max = 1.2 * np.max(dbl_dt)
    hires_db_dt = np.linspace(dbl_dt_min, dbl_dt_max, 100)

    disprel = {"fit_db_dt_jn": v_phase_j, "hist": hist_dbl_dt_jn,
               "rho": rho, "sigma": sigma_dbl_dt_jn,
               "hires_dBdt": hires_db_dt,
               "pred_Jn": (["hires_dBdt"],
                           model_jn(hires_db_dt, v_phase_j)),
               "bound_upper": (["hires_dBdt"],
                               model_jn(hires_db_dt,
                                        v_phase_j + 1.92 * sigma_dbl_dt_jn)),
               "bound_lower": (["hires_dBdt"],
                               model_jn(hires_db_dt,
                                        v_phase_j - 1.92 * sigma_dbl_dt_jn))}

    disprel = xr.Dataset(disprel)

    return disprel
| [
"pyrfu.pyrf.optimize_nbins_2d",
"numpy.corrcoef",
"pyrfu.pyrf.gradient",
"scipy.optimize.curve_fit",
"xarray.Dataset",
"numpy.max",
"pyrfu.pyrf.histogram2d",
"numpy.linspace"
] | [((1150, 1171), 'pyrfu.pyrf.gradient', 'gradient', (['b_xyz[:, 0]'], {}), '(b_xyz[:, 0])\n', (1158, 1171), False, 'from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d\n'), ((1186, 1224), 'pyrfu.pyrf.optimize_nbins_2d', 'optimize_nbins_2d', (['dbl_dt', 'j_xyz[:, 2]'], {}), '(dbl_dt, j_xyz[:, 2])\n', (1203, 1224), False, 'from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d\n'), ((1246, 1291), 'pyrfu.pyrf.histogram2d', 'histogram2d', (['dbl_dt', 'j_xyz[:, 2]'], {'bins': 'n_bins'}), '(dbl_dt, j_xyz[:, 2], bins=n_bins)\n', (1257, 1291), False, 'from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d\n'), ((1406, 1465), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['model_jn', 'dbl_dt.data', 'j_xyz[:, 2].data'], {}), '(model_jn, dbl_dt.data, j_xyz[:, 2].data)\n', (1424, 1465), False, 'from scipy import optimize\n'), ((1565, 1607), 'numpy.corrcoef', 'np.corrcoef', (['dbl_dt.data', 'j_xyz[:, 2].data'], {}), '(dbl_dt.data, j_xyz[:, 2].data)\n', (1576, 1607), True, 'import numpy as np\n'), ((2686, 2705), 'xarray.Dataset', 'xr.Dataset', (['disprel'], {}), '(disprel)\n', (2696, 2705), True, 'import xarray as xr\n'), ((1739, 1753), 'numpy.max', 'np.max', (['dbl_dt'], {}), '(dbl_dt)\n', (1745, 1753), True, 'import numpy as np\n'), ((1777, 1791), 'numpy.max', 'np.max', (['dbl_dt'], {}), '(dbl_dt)\n', (1783, 1791), True, 'import numpy as np\n'), ((1941, 1981), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', '(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (1952, 1981), True, 'import numpy as np\n'), ((2062, 2102), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', '(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (2073, 2102), True, 'import numpy as np\n'), ((2240, 2280), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', '(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (2251, 2280), True, 'import numpy as np\n'), ((2499, 2539), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', 
'(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (2510, 2539), True, 'import numpy as np\n')] |
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: <NAME>, <NAME>, <NAME>
from os.path import exists
import os
import sqlite3
import sys
from optparse import OptionParser
# Plotting is optional: the script can still parse logs and build the
# database when matplotlib/numpy are missing.
plottingEnabled = True
try:
    import matplotlib
    # Select the non-interactive PDF backend before pyplot is imported.
    matplotlib.use('pdf')
    from matplotlib import __version__ as matplotlibversion
    from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt
    import numpy as np
    from math import floor
except ImportError:
    # Fall back gracefully: the -p/--plot option is simply not offered.
    print('Matplotlib or Numpy was not found; disabling plotting capabilities...')
    plottingEnabled = False
# Given a text line, split it into tokens (by space) and return the token
# at the desired index. Additionally, test that some expected tokens exist.
# Return None if they do not.
def readLogValue(filevar, desired_token_index, expected_tokens):
    """Read one line from *filevar*, split it on whitespace, and return the
    token at *desired_token_index*.

    *expected_tokens* maps token indices to required values. If any of
    those tokens does not match, the file position is rewound to where it
    was before the read and None is returned, so the caller can retry the
    same line with a different pattern.
    """
    bookmark = filevar.tell()
    tokens = filevar.readline().split()
    for index, expected in expected_tokens.items():
        if tokens[index] != expected:
            # Pattern mismatch: undo the read before giving up.
            filevar.seek(bookmark)
            return None
    return tokens[desired_token_index]
def readOptionalLogValue(filevar, desired_token_index, expected_tokens=None):
    """Like readLogValue, but all arguments beyond the token index are
    optional; returns None (with the file position rewound) when the
    expected tokens are not found.
    """
    # Use None instead of a mutable {} default argument; an empty dict
    # means "no constraints on the tokens".
    if expected_tokens is None:
        expected_tokens = {}
    return readLogValue(filevar, desired_token_index, expected_tokens)
def readRequiredLogValue(name, filevar, desired_token_index, expected_tokens=None):
    """Like readLogValue, but raises an Exception mentioning *name* when
    the value could not be read.
    """
    # Use None instead of a mutable {} default argument; an empty dict
    # means "no constraints on the tokens".
    if expected_tokens is None:
        expected_tokens = {}
    result = readLogValue(filevar, desired_token_index, expected_tokens)
    if result is None:
        raise Exception("Unable to read " + name)
    return result
def ensurePrefix(line, prefix):
    """Return *line* unchanged, raising an Exception when it does not
    start with *prefix*."""
    if line.startswith(prefix):
        return line
    raise Exception("Expected prefix " + prefix + " was not found")
def readOptionalMultilineValue(filevar):
    """Read an optional multi-line value delimited by "<<<|" ... "|>>>".

    Returns the text between the delimiter lines, or None (with the file
    position rewound) if the next line is not the opening delimiter.

    Raises:
        Exception: if the closing "|>>>" token is missing before EOF.
    """
    start_pos = filevar.tell()
    line = filevar.readline()
    if not line.startswith("<<<|"):
        filevar.seek(start_pos)
        return None
    value = ''
    line = filevar.readline()
    while not line.startswith('|>>>'):
        value = value + line
        # readline() returns '' (never None) at EOF; the original code
        # tested "line is None" and therefore looped forever on a
        # truncated file. Test for the empty string instead.
        if not line:
            raise Exception("Expected token |>>> missing")
        line = filevar.readline()
    return value
def readRequiredMultilineValue(filevar):
    """Read a mandatory multi-line value delimited by "<<<|" ... "|>>>".

    Raises:
        Exception: if the opening "<<<|" line or the closing "|>>>" token
            is missing.
    """
    ensurePrefix(filevar.readline(), "<<<|")
    value = ''
    line = filevar.readline()
    while not line.startswith('|>>>'):
        value = value + line
        # readline() returns '' (never None) at EOF; the original code
        # tested "line is None" and therefore looped forever on a
        # truncated file. Test for the empty string instead.
        if not line:
            raise Exception("Expected token |>>> missing")
        line = filevar.readline()
    return value
def readBenchmarkLog(dbname, filenames, moveitformat):
    """Parse benchmark log files and store the parsed data in a sqlite3 database.

    dbname       -- filename of the sqlite3 database (created/extended as needed)
    filenames    -- list of benchmark log filenames to parse
    moveitformat -- True when the logs were produced by MoveIt!, which uses a
                    slightly different header format
    """
    conn = sqlite3.connect(dbname)
    if sys.version_info[0] < 3:
        # Python 2 only: tolerate non-UTF-8 bytes in log files.
        conn.text_factory = lambda x: unicode(x, 'utf-8', 'ignore')
    c = conn.cursor()
    c.execute('PRAGMA FOREIGN_KEYS = ON')
    # create all tables if they don't already exist
    c.executescript("""CREATE TABLE IF NOT EXISTS experiments
        (id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512),
        totaltime REAL, timelimit REAL, memorylimit REAL, runcount INTEGER,
        version VARCHAR(128), hostname VARCHAR(1024), cpuinfo TEXT,
        date DATETIME, seed INTEGER, setup TEXT);
        CREATE TABLE IF NOT EXISTS plannerConfigs
        (id INTEGER PRIMARY KEY AUTOINCREMENT,
        name VARCHAR(512) NOT NULL, settings TEXT);
        CREATE TABLE IF NOT EXISTS enums
        (name VARCHAR(512), value INTEGER, description TEXT,
        PRIMARY KEY (name, value));
        CREATE TABLE IF NOT EXISTS runs
        (id INTEGER PRIMARY KEY AUTOINCREMENT, experimentid INTEGER, plannerid INTEGER,
        FOREIGN KEY (experimentid) REFERENCES experiments(id) ON DELETE CASCADE,
        FOREIGN KEY (plannerid) REFERENCES plannerConfigs(id) ON DELETE CASCADE);
        CREATE TABLE IF NOT EXISTS progress
        (runid INTEGER, time REAL, PRIMARY KEY (runid, time),
        FOREIGN KEY (runid) REFERENCES runs(id) ON DELETE CASCADE)""")
    # Each log file contributes exactly one row to the experiments table.
    for filename in filenames:
        print('Processing ' + filename)
        logfile = open(filename, 'r')
        start_pos = logfile.tell()
        # Library name is the first token of the version line ("OMPL version x.y.z").
        libname = readOptionalLogValue(logfile, 0, {1 : "version"})
        if libname is None:
            libname = "OMPL"
        logfile.seek(start_pos)
        version = readOptionalLogValue(logfile, -1, {1 : "version"})
        if version is None:
            # set the version number to make Planner Arena happy
            version = "0.0.0"
        version = ' '.join([libname, version])
        expname = readRequiredLogValue("experiment name", logfile, -1, {0 : "Experiment"})
        # optional experiment properties
        nrexpprops = int(readOptionalLogValue(logfile, 0, \
            {-2: "experiment", -1: "properties"}) or 0)
        expprops = {}
        # Each property line looks like "name type=value".
        for _ in range(nrexpprops):
            entry = logfile.readline().strip().split('=')
            nameAndType = entry[0].split(' ')
            expprops[nameAndType[0]] = (entry[1], nameAndType[1])
        # adding columns to experiments table
        c.execute('PRAGMA table_info(experiments)')
        columnNames = [col[1] for col in c.fetchall()]
        for name in sorted(expprops.keys()):
            # only add column if it doesn't exist
            if name not in columnNames:
                c.execute('ALTER TABLE experiments ADD %s %s' % (name, expprops[name][1]))
        hostname = readRequiredLogValue("hostname", logfile, -1, {0 : "Running"})
        date = ' '.join(ensurePrefix(logfile.readline(), "Starting").split()[2:])
        # MoveIt! logs have a shorter header: no cpuinfo/seed/memory limit.
        if moveitformat:
            expsetup = readRequiredLogValue("goal name", logfile, -1, {0: "Goal", 1: "name"})
            cpuinfo = None
            rseed = 0
            timelimit = float(readRequiredLogValue("time limit", logfile, 0, \
                {-3 : "seconds", -2 : "per", -1 : "run"}))
            memorylimit = 0
        else:
            expsetup = readRequiredMultilineValue(logfile)
            cpuinfo = readOptionalMultilineValue(logfile)
            rseed = int(readRequiredLogValue("random seed", logfile, 0, \
                {-2 : "random", -1 : "seed"}))
            timelimit = float(readRequiredLogValue("time limit", logfile, 0, \
                {-3 : "seconds", -2 : "per", -1 : "run"}))
            memorylimit = float(readRequiredLogValue("memory limit", logfile, 0, \
                {-3 : "MB", -2 : "per", -1 : "run"}))
        nrrunsOrNone = readOptionalLogValue(logfile, 0, \
            {-3 : "runs", -2 : "per", -1 : "planner"})
        nrruns = -1
        if nrrunsOrNone != None:
            nrruns = int(nrrunsOrNone)
        totaltime = float(readRequiredLogValue("total time", logfile, 0, \
            {-3 : "collect", -2 : "the", -1 : "data"}))
        # Optional block of enum definitions ("name|value0|value1|...").
        numEnums = 0
        numEnumsOrNone = readOptionalLogValue(logfile, 0, {-2 : "enum"})
        if numEnumsOrNone != None:
            numEnums = int(numEnumsOrNone)
        for _ in range(numEnums):
            enum = logfile.readline()[:-1].split('|')
            c.execute('SELECT * FROM enums WHERE name IS "%s"' % enum[0])
            if c.fetchone() is None:
                for j in range(len(enum) - 1):
                    c.execute('INSERT INTO enums VALUES (?,?,?)', \
                        (enum[0], j, enum[j + 1]))
        # Creating entry in experiments table
        experimentEntries = [None, expname, totaltime, timelimit, memorylimit, nrruns, version,
                             hostname, cpuinfo, date, rseed, expsetup]
        for name in sorted(expprops.keys()): # sort to ensure correct order
            experimentEntries.append(expprops[name][0])
        c.execute('INSERT INTO experiments VALUES (' + ','.join(
            '?' for i in experimentEntries) + ')', experimentEntries)
        experimentId = c.lastrowid
        numPlanners = int(readRequiredLogValue("planner count", logfile, 0, {-1 : "planners"}))
        # Per-planner section: settings, run attributes, runs, optional progress data.
        for _ in range(numPlanners):
            plannerName = logfile.readline()[:-1]
            print('Parsing data for ' + plannerName)
            # read common data for planner
            numCommon = int(logfile.readline().split()[0])
            settings = ''
            for j in range(numCommon):
                settings = settings + logfile.readline() + ';'
            # find planner id
            c.execute('SELECT id FROM plannerConfigs WHERE (name=? AND settings=?)', \
                (plannerName, settings,))
            p = c.fetchone()
            if p is None:
                c.execute('INSERT INTO plannerConfigs VALUES (?,?,?)', \
                    (None, plannerName, settings,))
                plannerId = c.lastrowid
            else:
                plannerId = p[0]
            # get current column names
            c.execute('PRAGMA table_info(runs)')
            columnNames = [col[1] for col in c.fetchall()]
            # read properties and add columns as necessary
            numProperties = int(logfile.readline().split()[0])
            propertyNames = ['experimentid', 'plannerid']
            for j in range(numProperties):
                field = logfile.readline().split()
                propertyType = field[-1]
                propertyName = '_'.join(field[:-1])
                if propertyName not in columnNames:
                    c.execute('ALTER TABLE runs ADD %s %s' % (propertyName, propertyType))
                propertyNames.append(propertyName)
            # read measurements
            insertFmtStr = 'INSERT INTO runs (' + ','.join(propertyNames) + \
                ') VALUES (' + ','.join('?'*len(propertyNames)) + ')'
            numRuns = int(logfile.readline().split()[0])
            runIds = []
            for j in range(numRuns):
                # Empty, 'nan' and 'inf' measurements are stored as NULL.
                values = tuple([experimentId, plannerId] + \
                    [None if not x or x == 'nan' or x == 'inf' else x \
                        for x in logfile.readline().split('; ')[:-1]])
                c.execute(insertFmtStr, values)
                # extract primary key of each run row so we can reference them
                # in the planner progress data table if needed
                runIds.append(c.lastrowid)
            nextLine = logfile.readline().strip()
            # read planner progress data if it's supplied
            if nextLine != '.':
                # get current column names
                c.execute('PRAGMA table_info(progress)')
                columnNames = [col[1] for col in c.fetchall()]
                # read progress properties and add columns as necesary
                numProgressProperties = int(nextLine.split()[0])
                progressPropertyNames = ['runid']
                for i in range(numProgressProperties):
                    field = logfile.readline().split()
                    progressPropertyType = field[-1]
                    progressPropertyName = "_".join(field[:-1])
                    if progressPropertyName not in columnNames:
                        c.execute('ALTER TABLE progress ADD %s %s' % \
                            (progressPropertyName, progressPropertyType))
                    progressPropertyNames.append(progressPropertyName)
                # read progress measurements
                insertFmtStr = 'INSERT INTO progress (' + \
                    ','.join(progressPropertyNames) + ') VALUES (' + \
                    ','.join('?'*len(progressPropertyNames)) + ')'
                numRuns = int(logfile.readline().split()[0])
                for j in range(numRuns):
                    # One line per run; samples within a line are ';'-separated.
                    dataSeries = logfile.readline().split(';')[:-1]
                    for dataSample in dataSeries:
                        values = tuple([runIds[j]] + \
                            [None if not x or x == 'nan' or x == 'inf' else x \
                                for x in dataSample.split(',')[:-1]])
                        try:
                            c.execute(insertFmtStr, values)
                        except sqlite3.IntegrityError:
                            print('Ignoring duplicate progress data. Consider increasing '
                                  'ompl::tools::Benchmark::Request::timeBetweenUpdates.')
                logfile.readline()
        logfile.close()
    conn.commit()
    c.close()
def plotAttribute(cur, planners, attribute, typename):
    """Create a plot for a particular attribute. It will include data for
    all planners that have data for this attribute.

    cur       -- database cursor
    planners  -- list of (plannerid, planner name) tuples
    attribute -- name of the runs-table column to plot
    typename  -- SQL type of the column; selects the plot style
                 (stacked bars for ENUM, percentage bars for BOOLEAN,
                 box plots otherwise)
    """
    labels = []
    measurements = []
    nanCounts = []
    if typename == 'ENUM':
        # Human-readable names of the enum's values, in value order.
        cur.execute('SELECT description FROM enums where name IS "%s"' % attribute)
        descriptions = [t[0] for t in cur.fetchall()]
        numValues = len(descriptions)
    # Collect per-planner measurements, skipping planners without any data.
    # NOTE(review): attribute/planner ids are interpolated into SQL directly;
    # they come from the database itself, not from untrusted input.
    for planner in planners:
        cur.execute('SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL' \
            % (attribute, planner[0], attribute))
        measurement = [t[0] for t in cur.fetchall() if t[0] != None]
        if measurement:
            # Number of runs with a NULL value, shown above each bar/box.
            cur.execute('SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL' \
                % (planner[0], attribute))
            nanCounts.append(cur.fetchone()[0])
            labels.append(planner[1])
            if typename == 'ENUM':
                # Convert counts of each enum value into percentages.
                scale = 100. / len(measurement)
                measurements.append([measurement.count(i)*scale for i in range(numValues)])
            else:
                measurements.append(measurement)
    if not measurements:
        print('Skipping "%s": no available measurements' % attribute)
        return
    plt.clf()
    ax = plt.gca()
    if typename == 'ENUM':
        # Stacked bar chart: one stack per planner, one segment per enum value.
        width = .5
        measurements = np.transpose(np.vstack(measurements))
        colsum = np.sum(measurements, axis=1)
        rows = np.where(colsum != 0)[0]
        heights = np.zeros((1, measurements.shape[1]))
        ind = range(measurements.shape[1])
        for i in rows:
            plt.bar(ind, measurements[i], width, bottom=heights[0], \
                color=matplotlib.cm.hot(int(floor(i * 256 / numValues))), \
                label=descriptions[i])
            heights = heights + measurements[i]
        xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
        ax.set_ylabel(attribute.replace('_', ' ') + ' (%)')
        # Shrink the axes to make room for the legend on the right.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        props = matplotlib.font_manager.FontProperties()
        props.set_size('small')
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop=props)
    elif typename == 'BOOLEAN':
        # Bar chart of the percentage of runs where the attribute was true.
        width = .5
        measurementsPercentage = [sum(m) * 100. / len(m) for m in measurements]
        ind = range(len(measurements))
        plt.bar(ind, measurementsPercentage, width)
        xtickNames = plt.xticks([x + width / 2. for x in ind], labels, rotation=30)
        ax.set_ylabel(attribute.replace('_', ' ') + ' (%)')
    else:
        # Box plot for numeric attributes; the bootstrap keyword only
        # exists in matplotlib >= 1.0.
        if int(matplotlibversion.split('.')[0]) < 1:
            plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
        else:
            plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
        ax.set_ylabel(attribute.replace('_', ' '))
    xtickNames = plt.setp(ax, xticklabels=labels)
    plt.setp(xtickNames, rotation=25)
    ax.set_xlabel('Motion planning algorithm')
    ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
    if max(nanCounts) > 0:
        # Annotate each planner with its number of missing (NULL) runs.
        maxy = max([max(y) for y in measurements])
        for i in range(len(labels)):
            # Box plots are 1-indexed on the x axis; bar charts start at 0.
            x = i + width / 2 if typename == 'BOOLEAN' else i + 1
            ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')
    plt.show()
def plotProgressAttribute(cur, planners, attribute):
    """Plot data for a single planner progress attribute. Will create an
    average time-plot with error bars of the attribute over all runs for
    each planner.

    cur       -- database cursor
    planners  -- list of (plannerid, planner name) tuples
    attribute -- name of the progress-table column to plot
    """
    import numpy.ma as ma
    plt.clf()
    ax = plt.gca()
    ax.set_xlabel('time (s)')
    ax.set_ylabel(attribute.replace('_', ' '))
    plannerNames = []
    for planner in planners:
        # Skip planners that never reported this progress attribute.
        cur.execute("""SELECT count(progress.%s) FROM progress INNER JOIN runs
            ON progress.runid = runs.id AND runs.plannerid=%s
            AND progress.%s IS NOT NULL""" \
            % (attribute, planner[0], attribute))
        if cur.fetchone()[0] > 0:
            plannerNames.append(planner[1])
            cur.execute("""SELECT DISTINCT progress.runid FROM progress INNER JOIN runs
                WHERE progress.runid=runs.id AND runs.plannerid=?""", (planner[0],))
            runids = [t[0] for t in cur.fetchall()]
            timeTable = []
            dataTable = []
            for r in runids:
                # Select data for given run
                cur.execute('SELECT time, %s FROM progress WHERE runid = %s ORDER BY time' % \
                    (attribute, r))
                (time, data) = zip(*(cur.fetchall()))
                timeTable.append(time)
                dataTable.append(data)
            # It's conceivable that the sampling process may have
            # generated more samples for one run than another; in this
            # case, truncate all data series to length of shortest
            # one.
            fewestSamples = min(len(time[:]) for time in timeTable)
            times = np.array(timeTable[0][:fewestSamples])
            dataArrays = np.array([data[:fewestSamples] for data in dataTable])
            # Mask missing (None) samples so they don't bias mean/stddev.
            filteredData = ma.masked_array(dataArrays, np.equal(dataArrays, None), dtype=float)
            means = np.mean(filteredData, axis=0)
            stddevs = np.std(filteredData, axis=0, ddof=1)
            # plot average with error bars
            plt.errorbar(times, means, yerr=2*stddevs, errorevery=max(1, len(times) // 20))
    ax.legend(plannerNames)
    if plannerNames:
        plt.show()
    else:
        plt.clf()
def plotStatistics(dbname, fname):
    """Create a PDF file with box plots for all attributes.

    dbname -- filename of the sqlite3 benchmark database
    fname  -- output PDF filename
    """
    print("Generating plots...")
    conn = sqlite3.connect(dbname)
    c = conn.cursor()
    c.execute('PRAGMA FOREIGN_KEYS = ON')
    c.execute('SELECT id, name FROM plannerConfigs')
    planners = [(t[0], t[1].replace('geometric_', '').replace('control_', '')) \
        for t in c.fetchall()]
    c.execute('PRAGMA table_info(runs)')
    # Skip the id/experimentid/plannerid bookkeeping columns.
    colInfo = c.fetchall()[3:]
    pp = PdfPages(fname)
    # One page per plottable run attribute.
    for col in colInfo:
        if col[2] == 'BOOLEAN' or col[2] == 'ENUM' or \
           col[2] == 'INTEGER' or col[2] == 'REAL':
            plotAttribute(c, planners, col[1], col[2])
            pp.savefig(plt.gcf())
    c.execute('PRAGMA table_info(progress)')
    # Skip the runid/time bookkeeping columns.
    colInfo = c.fetchall()[2:]
    for col in colInfo:
        plotProgressAttribute(c, planners, col[1])
        pp.savefig(plt.gcf())
    plt.clf()
    # Final page: a text summary of each experiment's parameters.
    pagey = 0.9
    pagex = 0.06
    c.execute("""SELECT id, name, timelimit, memorylimit FROM experiments""")
    experiments = c.fetchall()
    for experiment in experiments:
        c.execute("""SELECT count(*) FROM runs WHERE runs.experimentid = %d
            GROUP BY runs.plannerid""" % experiment[0])
        numRuns = [run[0] for run in c.fetchall()]
        # count(*) yields ints; convert before joining — the original
        # ','.join(numRuns) raised TypeError whenever planners had
        # unequal run counts.
        numRuns = numRuns[0] if len(set(numRuns)) == 1 else \
            ','.join(str(r) for r in numRuns)
        plt.figtext(pagex, pagey, 'Experiment "%s"' % experiment[1])
        # %s instead of %d: numRuns may be a comma-separated string here.
        plt.figtext(pagex, pagey-0.05, 'Number of averaged runs: %s' % numRuns)
        plt.figtext(pagex, pagey-0.10, "Time limit per run: %g seconds" % experiment[2])
        plt.figtext(pagex, pagey-0.15, "Memory limit per run: %g MB" % experiment[3])
        plt.show()
        pp.savefig(plt.gcf())
    pp.close()
def saveAsMysql(dbname, mysqldump):
    """Convert the sqlite3 database to a MySQL dump file.

    dbname    -- filename of the sqlite3 benchmark database
    mysqldump -- filename of the MySQL dump file to write
    """
    # See http://stackoverflow.com/questions/1067060/perl-to-python
    import re
    print("Saving as MySQL dump file...")

    conn = sqlite3.connect(dbname)
    mysqldump = open(mysqldump, 'w')

    # make sure all tables are dropped in an order that keeps foreign keys valid
    c = conn.cursor()
    c.execute("SELECT name FROM sqlite_master WHERE type='table'")
    table_names = [str(t[0]) for t in c.fetchall()]
    c.close()

    last = ['experiments', 'planner_configs']
    for table in table_names:
        if table.startswith("sqlite"):
            continue
        if not table in last:
            mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
    for table in last:
        if table in table_names:
            mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)

    # Translate each line of the sqlite dump into MySQL syntax.
    for line in conn.iterdump():
        process = False
        for nope in ('BEGIN TRANSACTION', 'COMMIT', \
            'sqlite_sequence', 'CREATE UNIQUE INDEX', 'CREATE VIEW'):
            if nope in line:
                break
        else:
            # for/else: no skip pattern matched, so keep this line.
            process = True
        if not process:
            continue
        # Collapse all whitespace runs to single spaces.
        line = re.sub(r"[\n\r\t ]+", " ", line)
        m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
        if m:
            name, sub = m.groups()
            sub = sub.replace('"', '`')
            line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
            line = line % dict(name=name, sub=sub)
            # make sure we use an engine that supports foreign keys
            line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
        else:
            m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
            if m:
                line = 'INSERT INTO %s%s\n' % m.groups()
                line = line.replace('"', r'\"')
                line = line.replace('"', "'")
        # Rewrite sqlite's 't'/'f' boolean literals as 1/0; the temporary
        # placeholder avoids rewriting the same character twice.
        line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
        line = line.replace('THIS_IS_TRUE', '1')
        line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
        line = line.replace('THIS_IS_FALSE', '0')
        line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
        mysqldump.write(line)
    mysqldump.close()
def computeViews(dbname, moveitformat):
    """(Re)create the database views listing the best planner configurations.

    dbname       -- filename of the sqlite3 benchmark database
    moveitformat -- True when the logs were produced by MoveIt! (whose runs
                    table stores the full time in ``total_time`` directly)
    """
    conn = sqlite3.connect(dbname)
    c = conn.cursor()
    c.execute('PRAGMA FOREIGN_KEYS = ON')
    c.execute('PRAGMA table_info(runs)')
    # s0 joins runs with their planner/experiment and computes total_time.
    if moveitformat:
        s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, total_time
            FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
            ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
    # kinodynamic paths cannot be simplified (or least not easily),
    # so simplification_time may not exist as a database column
    elif 'simplification_time' in [col[1] for col in c.fetchall()]:
        s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time + simplification_time AS total_time
            FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
            ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
    else:
        s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time AS total_time
            FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
            ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
    # Best planner configuration per experiment, ranked by solve rate
    # and then by average total time.
    s1 = """SELECT plannerid, plannerName, experimentid, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
        FROM (%s) GROUP BY plannerid, experimentid""" % s0
    s2 = """SELECT plannerid, experimentid, MIN(avg_solved) AS avg_solved, avg_total_time
        FROM (%s) GROUP BY plannerName, experimentid ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
    c.execute('DROP VIEW IF EXISTS bestPlannerConfigsPerExperiment')
    c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigsPerExperiment AS %s' % s2)
    # Best planner configuration over all experiments combined.
    s1 = """SELECT plannerid, plannerName, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
        FROM (%s) GROUP BY plannerid""" % s0
    s2 = """SELECT plannerid, MIN(avg_solved) AS avg_solved, avg_total_time
        FROM (%s) GROUP BY plannerName ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
    c.execute('DROP VIEW IF EXISTS bestPlannerConfigs')
    c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigs AS %s' % s2)
    conn.commit()
    c.close()
if __name__ == "__main__":
    # Command-line entry point: parse options, ingest logs, recompute
    # views, and optionally produce plots and/or a MySQL dump.
    usage = """%prog [options] [<benchmark.log> ...]"""
    parser = OptionParser(usage)
    parser.add_option("-d", "--database", dest="dbname", default="benchmark.db", \
        help="Filename of benchmark database [default: %default]")
    parser.add_option("-a", "--append", action="store_true", dest="append", default=False, \
        help="Append data to database (as opposed to overwriting an existing database)")
    parser.add_option("-v", "--view", action="store_true", dest="view", default=False, \
        help="Compute the views for best planner configurations")
    if plottingEnabled:
        parser.add_option("-p", "--plot", dest="plot", default=None, \
            help="Create a PDF of plots")
    parser.add_option("-m", "--mysql", dest="mysqldb", default=None, \
        help="Save SQLite3 database as a MySQL dump file")
    parser.add_option("--moveit", action="store_true", dest="moveit", default=False, \
        help="Log files are produced by MoveIt!")
    (options, args) = parser.parse_args()
    # Unless -a was given, start from a fresh database when new logs are supplied.
    if not options.append and exists(options.dbname) and args:
        os.remove(options.dbname)
    if args:
        readBenchmarkLog(options.dbname, args, options.moveit)
        # If we update the database, we recompute the views as well
        options.view = True
    if options.view:
        computeViews(options.dbname, options.moveit)
    if plottingEnabled and options.plot:
        plotStatistics(options.dbname, options.plot)
    if options.mysqldb:
        saveAsMysql(options.dbname, options.mysqldb)
| [
"matplotlib.backends.backend_pdf.PdfPages",
"os.remove",
"numpy.sum",
"optparse.OptionParser",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.bar",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.font_manager.FontProperties",
"numpy.std",
"os.path.exists",
"matpl... | [((1977, 1998), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (1991, 1998), False, 'import matplotlib\n'), ((4389, 4412), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (4404, 4412), False, 'import sqlite3\n'), ((15219, 15228), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15226, 15228), True, 'import matplotlib.pyplot as plt\n'), ((15238, 15247), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15245, 15247), True, 'import matplotlib.pyplot as plt\n'), ((17369, 17379), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17377, 17379), True, 'import matplotlib.pyplot as plt\n'), ((17625, 17634), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (17632, 17634), True, 'import matplotlib.pyplot as plt\n'), ((17644, 17653), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (17651, 17653), True, 'import matplotlib.pyplot as plt\n'), ((19726, 19749), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (19741, 19749), False, 'import sqlite3\n'), ((20061, 20076), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['fname'], {}), '(fname)\n', (20069, 20076), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((20484, 20493), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20491, 20493), True, 'import matplotlib.pyplot as plt\n'), ((21263, 21273), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21271, 21273), True, 'import matplotlib.pyplot as plt\n'), ((21488, 21511), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (21503, 21511), False, 'import sqlite3\n'), ((23570, 23593), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (23585, 23593), False, 'import sqlite3\n'), ((25859, 25878), 'optparse.OptionParser', 'OptionParser', (['usage'], {}), '(usage)\n', (25871, 25878), False, 'from optparse import OptionParser\n'), ((15372, 15400), 'numpy.sum', 'np.sum', (['measurements'], {'axis': '(1)'}), 
'(measurements, axis=1)\n', (15378, 15400), True, 'import numpy as np\n'), ((15459, 15495), 'numpy.zeros', 'np.zeros', (['(1, measurements.shape[1])'], {}), '((1, measurements.shape[1]))\n', (15467, 15495), True, 'import numpy as np\n'), ((15816, 15881), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[(x + width / 2.0) for x in ind]', 'labels'], {'rotation': '(30)'}), '([(x + width / 2.0) for x in ind], labels, rotation=30)\n', (15826, 15881), True, 'import matplotlib.pyplot as plt\n'), ((16054, 16094), 'matplotlib.font_manager.FontProperties', 'matplotlib.font_manager.FontProperties', ([], {}), '()\n', (16092, 16094), False, 'import matplotlib\n'), ((19544, 19554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19552, 19554), True, 'import matplotlib.pyplot as plt\n'), ((19573, 19582), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (19580, 19582), True, 'import matplotlib.pyplot as plt\n'), ((20942, 21002), 'matplotlib.pyplot.figtext', 'plt.figtext', (['pagex', 'pagey', '(\'Experiment "%s"\' % experiment[1])'], {}), '(pagex, pagey, \'Experiment "%s"\' % experiment[1])\n', (20953, 21002), True, 'import matplotlib.pyplot as plt\n'), ((21011, 21084), 'matplotlib.pyplot.figtext', 'plt.figtext', (['pagex', '(pagey - 0.05)', "('Number of averaged runs: %d' % numRuns)"], {}), "(pagex, pagey - 0.05, 'Number of averaged runs: %d' % numRuns)\n", (21022, 21084), True, 'import matplotlib.pyplot as plt\n'), ((21091, 21176), 'matplotlib.pyplot.figtext', 'plt.figtext', (['pagex', '(pagey - 0.1)', "('Time limit per run: %g seconds' % experiment[2])"], {}), "(pagex, pagey - 0.1, 'Time limit per run: %g seconds' %\n experiment[2])\n", (21102, 21176), True, 'import matplotlib.pyplot as plt\n'), ((21180, 21259), 'matplotlib.pyplot.figtext', 'plt.figtext', (['pagex', '(pagey - 0.15)', "('Memory limit per run: %g MB' % experiment[3])"], {}), "(pagex, pagey - 0.15, 'Memory limit per run: %g MB' % experiment[3])\n", (21191, 21259), True, 'import matplotlib.pyplot as 
plt\n'), ((21289, 21298), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21296, 21298), True, 'import matplotlib.pyplot as plt\n'), ((22478, 22512), 're.sub', 're.sub', (['"""[\\\\n\\\\r\\\\t ]+"""', '""" """', 'line'], {}), "('[\\\\n\\\\r\\\\t ]+', ' ', line)\n", (22484, 22512), False, 'import re\n'), ((22523, 22574), 're.search', 're.search', (['"""CREATE TABLE ([a-zA-Z0-9_]*)(.*)"""', 'line'], {}), "('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)\n", (22532, 22574), False, 'import re\n'), ((23184, 23234), 're.sub', 're.sub', (['"""([^\'])\'t\'(.)"""', '"""\\\\1THIS_IS_TRUE\\\\2"""', 'line'], {}), '("([^\'])\'t\'(.)", \'\\\\1THIS_IS_TRUE\\\\2\', line)\n', (23190, 23234), False, 'import re\n'), ((23300, 23351), 're.sub', 're.sub', (['"""([^\'])\'f\'(.)"""', '"""\\\\1THIS_IS_FALSE\\\\2"""', 'line'], {}), '("([^\'])\'f\'(.)", \'\\\\1THIS_IS_FALSE\\\\2\', line)\n', (23306, 23351), False, 'import re\n'), ((26843, 26865), 'os.path.exists', 'exists', (['options.dbname'], {}), '(options.dbname)\n', (26849, 26865), False, 'from os.path import exists\n'), ((26884, 26909), 'os.remove', 'os.remove', (['options.dbname'], {}), '(options.dbname)\n', (26893, 26909), False, 'import os\n'), ((15330, 15353), 'numpy.vstack', 'np.vstack', (['measurements'], {}), '(measurements)\n', (15339, 15353), True, 'import numpy as np\n'), ((15416, 15437), 'numpy.where', 'np.where', (['(colsum != 0)'], {}), '(colsum != 0)\n', (15424, 15437), True, 'import numpy as np\n'), ((16379, 16422), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'measurementsPercentage', 'width'], {}), '(ind, measurementsPercentage, width)\n', (16386, 16422), True, 'import matplotlib.pyplot as plt\n'), ((16444, 16509), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[(x + width / 2.0) for x in ind]', 'labels'], {'rotation': '(30)'}), '([(x + width / 2.0) for x in ind], labels, rotation=30)\n', (16454, 16509), True, 'import matplotlib.pyplot as plt\n'), ((16882, 16914), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], 
{'xticklabels': 'labels'}), '(ax, xticklabels=labels)\n', (16890, 16914), True, 'import matplotlib.pyplot as plt\n'), ((16923, 16956), 'matplotlib.pyplot.setp', 'plt.setp', (['xtickNames'], {'rotation': '(25)'}), '(xtickNames, rotation=25)\n', (16931, 16956), True, 'import matplotlib.pyplot as plt\n'), ((19018, 19056), 'numpy.array', 'np.array', (['timeTable[0][:fewestSamples]'], {}), '(timeTable[0][:fewestSamples])\n', (19026, 19056), True, 'import numpy as np\n'), ((19082, 19136), 'numpy.array', 'np.array', (['[data[:fewestSamples] for data in dataTable]'], {}), '([data[:fewestSamples] for data in dataTable])\n', (19090, 19136), True, 'import numpy as np\n'), ((19254, 19283), 'numpy.mean', 'np.mean', (['filteredData'], {'axis': '(0)'}), '(filteredData, axis=0)\n', (19261, 19283), True, 'import numpy as np\n'), ((19306, 19342), 'numpy.std', 'np.std', (['filteredData'], {'axis': '(0)', 'ddof': '(1)'}), '(filteredData, axis=0, ddof=1)\n', (19312, 19342), True, 'import numpy as np\n'), ((20469, 20478), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20476, 20478), True, 'import matplotlib.pyplot as plt\n'), ((22946, 22998), 're.search', 're.search', (['"""INSERT INTO "([a-zA-Z0-9_]*)"(.*)"""', 'line'], {}), '(\'INSERT INTO "([a-zA-Z0-9_]*)"(.*)\', line)\n', (22955, 22998), False, 'import re\n'), ((16642, 16704), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['measurements'], {'notch': '(0)', 'sym': '"""k+"""', 'vert': '(1)', 'whis': '(1.5)'}), "(measurements, notch=0, sym='k+', vert=1, whis=1.5)\n", (16653, 16704), True, 'import matplotlib.pyplot as plt\n'), ((16731, 16809), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['measurements'], {'notch': '(0)', 'sym': '"""k+"""', 'vert': '(1)', 'whis': '(1.5)', 'bootstrap': '(1000)'}), "(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)\n", (16742, 16809), True, 'import matplotlib.pyplot as plt\n'), ((19192, 19218), 'numpy.equal', 'np.equal', (['dataArrays', 'None'], {}), '(dataArrays, None)\n', 
(19200, 19218), True, 'import numpy as np\n'), ((20287, 20296), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20294, 20296), True, 'import matplotlib.pyplot as plt\n'), ((16592, 16620), 'matplotlib.__version__.split', 'matplotlibversion.split', (['"""."""'], {}), "('.')\n", (16615, 16620), True, 'from matplotlib import __version__ as matplotlibversion\n'), ((15676, 15702), 'math.floor', 'floor', (['(i * 256 / numValues)'], {}), '(i * 256 / numValues)\n', (15681, 15702), False, 'from math import floor\n')] |
import os
import numpy as np
from wavedata.tools.core import calib_utils
class ObjectLabel:
    """Kitti-style object label.

    Fields follow the Kitti label file layout:
    1 type        object class string: 'Car', 'Van', 'Truck', 'Pedestrian',
                  'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare'
    1 truncated   float in [0, 1]; fraction of the object outside image bounds
    1 occluded    occlusion state: 0 fully visible, 1 partly occluded,
                  2 largely occluded, 3 unknown
    1 alpha       observation angle of the object, in [-pi, pi]
    4 bbox        2D bounding box in the image (0-based pixel coordinates):
                  left, top, right, bottom
    3 dimensions  3D object size: height, width, length (meters)
    3 location    3D object position x, y, z in camera coordinates (meters)
    1 rotation_y  rotation around the camera Y-axis, in [-pi, pi]
    1 score       detection confidence (results only); higher is better
    """

    def __init__(self):
        # Object class string ('Car', 'Pedestrian', ...)
        self.type = ""
        self.truncation = 0.
        self.occlusion = 0.
        self.alpha = 0.
        # 2D box corners: left, top, right, bottom
        self.x1 = 0.
        self.y1 = 0.
        self.x2 = 0.
        self.y2 = 0.
        # 3D box dimensions: height, width, length
        self.h = 0.
        self.w = 0.
        self.l = 0.
        # 3D location (x, y, z) in camera coordinates
        self.t = (0., 0., 0.)
        self.ry = 0.
        self.score = 0.

    def __eq__(self, other):
        """Return True when *other* is an ObjectLabel with identical fields."""
        return isinstance(other, ObjectLabel) and self.__dict__ == other.__dict__
def read_labels(label_dir, img_idx, results=False):
    """Reads in label data file from Kitti Dataset.

    Keyword arguments:
    label_dir -- directory of the label files
    img_idx -- index of the image
    results -- if True, the file carries a 16th column (detection score)

    Returns:
    obj_list -- List of instances of class ObjectLabel, or None when the
        label file for this image index is empty.
    """
    label_path = label_dir + "/%06d.txt" % img_idx

    # An empty label file means no annotations for this frame
    if os.stat(label_path).st_size == 0:
        return

    # Results files carry one extra column (the detection score)
    num_cols = 16 if results else 15
    p = np.loadtxt(label_path, delimiter=' ', dtype=str,
                   usecols=np.arange(start=0, step=1, stop=num_cols))

    # A single-label file loads as a 1-D row; promote it so the loop below
    # can treat every file uniformly as (num_labels, num_cols)
    p = np.atleast_2d(p)

    obj_list = []
    for row in p:
        obj = ObjectLabel()
        obj.type = row[0]
        obj.truncation = float(row[1])
        obj.occlusion = float(row[2])
        obj.alpha = float(row[3])
        # 2D bounding box: left, top, right, bottom
        obj.x1 = float(row[4])
        obj.y1 = float(row[5])
        obj.x2 = float(row[6])
        obj.y2 = float(row[7])
        # 3D dimensions and location
        obj.h = float(row[8])
        obj.w = float(row[9])
        obj.l = float(row[10])
        obj.t = (float(row[11]), float(row[12]), float(row[13]))
        obj.ry = float(row[14])
        obj.score = float(row[15]) if results else 0.0
        obj_list.append(obj)

    return obj_list
def build_bbs_from_objects(obj_list, class_needed):
    """ Converts between a list of objects and a numpy array containing the
    bounding boxes.

    :param obj_list: an object list as per object class
    :param class_needed: 'Car', 'Pedestrian' ... If no class filtering is
        needed use 'All'

    :return boxes_2d: (N, 4) numpy array of 2D boxes [x1, y1, x2, y2]
    :return boxes_3d: (N, 7) numpy array of 3D boxes
        [ry, l, h, w, tx, ty, tz]
    :return scores: list of detection scores, one per kept object
    """
    # Select the detections that match the requested class(es)
    if class_needed == 'All':
        obj_detections = obj_list
    elif isinstance(class_needed, str):
        obj_detections = [det for det in obj_list
                          if det.type == class_needed]
    elif isinstance(class_needed, list):
        obj_detections = [det for det in obj_list
                          if det.type in class_needed]
    else:
        raise TypeError("Invalid type for class_needed, {} should be "
                        "str or list".format(type(class_needed)))

    num_objs = len(obj_detections)
    scores = [det.score for det in obj_detections]

    # Fill one row per detection instead of building per-field columns
    boxes_2d = np.zeros((num_objs, 4))
    boxes_3d = np.zeros((num_objs, 7))  # [ry, l, h, w, tx, ty, tz]
    for row, det in enumerate(obj_detections):
        boxes_2d[row] = [det.x1, det.y1, det.x2, det.y2]
        boxes_3d[row] = [det.ry, det.l, det.h, det.w,
                         det.t[0], det.t[1], det.t[2]]

    return boxes_2d, boxes_3d, scores
def get_lidar_point_cloud(img_idx, calib_dir, velo_dir,
                          im_size=None, min_intensity=None):
    """ Calculates the lidar point cloud, and optionally returns only the
    points that are projected to the image.

    :param img_idx: image index
    :param calib_dir: directory with calibration files
    :param velo_dir: directory with velodyne files
    :param im_size: (optional) 2 x 1 list containing the size of the image
                    to filter the point cloud [w, h]
    :param min_intensity: (optional) minimum intensity required to keep a point

    :return: (3, N) point_cloud in the form [[x,...][y,...][z,...]]; when
        im_size is given, only points in front of the camera that project
        inside the image bounds are kept.
    """
    # Read calibration info
    frame_calib = calib_utils.read_calibration(calib_dir, img_idx)
    x, y, z, i = calib_utils.read_lidar(velo_dir=velo_dir, img_idx=img_idx)

    # Calculate the point cloud
    pts = np.vstack((x, y, z)).T
    # Transform points from the lidar frame into the camera frame
    pts = calib_utils.lidar_to_cam_frame(pts, frame_calib)

    # The given image is assumed to be a 2D image
    if not im_size:
        point_cloud = pts.T
        return point_cloud

    # Only keep points in front of camera (positive z)
    front_of_cam = pts[:, 2] > 0
    pts = pts[front_of_cam]
    # BUG FIX: keep the intensity array aligned with the filtered points;
    # previously 'i' kept its original length, so the intensity mask below
    # had the wrong shape whenever min_intensity was provided
    i = i[front_of_cam]
    point_cloud = pts.T

    # Project to image frame
    point_in_im = calib_utils.project_to_image(point_cloud, p=frame_calib.p2).T

    # Filter based on the given image size
    image_filter = (point_in_im[:, 0] > 0) & \
                   (point_in_im[:, 0] < im_size[0]) & \
                   (point_in_im[:, 1] > 0) & \
                   (point_in_im[:, 1] < im_size[1])

    if not min_intensity:
        return pts[image_filter].T

    intensity_filter = i > min_intensity
    point_filter = np.logical_and(image_filter, intensity_filter)
    return pts[point_filter].T
def get_road_plane(img_idx, planes_dir):
    """Reads the road plane from file

    :param int img_idx: Index of image
    :param str planes_dir: directory containing plane text files

    :return plane: numpy array of plane coefficients [a, b, c, d],
        normalized so that the normal (a, b, c) is a unit vector facing up
        (negative y, since +y points down in Kitti's camera frame)
    """
    plane_file = planes_dir + '/%06d.txt' % img_idx

    # 'with' already closes the file; the old explicit close() was redundant
    with open(plane_file, 'r') as input_file:
        lines = input_file.readlines()

    # Plane coefficients stored in 4th row
    coefficients = [float(value) for value in lines[3].split()]
    plane = np.asarray(coefficients)

    # Ensure normal is always facing up.
    # In Kitti's frame of reference, +y is down
    if plane[1] > 0:
        plane = -plane

    # Normalize the plane coefficients
    plane = plane / np.linalg.norm(plane[0:3])
    return plane
def compute_box_corners_3d(object_label):
    """Computes the 3D bounding box corner positions from an ObjectLabel

    :param object_label: ObjectLabel to compute corners from
    :return: (3, 8) numpy array of corner positions in camera coordinates,
        bottom face first (columns 0-3), then top face (columns 4-7)
    """
    # Rotation about the camera Y-axis
    cos_ry = np.cos(object_label.ry)
    sin_ry = np.sin(object_label.ry)
    rot = np.array([[cos_ry, 0, sin_ry],
                    [0, 1, 0],
                    [-sin_ry, 0, cos_ry]])

    half_l = object_label.l / 2
    half_w = object_label.w / 2
    height = object_label.h

    # Box corners in the object frame (origin at the bottom-face center)
    x_corners = np.array([half_l, half_l, -half_l, -half_l,
                          half_l, half_l, -half_l, -half_l])
    y_corners = np.array([0, 0, 0, 0, -height, -height, -height, -height])
    z_corners = np.array([half_w, -half_w, -half_w, half_w,
                          half_w, -half_w, -half_w, half_w])

    # Rotate, then translate to the object's location
    corners_3d = rot.dot(np.array([x_corners, y_corners, z_corners]))
    corners_3d += np.reshape(object_label.t, (3, 1))
    return corners_3d
def project_box3d_to_image(corners_3d, p):
    """Computes the 3D bounding box projected onto
    image space.

    Keyword arguments:
    corners_3d -- (3, 8) corner positions, as from compute_box_corners_3d
    p -- transform matrix

    Returns:
        corners : numpy array of corner points projected
        onto image space.
        face_idx: (4, 4) numpy array of corner indices, one box face per row
    """
    # Corner indices for each face of the 3D bounding box
    face_idx = np.array([[0, 1, 5, 4],    # front face
                         [1, 2, 6, 5],    # left face
                         [2, 3, 7, 6],    # back face
                         [3, 0, 4, 7]])   # right face
    corners = calib_utils.project_to_image(corners_3d, p)
    return corners, face_idx
def compute_orientation_3d(obj, p):
    """Computes the orientation given object and camera matrix

    Keyword arguments:
    obj -- object file to draw bounding box
    p -- transform matrix

    Returns the projected orientation segment, or None when any of its
    endpoints falls behind (or too close to) the camera plane.
    """
    # Rotation about the camera Y-axis
    cos_ry = np.cos(obj.ry)
    sin_ry = np.sin(obj.ry)
    rot = np.array([[cos_ry, 0, sin_ry],
                    [0, 1, 0],
                    [-sin_ry, 0, cos_ry]])

    # Orientation segment: from the object center along its length axis
    orientation3d = np.array([0.0, obj.l, 0.0, 0.0, 0.0, 0.0]).reshape(3, 2)
    orientation3d = rot.dot(orientation3d)
    orientation3d += np.reshape(obj.t, (3, 1))

    # only draw for boxes that are in front of the camera
    if np.any(orientation3d[2, :] < 0.1):
        return None
    return calib_utils.project_to_image(orientation3d, p)
def is_point_inside(points, box_corners):
    """Check if each point in a 3D point cloud lies within the 3D bounding box

    With the bottom face of the box defined by [P1, P2, P3, P4] and the top
    face by [P5, P6, P7, P8], three perpendicular edge directions are:
        u = P2 - P1,  v = P4 - P1,  w = P5 - P1
    A point x lies strictly inside the box when, for every edge, the dot
    product of the edge with x falls between the dot products of the edge
    with the two endpoints of that edge.

    :param points: (3, N) point cloud to test in the form
        [[x1...xn], [y1...yn], [z1...zn]]
    :param box_corners: 3D corners of the bounding box
    :return bool mask of which points are within the bounding box.
    """
    p1 = box_corners[:, 0]
    p2 = box_corners[:, 1]
    p4 = box_corners[:, 3]
    p5 = box_corners[:, 4]

    # Accumulate one strict "between the endpoints" band per edge direction
    mask = None
    for edge, far_corner in ((p2 - p1, p2), (p4 - p1, p4), (p5 - p1, p5)):
        projections = np.dot(edge, points)
        band = (np.dot(edge, p1) < projections) & \
               (projections < np.dot(edge, far_corner))
        mask = band if mask is None else (mask & band)
    return mask
def get_point_filter(point_cloud, extents, ground_plane=None, offset_dist=2.0):
    """
    Creates a point filter using the 3D extents and ground plane

    :param point_cloud: Point cloud in the form [[x,...],[y,...],[z,...]]
    :param extents: 3D area in the form
        [[min_x, max_x], [min_y, max_y], [min_z, max_z]]
    :param ground_plane: Optional, coefficients of the ground plane
        (a, b, c, d)
    :param offset_dist: If ground_plane is provided, removes points above
        this offset from the ground_plane
    :return: A binary mask for points within the extents and offset plane
    """
    point_cloud = np.asarray(point_cloud)

    # Keep points strictly inside the extents on every axis; broadcasting
    # (3, N) against (3, 1) bounds replaces the six explicit comparisons
    bounds = np.asarray(extents)
    extents_filter = np.all(
        (point_cloud > bounds[:, 0:1]) & (point_cloud < bounds[:, 1:2]),
        axis=0)

    if ground_plane is None:
        # Only use the extents for filtering
        return extents_filter

    # Shift the plane up by offset_dist and keep points below the result
    shifted_plane = np.array(ground_plane) + [0, 0, 0, -offset_dist]
    ones_row = np.ones(point_cloud.shape[1])
    homogeneous_points = np.vstack([point_cloud, ones_row])
    plane_filter = np.dot(shifted_plane, homogeneous_points) < 0

    return np.logical_and(extents_filter, plane_filter)
| [
"wavedata.tools.core.calib_utils.project_to_image",
"wavedata.tools.core.calib_utils.read_calibration",
"numpy.logical_and",
"os.stat",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.sin",
"wavedata.tools.core.calib_utils.lidar_to_cam_frame",
"numpy.arange",
"wavedata.tools.core.calib_util... | [((3056, 3076), 'numpy.arange', 'np.arange', (['label_num'], {}), '(label_num)\n', (3065, 3076), True, 'import numpy as np\n'), ((6754, 6777), 'numpy.zeros', 'np.zeros', (['(num_objs, 4)'], {}), '((num_objs, 4))\n', (6762, 6777), True, 'import numpy as np\n'), ((6793, 6816), 'numpy.zeros', 'np.zeros', (['(num_objs, 7)'], {}), '((num_objs, 7))\n', (6801, 6816), True, 'import numpy as np\n'), ((8098, 8146), 'wavedata.tools.core.calib_utils.read_calibration', 'calib_utils.read_calibration', (['calib_dir', 'img_idx'], {}), '(calib_dir, img_idx)\n', (8126, 8146), False, 'from wavedata.tools.core import calib_utils\n'), ((8164, 8222), 'wavedata.tools.core.calib_utils.read_lidar', 'calib_utils.read_lidar', ([], {'velo_dir': 'velo_dir', 'img_idx': 'img_idx'}), '(velo_dir=velo_dir, img_idx=img_idx)\n', (8186, 8222), False, 'from wavedata.tools.core import calib_utils\n'), ((8299, 8347), 'wavedata.tools.core.calib_utils.lidar_to_cam_frame', 'calib_utils.lidar_to_cam_frame', (['pts', 'frame_calib'], {}), '(pts, frame_calib)\n', (8329, 8347), False, 'from wavedata.tools.core import calib_utils\n'), ((9857, 9874), 'numpy.asarray', 'np.asarray', (['lines'], {}), '(lines)\n', (9867, 9874), True, 'import numpy as np\n'), ((10060, 10086), 'numpy.linalg.norm', 'np.linalg.norm', (['plane[0:3]'], {}), '(plane[0:3])\n', (10074, 10086), True, 'import numpy as np\n'), ((10760, 10830), 'numpy.array', 'np.array', (['[l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]'], {}), '([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2])\n', (10768, 10830), True, 'import numpy as np\n'), ((10856, 10894), 'numpy.array', 'np.array', (['[0, 0, 0, 0, -h, -h, -h, -h]'], {}), '([0, 0, 0, 0, -h, -h, -h, -h])\n', (10864, 10894), True, 'import numpy as np\n'), ((10911, 10981), 'numpy.array', 'np.array', (['[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]'], {}), '([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2])\n', (10919, 
10981), True, 'import numpy as np\n'), ((12477, 12503), 'numpy.dot', 'np.dot', (['rot', 'orientation3d'], {}), '(rot, orientation3d)\n', (12483, 12503), True, 'import numpy as np\n'), ((12750, 12783), 'numpy.arange', 'np.arange', (['orientation3d.shape[1]'], {}), '(orientation3d.shape[1])\n', (12759, 12783), True, 'import numpy as np\n'), ((12861, 12907), 'wavedata.tools.core.calib_utils.project_to_image', 'calib_utils.project_to_image', (['orientation3d', 'p'], {}), '(orientation3d, p)\n', (12889, 12907), False, 'from wavedata.tools.core import calib_utils\n'), ((14013, 14030), 'numpy.dot', 'np.dot', (['u', 'points'], {}), '(u, points)\n', (14019, 14030), True, 'import numpy as np\n'), ((14046, 14059), 'numpy.dot', 'np.dot', (['u', 'p1'], {}), '(u, p1)\n', (14052, 14059), True, 'import numpy as np\n'), ((14075, 14088), 'numpy.dot', 'np.dot', (['u', 'p2'], {}), '(u, p2)\n', (14081, 14088), True, 'import numpy as np\n'), ((14131, 14148), 'numpy.dot', 'np.dot', (['v', 'points'], {}), '(v, points)\n', (14137, 14148), True, 'import numpy as np\n'), ((14164, 14177), 'numpy.dot', 'np.dot', (['v', 'p1'], {}), '(v, p1)\n', (14170, 14177), True, 'import numpy as np\n'), ((14193, 14206), 'numpy.dot', 'np.dot', (['v', 'p4'], {}), '(v, p4)\n', (14199, 14206), True, 'import numpy as np\n'), ((14249, 14266), 'numpy.dot', 'np.dot', (['w', 'points'], {}), '(w, points)\n', (14255, 14266), True, 'import numpy as np\n'), ((14282, 14295), 'numpy.dot', 'np.dot', (['w', 'p1'], {}), '(w, p1)\n', (14288, 14295), True, 'import numpy as np\n'), ((14311, 14324), 'numpy.dot', 'np.dot', (['w', 'p5'], {}), '(w, p5)\n', (14317, 14324), True, 'import numpy as np\n'), ((15173, 15196), 'numpy.asarray', 'np.asarray', (['point_cloud'], {}), '(point_cloud)\n', (15183, 15196), True, 'import numpy as np\n'), ((6901, 6943), 'numpy.array', 'np.array', (['[x1[it], y1[it], x2[it], y2[it]]'], {}), '([x1[it], y1[it], x2[it], y2[it]])\n', (6909, 6943), True, 'import numpy as np\n'), ((7067, 7130), 
'numpy.array', 'np.array', (['[ry[it], l[it], h[it], w[it], tx[it], ty[it], tz[it]]'], {}), '([ry[it], l[it], h[it], w[it], tx[it], ty[it], tz[it]])\n', (7075, 7130), True, 'import numpy as np\n'), ((8266, 8286), 'numpy.vstack', 'np.vstack', (['(x, y, z)'], {}), '((x, y, z))\n', (8275, 8286), True, 'import numpy as np\n'), ((9196, 9242), 'numpy.logical_and', 'np.logical_and', (['image_filter', 'intensity_filter'], {}), '(image_filter, intensity_filter)\n', (9210, 9242), True, 'import numpy as np\n'), ((11021, 11064), 'numpy.array', 'np.array', (['[x_corners, y_corners, z_corners]'], {}), '([x_corners, y_corners, z_corners])\n', (11029, 11064), True, 'import numpy as np\n'), ((11939, 11982), 'wavedata.tools.core.calib_utils.project_to_image', 'calib_utils.project_to_image', (['corners_3d', 'p'], {}), '(corners_3d, p)\n', (11967, 11982), False, 'from wavedata.tools.core import calib_utils\n'), ((15720, 15742), 'numpy.array', 'np.array', (['ground_plane'], {}), '(ground_plane)\n', (15728, 15742), True, 'import numpy as np\n'), ((15809, 15838), 'numpy.ones', 'np.ones', (['point_cloud.shape[1]'], {}), '(point_cloud.shape[1])\n', (15816, 15838), True, 'import numpy as np\n'), ((15863, 15897), 'numpy.vstack', 'np.vstack', (['[point_cloud, ones_col]'], {}), '([point_cloud, ones_col])\n', (15872, 15897), True, 'import numpy as np\n'), ((16011, 16046), 'numpy.dot', 'np.dot', (['offset_plane', 'padded_points'], {}), '(offset_plane, padded_points)\n', (16017, 16046), True, 'import numpy as np\n'), ((16141, 16185), 'numpy.logical_and', 'np.logical_and', (['extents_filter', 'plane_filter'], {}), '(extents_filter, plane_filter)\n', (16155, 16185), True, 'import numpy as np\n'), ((2433, 2475), 'os.stat', 'os.stat', (["(label_dir + '/%06d.txt' % img_idx)"], {}), "(label_dir + '/%06d.txt' % img_idx)\n", (2440, 2475), False, 'import os\n'), ((8701, 8760), 'wavedata.tools.core.calib_utils.project_to_image', 'calib_utils.project_to_image', (['point_cloud'], {'p': 'frame_calib.p2'}), 
'(point_cloud, p=frame_calib.p2)\n', (8729, 8760), False, 'from wavedata.tools.core import calib_utils\n'), ((11724, 11782), 'numpy.array', 'np.array', (['[0, 1, 5, 4, 1, 2, 6, 5, 2, 3, 7, 6, 3, 0, 4, 7]'], {}), '([0, 1, 5, 4, 1, 2, 6, 5, 2, 3, 7, 6, 3, 0, 4, 7])\n', (11732, 11782), True, 'import numpy as np\n'), ((12400, 12442), 'numpy.array', 'np.array', (['[0.0, obj.l, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, obj.l, 0.0, 0.0, 0.0, 0.0])\n', (12408, 12442), True, 'import numpy as np\n'), ((2660, 2695), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'step': '(1)', 'stop': '(16)'}), '(start=0, step=1, stop=16)\n', (2669, 2695), True, 'import numpy as np\n'), ((2845, 2880), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'step': '(1)', 'stop': '(15)'}), '(start=0, step=1, stop=15)\n', (2854, 2880), True, 'import numpy as np\n'), ((10489, 10512), 'numpy.cos', 'np.cos', (['object_label.ry'], {}), '(object_label.ry)\n', (10495, 10512), True, 'import numpy as np\n'), ((10518, 10541), 'numpy.sin', 'np.sin', (['object_label.ry'], {}), '(object_label.ry)\n', (10524, 10541), True, 'import numpy as np\n'), ((10597, 10620), 'numpy.sin', 'np.sin', (['object_label.ry'], {}), '(object_label.ry)\n', (10603, 10620), True, 'import numpy as np\n'), ((10626, 10649), 'numpy.cos', 'np.cos', (['object_label.ry'], {}), '(object_label.ry)\n', (10632, 10649), True, 'import numpy as np\n'), ((12251, 12265), 'numpy.cos', 'np.cos', (['obj.ry'], {}), '(obj.ry)\n', (12257, 12265), True, 'import numpy as np\n'), ((12271, 12285), 'numpy.sin', 'np.sin', (['obj.ry'], {}), '(obj.ry)\n', (12277, 12285), True, 'import numpy as np\n'), ((12341, 12355), 'numpy.sin', 'np.sin', (['obj.ry'], {}), '(obj.ry)\n', (12347, 12355), True, 'import numpy as np\n'), ((12361, 12375), 'numpy.cos', 'np.cos', (['obj.ry'], {}), '(obj.ry)\n', (12367, 12375), True, 'import numpy as np\n')] |
from options import opt
import os
from pathlib import Path
import json
import numpy as np
from dataset import NoteDataset, get_loader
import torch
from model import Rnn, BiRNN, NeuralNet, NeuralNetWithRNN
import torch.nn as nn
import copy
from tqdm import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau
from visual import visualization
def preprocess(data_seq, label):
    """Convert per-note annotations into frame-level training targets.

    For every song, one target per analysis frame (frame j is centered at
    j * 0.032 + 0.016 seconds) as a numpy array [is_onset, is_offset, pitch]:
      * [1, 0, pitch] when the frame lies within 17 ms of the current
        note's onset,
      * [0, 1, pitch] when it lies within 17 ms of the note's offset
        (the current note then advances to the next one),
      * [0, 0, pitch] for frames inside a note,
      * [0, 0, 0.0] for frames before the current note or past the last one.

    :param data_seq: per-song frame feature sequences (only lengths used)
    :param label: per-song note lists; each note is (onset, offset, pitch)
    :return: list (one entry per song) of lists of numpy target arrays
    """
    new_label = []
    for song_idx in range(len(label)):
        notes = label[song_idx]
        frames = data_seq[song_idx]
        song_targets = []
        note_idx = 0
        onset = notes[note_idx][0]
        offset = notes[note_idx][1]
        pitch = notes[note_idx][2]
        for frame_idx in range(len(frames)):
            frame_time = frame_idx * 0.032 + 0.016
            if abs(frame_time - onset) < 0.017:
                song_targets.append(np.array([1, 0, pitch]))
            elif frame_time < onset or note_idx >= len(notes):
                song_targets.append(np.array([0, 0, 0.0]))
            elif abs(frame_time - offset) < 0.017:
                song_targets.append(np.array([0, 1, pitch]))
                note_idx += 1
                if note_idx < len(notes):
                    onset = notes[note_idx][0]
                    offset = notes[note_idx][1]
                    pitch = notes[note_idx][2]
            else:
                song_targets.append(np.array([0, 0, pitch]))
        new_label.append(song_targets)
    return new_label
def train():
    """Train the note-transcription model.

    Builds the dataset from the module-level ``data_seq``/``label``,
    optimizes an onset/offset head (BCE-with-logits) and a pitch head
    (smooth L1), tracks the best validation loss, checkpoints every 50
    epochs, and finally restores the best weights, saves them, and plots
    the loss curves.

    :return: the model with its best-validation-loss weights loaded
    """
    data_set = NoteDataset(data_seq, label)
    train_loader, valid_loader = get_loader(data_set)

    # Alternative architectures kept for reference
    # model = Rnn(opt.input_dim, opt.hidden_size)
    # model = BiRNN(opt.input_dim, opt.hidden_size, opt.num_layers)
    # model = NeuralNet(opt.input_dim, [34, 51, 34, 17])
    model = NeuralNetWithRNN(opt.input_dim, [34, 51, 34, 17])
    model = model.cuda(opt.cuda_devices)

    best_model_params = copy.deepcopy(model.state_dict())
    best_loss = float('inf')
    training_loss_list = []
    valid_loss_list = []

    # Onset/offset flags are binary targets; pitch is a regression target
    criterion_onset = nn.BCEWithLogitsLoss()
    criterion_pitch = nn.SmoothL1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    # Halve the LR when the validation loss plateaus
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, verbose=True, min_lr=1e-8)

    record = open('record.txt', 'w')
    for epoch in range(opt.epochs):
        print(f'Epoch: {epoch + 1}/{opt.epochs}')
        print('-' * len(f'Epoch: {epoch + 1}/{opt.epochs}'))

        training_loss = 0.0
        valid_loss = 0.0
        total_length = 0.0

        model.train()
        for i, sample in enumerate(tqdm(train_loader)):
            # Batches arrive as (batch, time, feat); the model expects
            # (time, batch, feat)
            inputs = torch.FloatTensor(sample['data'])
            inputs = inputs.permute(1, 0, 2).cuda(opt.cuda_devices)
            target = torch.FloatTensor(sample['label'])
            target = target.permute(1, 0, 2).cuda(opt.cuda_devices)

            optimizer.zero_grad()
            output1, output2 = model(inputs)
            # Target columns: [:2] onset/offset flags, [2:] pitch
            onset_loss = criterion_onset(
                output1, torch.narrow(target, dim=2, start=0, length=2))
            pitch_loss = criterion_pitch(
                output2, torch.narrow(target, dim=2, start=2, length=1))
            total_loss = onset_loss + pitch_loss

            training_loss += total_loss.item()
            total_length += 1
            total_loss.backward()
            optimizer.step()

        training_loss /= total_length
        training_loss_list.append(training_loss)
        print(f'training_loss: {training_loss:.4f}')

        model.eval()
        total_length = 0
        # FIX: validation needs no autograd graph (saves memory/compute) and
        # the stray optimizer.zero_grad() in this loop served no purpose
        with torch.no_grad():
            for i, sample in enumerate(tqdm(valid_loader)):
                inputs = torch.FloatTensor(sample['data'])
                inputs = inputs.permute(1, 0, 2).cuda(opt.cuda_devices)
                target = torch.FloatTensor(sample['label'])
                target = target.permute(1, 0, 2).cuda(opt.cuda_devices)

                output1, output2 = model(inputs)
                onset_loss = criterion_onset(
                    output1, torch.narrow(target, dim=2, start=0, length=2))
                pitch_loss = criterion_pitch(
                    output2, torch.narrow(target, dim=2, start=2, length=1))
                total_loss = onset_loss + pitch_loss

                valid_loss += total_loss.item()
                total_length += 1

        valid_loss /= total_length
        valid_loss_list.append(valid_loss)
        print(f'valid_loss: {valid_loss:.4f}\n')

        scheduler.step(valid_loss)

        if valid_loss < best_loss:
            best_loss = valid_loss
            best_training_loss = training_loss
            best_model_params = copy.deepcopy(model.state_dict())

        # Periodic checkpoint of the best weights seen so far
        if (epoch + 1) % 50 == 0:
            model.load_state_dict(best_model_params)
            weight_path = Path(opt.checkpoint_dir).joinpath(
                f'model-{epoch + 1}epoch-{best_loss:.02f}-best_valid_loss.pth')
            torch.save(model, str(weight_path))
            record.write(f'{epoch + 1}\n')
            record.write(f'Best training loss: {best_training_loss:.4f}\n')
            record.write(f'Best valid loss: {best_loss:.4f}\n')

    print(f'Best training loss: {best_training_loss:.4f}')
    print(f'Best valid loss: {best_loss:.4f}')

    model.load_state_dict(best_model_params)
    weight_path = Path(opt.checkpoint_dir).joinpath(f'model-{best_loss:.02f}-best_valid_loss.pth')
    torch.save(model, str(weight_path))
    # FIX: close the record file (previously left open)
    record.close()
    visualization(training_loss_list, valid_loss_list)
    return model
if __name__ == '__main__':
    THE_FOLDER = opt.data_root

    data_seq = []
    label = []
    # Each song lives in its own sub-directory holding a feature json,
    # a ground-truth note file, and a youtube link file
    for the_dir in os.listdir(THE_FOLDER):
        song_dir = Path(THE_FOLDER).joinpath(the_dir)
        json_path = song_dir.joinpath(the_dir + '_feature.json')
        gt_path = song_dir.joinpath(the_dir + '_groundtruth.txt')
        youtube_link_path = song_dir.joinpath(the_dir + '_link.txt')

        with open(json_path, 'r') as json_file:
            feature_dict = json.load(json_file)
        # Stack the per-feature sequences into a (frames, features) matrix
        data_seq.append(np.array(list(feature_dict.values())).T)
        label.append(np.loadtxt(gt_path))

    label = preprocess(data_seq, label)
    model = train()
| [
"tqdm.tqdm",
"torch.narrow",
"dataset.NoteDataset",
"torch.nn.BCEWithLogitsLoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.FloatTensor",
"model.NeuralNetWithRNN",
"pathlib.Path",
"dataset.get_loader",
"numpy.array",
"numpy.loadtxt",
"torch.nn.SmoothL1Loss",
"visual.visualization"... | [((1566, 1594), 'dataset.NoteDataset', 'NoteDataset', (['data_seq', 'label'], {}), '(data_seq, label)\n', (1577, 1594), False, 'from dataset import NoteDataset, get_loader\n'), ((1628, 1648), 'dataset.get_loader', 'get_loader', (['data_set'], {}), '(data_set)\n', (1638, 1648), False, 'from dataset import NoteDataset, get_loader\n'), ((1828, 1877), 'model.NeuralNetWithRNN', 'NeuralNetWithRNN', (['opt.input_dim', '[34, 51, 34, 17]'], {}), '(opt.input_dim, [34, 51, 34, 17])\n', (1844, 1877), False, 'from model import Rnn, BiRNN, NeuralNet, NeuralNetWithRNN\n'), ((2079, 2101), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (2099, 2101), True, 'import torch.nn as nn\n'), ((2124, 2141), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {}), '()\n', (2139, 2141), True, 'import torch.nn as nn\n'), ((2222, 2319), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.5)', 'patience': '(2)', 'verbose': '(True)', 'min_lr': '(1e-08)'}), "(optimizer, mode='min', factor=0.5, patience=2, verbose=\n True, min_lr=1e-08)\n", (2239, 2319), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((5786, 5836), 'visual.visualization', 'visualization', (['training_loss_list', 'valid_loss_list'], {}), '(training_loss_list, valid_loss_list)\n', (5799, 5836), False, 'from visual import visualization\n'), ((5969, 5991), 'os.listdir', 'os.listdir', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (5979, 5991), False, 'import os\n'), ((6556, 6575), 'numpy.loadtxt', 'np.loadtxt', (['gt_path'], {}), '(gt_path)\n', (6566, 6575), True, 'import numpy as np\n'), ((2650, 2668), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (2654, 2668), False, 'from tqdm import tqdm\n'), ((2728, 2753), 'torch.FloatTensor', 'torch.FloatTensor', (['inputs'], {}), '(inputs)\n', (2745, 2753), False, 'import torch\n'), ((2909, 2934), 'torch.FloatTensor', 'torch.FloatTensor', 
(['target'], {}), '(target)\n', (2926, 2934), False, 'import torch\n'), ((3813, 3831), 'tqdm.tqdm', 'tqdm', (['valid_loader'], {}), '(valid_loader)\n', (3817, 3831), False, 'from tqdm import tqdm\n'), ((3891, 3916), 'torch.FloatTensor', 'torch.FloatTensor', (['inputs'], {}), '(inputs)\n', (3908, 3916), False, 'import torch\n'), ((4072, 4097), 'torch.FloatTensor', 'torch.FloatTensor', (['target'], {}), '(target)\n', (4089, 4097), False, 'import torch\n'), ((5661, 5685), 'pathlib.Path', 'Path', (['opt.checkpoint_dir'], {}), '(opt.checkpoint_dir)\n', (5665, 5685), False, 'from pathlib import Path\n'), ((6491, 6505), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (6499, 6505), True, 'import numpy as np\n'), ((3213, 3259), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(0)', 'length': '(2)'}), '(target, dim=2, start=0, length=2)\n', (3225, 3259), False, 'import torch\n'), ((3311, 3357), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(2)', 'length': '(1)'}), '(target, dim=2, start=2, length=1)\n', (3323, 3357), False, 'import torch\n'), ((4376, 4422), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(0)', 'length': '(2)'}), '(target, dim=2, start=0, length=2)\n', (4388, 4422), False, 'import torch\n'), ((4474, 4520), 'torch.narrow', 'torch.narrow', (['target'], {'dim': '(2)', 'start': '(2)', 'length': '(1)'}), '(target, dim=2, start=2, length=1)\n', (4486, 4520), False, 'import torch\n'), ((809, 841), 'numpy.array', 'np.array', (['[1, 0, cur_note_pitch]'], {}), '([1, 0, cur_note_pitch])\n', (817, 841), True, 'import numpy as np\n'), ((5144, 5168), 'pathlib.Path', 'Path', (['opt.checkpoint_dir'], {}), '(opt.checkpoint_dir)\n', (5148, 5168), False, 'from pathlib import Path\n'), ((957, 978), 'numpy.array', 'np.array', (['[0, 0, 0.0]'], {}), '([0, 0, 0.0])\n', (965, 978), True, 'import numpy as np\n'), ((6013, 6029), 'pathlib.Path', 'Path', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (6017, 6029), False, 
'from pathlib import Path\n'), ((6100, 6116), 'pathlib.Path', 'Path', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (6104, 6116), False, 'from pathlib import Path\n'), ((6200, 6216), 'pathlib.Path', 'Path', (['THE_FOLDER'], {}), '(THE_FOLDER)\n', (6204, 6216), False, 'from pathlib import Path\n'), ((1079, 1111), 'numpy.array', 'np.array', (['[0, 1, cur_note_pitch]'], {}), '([0, 1, cur_note_pitch])\n', (1087, 1111), True, 'import numpy as np\n'), ((1435, 1467), 'numpy.array', 'np.array', (['[0, 0, cur_note_pitch]'], {}), '([0, 0, cur_note_pitch])\n', (1443, 1467), True, 'import numpy as np\n')] |
from typing import Any, List, Dict, Union, Optional
import time
import gym
import gym_hybrid
import copy
import numpy as np
from easydict import EasyDict
from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo
from ding.envs.common import EnvElementInfo, affine_transform
from ding.torch_utils import to_ndarray, to_list
from ding.utils import ENV_REGISTRY
@ENV_REGISTRY.register('gym_hybrid')
class GymHybridEnv(BaseEnv):
default_env_id = ['Sliding-v0', 'Moving-v0']
    def __init__(self, cfg: EasyDict) -> None:
        """Store the config; the actual gym env is created lazily in reset().

        :param cfg: env config; must provide ``env_id`` (one of
            ``default_env_id``) and ``act_scale`` (whether step() should
            affine-map the continuous action args into the env's ranges).
        """
        self._cfg = cfg
        self._env_id = cfg.env_id
        assert self._env_id in self.default_env_id
        self._act_scale = cfg.act_scale
        # Env construction is deferred until the first reset() call
        self._init_flag = False
        self._replay_path = None
def reset(self) -> np.ndarray:
if not self._init_flag:
self._env = gym.make(self._env_id)
if self._replay_path is not None:
self._env = gym.wrappers.Monitor(
self._env, self._replay_path, video_callable=lambda episode_id: True, force=True
)
self._env.metadata["render.modes"] = ["human", "rgb_array"]
self._init_flag = True
if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:
np_seed = 100 * np.random.randint(1, 1000)
self._env.seed(self._seed + np_seed)
elif hasattr(self, '_seed'):
self._env.seed(self._seed)
self._final_eval_reward = 0
obs = self._env.reset()
obs = to_ndarray(obs).astype(np.float32)
return obs
def close(self) -> None:
if self._init_flag:
self._env.close()
self._init_flag = False
def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
np.random.seed(self._seed)
def step(self, action: Dict) -> BaseEnvTimestep:
if self._act_scale:
# acceleration_value.
action['action_args'][0] = affine_transform(action['action_args'][0], min_val=0, max_val=1)
# rotation_value. Following line can be omitted, because in the affine_transform function,
# we have already done the clip(-1,1) operation
action['action_args'][1] = affine_transform(action['action_args'][1], min_val=-1, max_val=1)
action = [action['action_type'], action['action_args']]
obs, rew, done, info = self._env.step(action)
self._final_eval_reward += rew
if done:
info['final_eval_reward'] = self._final_eval_reward
obs = to_ndarray(obs)
if isinstance(obs, list): # corner case
for i in range(len(obs)):
if len(obs[i].shape) == 0:
obs[i] = np.array([obs[i]])
obs = np.concatenate(obs)
assert isinstance(obs, np.ndarray) and obs.shape == (10, )
obs = obs.astype(np.float32)
rew = to_ndarray([rew]) # wrapped to be transfered to a numpy array with shape (1,)
if isinstance(rew, list):
rew = rew[0]
assert isinstance(rew, np.ndarray) and rew.shape == (1, )
info['action_args_mask'] = np.array([[1, 0], [0, 1], [0, 0]])
return BaseEnvTimestep(obs, rew, done, info)
def get_random_action(self) -> Dict:
# action_type: 0, 1, 2
# action_args:
# - acceleration_value: [0, 1]
# - rotation_value: [-1, 1]
raw_action = self._env.action_space.sample()
return {'action_type': raw_action[0], 'action_args': raw_action[1]}
def info(self) -> BaseEnvInfo:
T = EnvElementInfo
return BaseEnvInfo(
agent_num=1,
obs_space=T(
(10, ),
{
'min': -1,
'max': 2,
'dtype': np.float32,
},
),
# [min, max)
act_space=T(
(3, ),
{
'min': 0,
'max': 3,
'dtype': int,
},
),
rew_space=T(
(1, ),
{
'min': -1.0,
'max': 1.0
},
),
use_wrappers=None,
)
def __repr__(self) -> str:
return "DI-engine gym hybrid Env"
def enable_save_replay(self, replay_path: Optional[str] = None) -> None:
if replay_path is None:
replay_path = './video'
self._replay_path = replay_path
| [
"numpy.random.seed",
"gym.make",
"ding.torch_utils.to_ndarray",
"gym.wrappers.Monitor",
"ding.envs.BaseEnvTimestep",
"numpy.random.randint",
"numpy.array",
"ding.utils.ENV_REGISTRY.register",
"numpy.concatenate",
"ding.envs.common.affine_transform"
] | [((364, 399), 'ding.utils.ENV_REGISTRY.register', 'ENV_REGISTRY.register', (['"""gym_hybrid"""'], {}), "('gym_hybrid')\n", (385, 399), False, 'from ding.utils import ENV_REGISTRY\n'), ((1853, 1879), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (1867, 1879), True, 'import numpy as np\n'), ((2624, 2639), 'ding.torch_utils.to_ndarray', 'to_ndarray', (['obs'], {}), '(obs)\n', (2634, 2639), False, 'from ding.torch_utils import to_ndarray, to_list\n'), ((2975, 2992), 'ding.torch_utils.to_ndarray', 'to_ndarray', (['[rew]'], {}), '([rew])\n', (2985, 2992), False, 'from ding.torch_utils import to_ndarray, to_list\n'), ((3214, 3248), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [0, 0]]'], {}), '([[1, 0], [0, 1], [0, 0]])\n', (3222, 3248), True, 'import numpy as np\n'), ((3264, 3301), 'ding.envs.BaseEnvTimestep', 'BaseEnvTimestep', (['obs', 'rew', 'done', 'info'], {}), '(obs, rew, done, info)\n', (3279, 3301), False, 'from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo\n'), ((832, 854), 'gym.make', 'gym.make', (['self._env_id'], {}), '(self._env_id)\n', (840, 854), False, 'import gym\n'), ((2035, 2099), 'ding.envs.common.affine_transform', 'affine_transform', (["action['action_args'][0]"], {'min_val': '(0)', 'max_val': '(1)'}), "(action['action_args'][0], min_val=0, max_val=1)\n", (2051, 2099), False, 'from ding.envs.common import EnvElementInfo, affine_transform\n'), ((2302, 2367), 'ding.envs.common.affine_transform', 'affine_transform', (["action['action_args'][1]"], {'min_val': '(-1)', 'max_val': '(1)'}), "(action['action_args'][1], min_val=-1, max_val=1)\n", (2318, 2367), False, 'from ding.envs.common import EnvElementInfo, affine_transform\n'), ((2836, 2855), 'numpy.concatenate', 'np.concatenate', (['obs'], {}), '(obs)\n', (2850, 2855), True, 'import numpy as np\n'), ((929, 1035), 'gym.wrappers.Monitor', 'gym.wrappers.Monitor', (['self._env', 'self._replay_path'], {'video_callable': '(lambda episode_id: True)', 'force': 
'(True)'}), '(self._env, self._replay_path, video_callable=lambda\n episode_id: True, force=True)\n', (949, 1035), False, 'import gym\n'), ((1302, 1328), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (1319, 1328), True, 'import numpy as np\n'), ((1536, 1551), 'ding.torch_utils.to_ndarray', 'to_ndarray', (['obs'], {}), '(obs)\n', (1546, 1551), False, 'from ding.torch_utils import to_ndarray, to_list\n'), ((2799, 2817), 'numpy.array', 'np.array', (['[obs[i]]'], {}), '([obs[i]])\n', (2807, 2817), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import cv2 as cv
import numpy as np
def sharpen(image, out_path="output00000000_sharpen.png"):
    """Sharpen `image` with a 3x3 kernel and write the result to disk.

    Args:
        image: input image array (as returned by ``cv.imread``).
        out_path: output filename; defaults to the previously hard-coded
            name so existing callers are unaffected.

    Returns:
        The sharpened image (also written to `out_path`).
    """
    kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)  # sharpening kernel
    dst = cv.filter2D(image, -1, kernel=kernel)
    cv.imwrite(out_path, dst)
    return dst
# The original nested `for i in range(1)` loops executed the body exactly
# once; run it directly instead.
src = cv.imread("./m/png/m9/output00000000.png")
sharpen(src)
| [
"cv2.imwrite",
"numpy.array",
"cv2.imread",
"cv2.filter2D"
] | [((114, 173), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, -1], [0, -1, 0]]', 'np.float32'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)\n', (122, 173), True, 'import numpy as np\n'), ((188, 225), 'cv2.filter2D', 'cv.filter2D', (['image', '(-1)'], {'kernel': 'kernel'}), '(image, -1, kernel=kernel)\n', (199, 225), True, 'import cv2 as cv\n'), ((233, 278), 'cv2.imwrite', 'cv.imwrite', (['"""output00000000_sharpen.png"""', 'dst'], {}), "('output00000000_sharpen.png', dst)\n", (243, 278), True, 'import cv2 as cv\n'), ((334, 376), 'cv2.imread', 'cv.imread', (['"""./m/png/m9/output00000000.png"""'], {}), "('./m/png/m9/output00000000.png')\n", (343, 376), True, 'import cv2 as cv\n')] |
"""Update a SQLite database in real time. Requires Grafana and Python >3.6.
```
python -m pip install numpy tqdm
python create_grafana_sample_db.py
```
See discussion context: https://github.com/fr-ser/grafana-sqlite-datasource/issues/21
"""
import sqlite3
import time
from contextlib import ContextDecorator
from pathlib import Path
import numpy as np
from tqdm import tqdm
class SQLConnection(ContextDecorator):
    """Ensure the SQLite connection is properly opened and closed."""
    def __init__(self, path_db: Path) -> None:
        """Remember the database path; the connection is opened lazily.
        Args:
            path_db: Path to a SQLite file
        """
        self.path_db = path_db
        self.conn = None
    def __enter__(self) -> sqlite3.Connection:
        """Open the database and hand the connection to the `with` body.
        Returns:
            Connection: connection to sqlite database
        """
        self.conn = sqlite3.connect(self.path_db)
        return self.conn
    def __exit__(self, exc_type, exc_value, traceback) -> None:
        """Close connection."""  # noqa: DAR101
        self.conn.close()
def generate_fake_db(path_db: Path) -> None:
    """Populate a SQL database in real time to test real time chart visualization.

    Recreates the `test_data` table, then loops forever: every second it
    inserts 1000 normally-distributed temperature samples.

    Args:
        path_db: path to SQLite file
    """
    print(f'Creating: {path_db}')  # noqa: T001
    with SQLConnection(path_db) as conn:
        cursor = conn.cursor()
        cursor.execute('DROP TABLE IF EXISTS test_data;')
        conn.commit()
        cursor.execute("""CREATE TABLE test_data (
    time FLOAT NOT NULL,
    temp FLOAT NOT NULL,
    min FLOAT NOT NULL,
    max FLOAT NOT NULL
);""")
        conn.commit()
        while True:
            # Generate random data points and add to the database
            points = 1000
            mu, sigma = (10, 8)  # mean and standard deviation
            samples = np.random.normal(mu, sigma, points)
            for idx in tqdm(range(points)):
                # Parameterized query instead of f-string interpolation: safer
                # and lets sqlite3 bind the floats (np.float64 subclasses float).
                cursor.execute(
                    'INSERT INTO test_data (time, temp, min, max) VALUES (?, ?, ?, ?);',
                    (time.time(), samples[idx], samples[idx] - 2.1, samples[idx] + 3.2),
                )
                conn.commit()
            time.sleep(1)
if __name__ == '__main__':
    # Place the demo database next to this script.
    db_file = Path(__file__).resolve().parent / 'test_db.sqlite'
    generate_fake_db(path_db=db_file)
| [
"time.time",
"time.sleep",
"pathlib.Path",
"sqlite3.connect",
"numpy.random.normal"
] | [((926, 955), 'sqlite3.connect', 'sqlite3.connect', (['self.path_db'], {}), '(self.path_db)\n', (941, 955), False, 'import sqlite3\n'), ((1949, 1984), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'points'], {}), '(mu, sigma, points)\n', (1965, 1984), True, 'import numpy as np\n'), ((2296, 2309), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2306, 2309), False, 'import time\n'), ((2057, 2068), 'time.time', 'time.time', ([], {}), '()\n', (2066, 2068), False, 'import time\n'), ((2368, 2382), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2372, 2382), False, 'from pathlib import Path\n')] |
from __future__ import annotations
from typing import TYPE_CHECKING, Union
from warnings import filterwarnings
import pyopencl as cl
import pyopencl.array as cla
import numpy as np
from gpyfft.fft import FFT
from ._util import get_context
# Silence pyopencl's warnings for this module.
filterwarnings("ignore", module="pyopencl")
if TYPE_CHECKING:
    from reikna.cluda.cuda import Array as cudaArray
    from reikna.cluda.ocl import Array as oclArray
    # Static-typing-only alias (the file uses lazy annotations via __future__).
    Array = Union[cudaArray, oclArray]
# Single shared OpenCL context/queue used by every transform in this module.
context = get_context()
queue = cl.CommandQueue(context)
# plan cache, keyed by (shape, dtype, axes, fast_math) -- see _get_fft_plan
_PLAN_CACHE = {}
def _normalize_axes(dshape, axes):
"""Convert possibly negative axes to positive axes."""
if axes is None:
return None
_axes = [axes] if np.isscalar(axes) else list(axes)
try:
return tuple(np.arange(len(dshape))[_axes])
except Exception as e:
raise TypeError(f"Cannot normalize axes {axes}: {e}")
def _get_fft_plan(arr, axes=None, fast_math=False):
    """Cache and return a reikna FFT plan suitable for `arr` type and shape."""
    norm_axes = _normalize_axes(arr.shape, axes)
    key = (arr.shape, arr.dtype, norm_axes, fast_math)
    plan = _PLAN_CACHE.get(key)
    if plan is None:
        # Build the plan only on a cache miss; plans are reused across calls.
        plan = FFT(context, queue, arr, axes=norm_axes, fast_math=fast_math)
        _PLAN_CACHE[key] = plan
    return plan
def _fftn(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: tuple[int, ...] | None = None,
    inplace: bool = False,
    fast_math: bool = True,
    *,
    _inverse: bool = False,
) -> Array:
    """Perform fast Fourier transformation on `input_array`.
    Parameters
    ----------
    input_arr : numpy or OCL array
        A numpy or OCL array to transform. If an OCL array is provided, it must already
        be of type `complex64`. If a numpy array is provided, it will be converted
        to `float32` before the transformation is performed.
    output_arr : numpy or OCL array, optional
        An optional array/buffer to use for output, by default None
    axes : tuple of int, optional
        A tuple with axes over which to perform the transform.
        If not given, the transform is performed over all the axes., by default None
    inplace : bool, optional
        Whether to place output data in the `input_arr` buffer, by default False
    fast_math : bool, optional
        Whether to enable fast (less precise) mathematical operations during
        compilation, by default True
    _inverse : bool, optional
        Perform inverse FFT, by default False. (prefer using `ifftn`)
    Returns
    -------
    OCLArray
        result of transformation (still on GPU). Use `.get()` or `cle.pull`
        to retrieve from GPU.
        If `inplace` or `output_arr` where used, data will also be placed in
        the corresponding buffer as a side effect.
    Raises
    ------
    TypeError
        If OCL array is provided that is not of type complex64. Or if an unrecognized
        array is provided.
    ValueError
        If inplace is used for numpy array, or both `output_arr` and `inplace` are used.
    """
    if output_arr is not None and inplace:
        raise ValueError("`output_arr` cannot be provided if `inplace` is True")
    assert input_arr.dtype in (np.float32, np.float64, np.complex64, np.complex128)
    if not np.iscomplexobj(input_arr):
        # NOTE(review): float64 input is downcast to complex64 here (precision loss)
        input_arr = input_arr.astype(np.complex64)  # TODO
    # Host numpy arrays are copied to the device; device arrays are used as-is.
    _input_array = (
        cla.to_device(queue, input_arr)
        if isinstance(input_arr, np.ndarray)
        else input_arr
    )
    transform = _get_fft_plan(_input_array, axes=axes, fast_math=fast_math)
    if not inplace:
        if output_arr is None:
            output_arr = cla.empty_like(_input_array)
        transform.result = output_arr
    (event,) = transform.enqueue(forward=not _inverse)
    # Block until the enqueued OpenCL kernel has finished.
    event.wait()
    if not inplace:
        return output_arr
    return _input_array
def fft(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: int = -1,
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """One-dimensional forward FFT along `axes` (delegates to `fftn`)."""
    return fftn(input_arr, output_arr, axes=(axes,), inplace=inplace, fast_math=fast_math)
def ifft(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: int = -1,
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """One-dimensional inverse FFT along `axes` (delegates to `ifftn`)."""
    return ifftn(input_arr, output_arr, axes=(axes,), inplace=inplace, fast_math=fast_math)
def fft2(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: tuple[int, int] = (-2, -1),
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """Two-dimensional forward FFT over `axes` (delegates to `fftn`)."""
    return fftn(input_arr, output_arr, axes=axes, inplace=inplace, fast_math=fast_math)
def ifft2(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: tuple[int, int] = (-2, -1),
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """Two-dimensional inverse FFT over `axes` (delegates to `ifftn`)."""
    return ifftn(input_arr, output_arr, axes=axes, inplace=inplace, fast_math=fast_math)
def fftn(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: tuple[int, ...] | None = None,
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """N-dimensional forward FFT (thin public wrapper over `_fftn`)."""
    return _fftn(input_arr, output_arr, axes=axes, inplace=inplace, fast_math=fast_math)
def ifftn(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: tuple[int, ...] | None = None,
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """N-dimensional inverse FFT (thin public wrapper over `_fftn`)."""
    return _fftn(input_arr, output_arr, axes, inplace, fast_math, _inverse=True)
def rfft(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: int = -1,
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """Real-input FFT: full transform via `_fftn`, keeping the non-negative
    frequency half of the last axis.
    NOTE(review): the `[:, ...]` slice assumes a 2-D input transformed along
    its last axis -- confirm before relying on other ranks/axes.
    """
    x = _fftn(input_arr, output_arr, (axes,), inplace, fast_math)
    return x[:, : input_arr.shape[-1] // 2 + 1]
# FIXME
# def irfft(
# input_arr: np.ndarray | Array,
# output_arr: np.ndarray | Array = None,
# axes: int = -1,
# inplace: bool = False,
# fast_math: bool = True,
# ) -> Array:
# x = _fftn(input_arr, output_arr, axes, inplace, fast_math, _inverse=True)
# shp = list(input_arr.shape)
# n = shp[axes]
# shp[axes] = 2 * n - 2
# result = empty(shp, np.float32)
# result[..., :n] = x.real
# result[..., n - 1 :] = x.real[..., 1:][::-1]
# return result.astype(np.float64)
def rfft2(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: tuple[int, int] = (-2, -1),
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """Real-input 2-D FFT: full transform, keeping half of axis 1.
    NOTE(review): slicing uses `input_arr.shape[1]`, which matches the last
    axis only for 2-D inputs with the default axes -- confirm for other shapes.
    """
    x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
    return x[:, : input_arr.shape[1] // 2 + 1]
# FIXME
# def irfft2(
# input_arr: np.ndarray | Array,
# output_arr: np.ndarray | Array = None,
# axes: Tuple[int, int] = (-2, -1),
# inplace: bool = False,
# fast_math: bool = True,
# ) -> Array:
# x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
# return x[:, : input_arr.shape[1] // 2 + 1]
def rfftn(
    input_arr: np.ndarray | Array,
    output_arr: np.ndarray | Array = None,
    axes: tuple[int, ...] | None = None,
    inplace: bool = False,
    fast_math: bool = True,
) -> Array:
    """Real-input N-D FFT: full transform, keeping half of axis 1.
    NOTE(review): slicing uses `input_arr.shape[1]`, which is only correct for
    2-D inputs transformed over all axes -- confirm for general `axes`.
    """
    x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
    return x[:, : input_arr.shape[1] // 2 + 1]
# FIXME
# def irfftn(
# input_arr: np.ndarray | Array,
# output_arr: np.ndarray | Array = None,
# axes: tuple[int, ...] | None = None,
# inplace: bool = False,
# fast_math: bool = True,
# ) -> Array:
# x = _fftn(input_arr, output_arr, axes, inplace, fast_math)
# return x[..., : input_arr.shape[1] // 2 + 1]
| [
"pyopencl.array.empty_like",
"gpyfft.fft.FFT",
"numpy.iscomplexobj",
"warnings.filterwarnings",
"numpy.isscalar",
"pyopencl.CommandQueue",
"pyopencl.array.to_device"
] | [((244, 287), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'module': '"""pyopencl"""'}), "('ignore', module='pyopencl')\n", (258, 287), False, 'from warnings import filterwarnings\n'), ((485, 509), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['context'], {}), '(context)\n', (500, 509), True, 'import pyopencl as cl\n'), ((701, 718), 'numpy.isscalar', 'np.isscalar', (['axes'], {}), '(axes)\n', (712, 718), True, 'import numpy as np\n'), ((1187, 1243), 'gpyfft.fft.FFT', 'FFT', (['context', 'queue', 'arr'], {'axes': 'axes', 'fast_math': 'fast_math'}), '(context, queue, arr, axes=axes, fast_math=fast_math)\n', (1190, 1243), False, 'from gpyfft.fft import FFT\n'), ((3281, 3307), 'numpy.iscomplexobj', 'np.iscomplexobj', (['input_arr'], {}), '(input_arr)\n', (3296, 3307), True, 'import numpy as np\n'), ((3398, 3429), 'pyopencl.array.to_device', 'cla.to_device', (['queue', 'input_arr'], {}), '(queue, input_arr)\n', (3411, 3429), True, 'import pyopencl.array as cla\n'), ((3657, 3685), 'pyopencl.array.empty_like', 'cla.empty_like', (['_input_array'], {}), '(_input_array)\n', (3671, 3685), True, 'import pyopencl.array as cla\n')] |
import cv2
import numpy as np
import scipy.fftpack
import scipy.signal
from matplotlib import pyplot
# from eulerian_magnification.io import play_vid_data
from eulerian_magnification.pyramid import create_laplacian_video_pyramid, collapse_laplacian_video_pyramid
from eulerian_magnification.transforms import temporal_bandpass_filter
def eulerian_magnification(vid_data, fps, freq_min, freq_max, amplification, pyramid_levels=4, skip_levels_at_top=2):
    """Amplify the [freq_min, freq_max] temporal band of a video.

    Builds a laplacian pyramid, band-pass filters and amplifies the middle
    levels, and collapses the pyramid back into a video.
    """
    pyramid = create_laplacian_video_pyramid(vid_data, pyramid_levels=pyramid_levels)
    last_level = len(pyramid) - 1
    for level, band in enumerate(pyramid):
        # Skip the noisy top levels and the bottom (gaussian residual) level.
        if skip_levels_at_top <= level < last_level:
            filtered = temporal_bandpass_filter(
                band, fps, freq_min=freq_min, freq_max=freq_max, amplification_factor=amplification
            )
            pyramid[level] += filtered
    return collapse_laplacian_video_pyramid(pyramid)
def show_frequencies(vid_data, fps, bounds=None):
    """Graph the average value of the video as well as the frequency strength

    Args:
        vid_data: video as a (frames, height, width, channels) array.
        fps: frame rate, used to convert FFT bins to Hz.
        bounds: optional (x0, x1, y0, y1) crop; when given, only that region
            contributes to the per-frame average.
    """
    averages = []
    # First and last frames are skipped in both branches.
    if bounds:
        for x in range(1, vid_data.shape[0] - 1):
            averages.append(vid_data[x, bounds[2]:bounds[3], bounds[0]:bounds[1], :].sum())
    else:
        for x in range(1, vid_data.shape[0] - 1):
            averages.append(vid_data[x, :, :, :].sum())
    # Shift so the minimum is zero (numpy broadcasting over the list).
    averages = averages - min(averages)
    charts_x = 1
    charts_y = 2
    pyplot.figure(figsize=(20, 10))
    pyplot.subplots_adjust(hspace=.7)
    pyplot.subplot(charts_y, charts_x, 1)
    pyplot.title("Pixel Average")
    pyplot.xlabel("Time")
    pyplot.ylabel("Brightness")
    pyplot.plot(averages)
    freqs = scipy.fftpack.fftfreq(len(averages), d=1.0 / fps)
    fft = abs(scipy.fftpack.fft(averages))
    idx = np.argsort(freqs)
    pyplot.subplot(charts_y, charts_x, 2)
    pyplot.title("FFT")
    pyplot.xlabel("Freq (Hz)")
    freqs = freqs[idx]
    fft = fft[idx]
    # Keep only the positive-frequency half for display.
    freqs = freqs[len(freqs) // 2 + 1:]
    fft = fft[len(fft) // 2 + 1:]
    pyplot.plot(freqs, abs(fft))
    pyplot.show()
def gaussian_video(video, shrink_multiple):
    """Create a gaussian representation of a video"""
    num_frames = video.shape[0]
    vid_data = None
    for idx in range(num_frames):
        shrunk = np.ndarray(shape=video[idx].shape, dtype="float")
        shrunk[:] = video[idx]
        # Repeatedly downsample to the requested pyramid level.
        for _ in range(shrink_multiple):
            shrunk = cv2.pyrDown(shrunk)
        if vid_data is None:
            # Allocate the output once the shrunken frame size is known.
            vid_data = np.zeros((num_frames, shrunk.shape[0], shrunk.shape[1], 3))
        vid_data[idx] = shrunk
    return vid_data
def laplacian_video(video, shrink_multiple):
    """Build a laplacian (band-pass) representation of a video.

    Each frame is repeatedly shrunk with pyrDown; the stored frame is the
    difference between the next-to-last gaussian level and its re-expanded
    successor.

    Args:
        video: (frames, height, width, 3) array.
        shrink_multiple: number of pyrDown steps; must be >= 1, otherwise
            `prev_copy` is never assigned and a NameError is raised.

    Returns:
        (frames, h', w', 3) array of laplacian frames.
    """
    vid_data = None
    # Fix: height/width/colors were unpacked here but never used.
    frame_count = video.shape[0]
    for i, frame in enumerate(video):
        gauss_copy = np.ndarray(shape=frame.shape, dtype="float")
        gauss_copy[:] = frame
        for _ in range(shrink_multiple):
            prev_copy = gauss_copy[:]
            gauss_copy = cv2.pyrDown(gauss_copy)
        laplacian = prev_copy - cv2.pyrUp(gauss_copy)
        if vid_data is None:
            vid_data = np.zeros((frame_count, laplacian.shape[0], laplacian.shape[1], 3))
        vid_data[i] = laplacian
    return vid_data
def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps, save_filename='media/output.avi'):
    """Combine a gaussian video representation with the original and save to file.

    Args:
        g_video: shrunken (gaussian) video data, (frames, h, w, 3).
        orig_video: the original full-size video, (frames, H, W, 3).
        enlarge_multiple: number of pyrUp steps to blow `g_video` back up.
        fps: frame rate of the output file.
        save_filename: output path (MJPG-encoded AVI).
    """
    width, height = get_frame_dimensions(orig_video[0])
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    print("Outputting to %s" % save_filename)
    writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1)
    try:
        for x in range(0, g_video.shape[0]):
            img = np.ndarray(shape=g_video[x].shape, dtype='float')
            img[:] = g_video[x]
            for i in range(enlarge_multiple):
                img = cv2.pyrUp(img)
            img[:height, :width] = img[:height, :width] + orig_video[x]
            res = cv2.convertScaleAbs(img[:height, :width])
            writer.write(res)
    finally:
        # Bug fix: the writer was never released, which can leave the output
        # file unfinalized/truncated.
        writer.release()
def get_frame_dimensions(frame):
    """Return (width, height) of a single frame."""
    return frame.shape[1], frame.shape[0]
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = scipy.signal.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Band-pass filter `data` along axis 0 with a Butterworth design."""
    numer, denom = butter_bandpass(lowcut, highcut, fs, order=order)
    return scipy.signal.lfilter(numer, denom, data, axis=0)
| [
"matplotlib.pyplot.title",
"cv2.VideoWriter_fourcc",
"eulerian_magnification.pyramid.create_laplacian_video_pyramid",
"numpy.argsort",
"matplotlib.pyplot.figure",
"cv2.VideoWriter",
"cv2.pyrDown",
"numpy.ndarray",
"cv2.convertScaleAbs",
"matplotlib.pyplot.show",
"eulerian_magnification.transform... | [((473, 544), 'eulerian_magnification.pyramid.create_laplacian_video_pyramid', 'create_laplacian_video_pyramid', (['vid_data'], {'pyramid_levels': 'pyramid_levels'}), '(vid_data, pyramid_levels=pyramid_levels)\n', (503, 544), False, 'from eulerian_magnification.pyramid import create_laplacian_video_pyramid, collapse_laplacian_video_pyramid\n'), ((1080, 1125), 'eulerian_magnification.pyramid.collapse_laplacian_video_pyramid', 'collapse_laplacian_video_pyramid', (['vid_pyramid'], {}), '(vid_pyramid)\n', (1112, 1125), False, 'from eulerian_magnification.pyramid import create_laplacian_video_pyramid, collapse_laplacian_video_pyramid\n'), ((1651, 1682), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1664, 1682), False, 'from matplotlib import pyplot\n'), ((1687, 1721), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'hspace': '(0.7)'}), '(hspace=0.7)\n', (1709, 1721), False, 'from matplotlib import pyplot\n'), ((1726, 1763), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['charts_y', 'charts_x', '(1)'], {}), '(charts_y, charts_x, 1)\n', (1740, 1763), False, 'from matplotlib import pyplot\n'), ((1768, 1797), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Pixel Average"""'], {}), "('Pixel Average')\n", (1780, 1797), False, 'from matplotlib import pyplot\n'), ((1802, 1823), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Time"""'], {}), "('Time')\n", (1815, 1823), False, 'from matplotlib import pyplot\n'), ((1828, 1855), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Brightness"""'], {}), "('Brightness')\n", (1841, 1855), False, 'from matplotlib import pyplot\n'), ((1860, 1881), 'matplotlib.pyplot.plot', 'pyplot.plot', (['averages'], {}), '(averages)\n', (1871, 1881), False, 'from matplotlib import pyplot\n'), ((1998, 2015), 'numpy.argsort', 'np.argsort', (['freqs'], {}), '(freqs)\n', (2008, 2015), True, 'import numpy as np\n'), ((2021, 2058), 
'matplotlib.pyplot.subplot', 'pyplot.subplot', (['charts_y', 'charts_x', '(2)'], {}), '(charts_y, charts_x, 2)\n', (2035, 2058), False, 'from matplotlib import pyplot\n'), ((2063, 2082), 'matplotlib.pyplot.title', 'pyplot.title', (['"""FFT"""'], {}), "('FFT')\n", (2075, 2082), False, 'from matplotlib import pyplot\n'), ((2087, 2113), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Freq (Hz)"""'], {}), "('Freq (Hz)')\n", (2100, 2113), False, 'from matplotlib import pyplot\n'), ((2269, 2282), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2280, 2282), False, 'from matplotlib import pyplot\n'), ((3696, 3727), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (3718, 3727), False, 'import cv2\n'), ((3787, 3850), 'cv2.VideoWriter', 'cv2.VideoWriter', (['save_filename', 'fourcc', 'fps', '(width, height)', '(1)'], {}), '(save_filename, fourcc, fps, (width, height), 1)\n', (3802, 3850), False, 'import cv2\n'), ((840, 952), 'eulerian_magnification.transforms.temporal_bandpass_filter', 'temporal_bandpass_filter', (['vid', 'fps'], {'freq_min': 'freq_min', 'freq_max': 'freq_max', 'amplification_factor': 'amplification'}), '(vid, fps, freq_min=freq_min, freq_max=freq_max,\n amplification_factor=amplification)\n', (864, 952), False, 'from eulerian_magnification.transforms import temporal_bandpass_filter\n'), ((2488, 2532), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'frame.shape', 'dtype': '"""float"""'}), "(shape=frame.shape, dtype='float')\n", (2498, 2532), True, 'import numpy as np\n'), ((3001, 3045), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'frame.shape', 'dtype': '"""float"""'}), "(shape=frame.shape, dtype='float')\n", (3011, 3045), True, 'import numpy as np\n'), ((3906, 3955), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'g_video[x].shape', 'dtype': '"""float"""'}), "(shape=g_video[x].shape, dtype='float')\n", (3916, 3955), True, 'import numpy as np\n'), ((4142, 4183), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', 
(['img[:height, :width]'], {}), '(img[:height, :width])\n', (4161, 4183), False, 'import cv2\n'), ((2629, 2652), 'cv2.pyrDown', 'cv2.pyrDown', (['gauss_copy'], {}), '(gauss_copy)\n', (2640, 2652), False, 'import cv2\n'), ((2696, 2767), 'numpy.zeros', 'np.zeros', (['(video.shape[0], gauss_copy.shape[0], gauss_copy.shape[1], 3)'], {}), '((video.shape[0], gauss_copy.shape[0], gauss_copy.shape[1], 3))\n', (2704, 2767), True, 'import numpy as np\n'), ((3181, 3204), 'cv2.pyrDown', 'cv2.pyrDown', (['gauss_copy'], {}), '(gauss_copy)\n', (3192, 3204), False, 'import cv2\n'), ((3238, 3259), 'cv2.pyrUp', 'cv2.pyrUp', (['gauss_copy'], {}), '(gauss_copy)\n', (3247, 3259), False, 'import cv2\n'), ((3313, 3379), 'numpy.zeros', 'np.zeros', (['(frame_count, laplacian.shape[0], laplacian.shape[1], 3)'], {}), '((frame_count, laplacian.shape[0], laplacian.shape[1], 3))\n', (3321, 3379), True, 'import numpy as np\n'), ((4044, 4058), 'cv2.pyrUp', 'cv2.pyrUp', (['img'], {}), '(img)\n', (4053, 4058), False, 'import cv2\n')] |
"""Tests for module gromov """
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import ot
def test_gromov():
    """Exact Gromov-Wasserstein on mirrored Gaussian samples: marginals and
    the anti-diagonal coupling must be recovered."""
    n_samples = 50  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=4)
    # Target is the source reversed, so the optimal coupling is anti-diagonal.
    xt = xs[::-1].copy()
    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()
    G = ot.gromov.gromov_wasserstein(C1, C2, p, q, 'square_loss', verbose=True)
    # check constraints
    np.testing.assert_allclose(
        p, G.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, G.sum(0), atol=1e-04)  # cf convergence gromov
    Id = (1 / (1.0 * n_samples)) * np.eye(n_samples, n_samples)
    np.testing.assert_allclose(
        G, np.flipud(Id), atol=1e-04)
    gw, log = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=True)
    gw_val = ot.gromov.gromov_wasserstein2(C1, C2, p, q, 'kl_loss', log=False)
    G = log['T']
    np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)
    np.testing.assert_allclose(gw, gw_val, atol=1e-1, rtol=1e-1)  # cf log=False
    # check constraints
    np.testing.assert_allclose(
        p, G.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, G.sum(0), atol=1e-04)  # cf convergence gromov
def test_entropic_gromov():
    """Entropic Gromov-Wasserstein: transport plan marginals must match p/q
    and the kl-loss distance must be close to zero for mirrored samples."""
    n_samples = 50  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()
    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()
    G = ot.gromov.entropic_gromov_wasserstein(
        C1, C2, p, q, 'square_loss', epsilon=5e-4, verbose=True)
    # check constraints
    np.testing.assert_allclose(
        p, G.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, G.sum(0), atol=1e-04)  # cf convergence gromov
    gw, log = ot.gromov.entropic_gromov_wasserstein2(
        C1, C2, p, q, 'kl_loss', epsilon=1e-2, log=True)
    G = log['T']
    np.testing.assert_allclose(gw, 0, atol=1e-1, rtol=1e-1)
    # check constraints
    np.testing.assert_allclose(
        p, G.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, G.sum(0), atol=1e-04)  # cf convergence gromov
def test_gromov_barycenter():
    """Gromov barycenter of two structure matrices: only the output shape is
    checked, for both square and KL losses."""
    ns = 50
    nt = 60
    Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
    Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)
    C1 = ot.dist(Xs)
    C2 = ot.dist(Xt)
    n_samples = 3
    Cb = ot.gromov.gromov_barycenters(n_samples, [C1, C2],
                                      [ot.unif(ns), ot.unif(nt)
                                       ], ot.unif(n_samples), [.5, .5],
                                      'square_loss',  # 5e-4,
                                      max_iter=100, tol=1e-3,
                                      verbose=True)
    np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
    Cb2 = ot.gromov.gromov_barycenters(n_samples, [C1, C2],
                                       [ot.unif(ns), ot.unif(nt)
                                        ], ot.unif(n_samples), [.5, .5],
                                       'kl_loss',  # 5e-4,
                                       max_iter=100, tol=1e-3)
    np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
def test_gromov_entropic_barycenter():
    """Entropic Gromov barycenter (epsilon=2e-3): only the output shape is
    checked, for both square and KL losses."""
    ns = 50
    nt = 60
    Xs, ys = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
    Xt, yt = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)
    C1 = ot.dist(Xs)
    C2 = ot.dist(Xt)
    n_samples = 3
    Cb = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
                                               [ot.unif(ns), ot.unif(nt)
                                                ], ot.unif(n_samples), [.5, .5],
                                               'square_loss', 2e-3,
                                               max_iter=100, tol=1e-3,
                                               verbose=True)
    np.testing.assert_allclose(Cb.shape, (n_samples, n_samples))
    Cb2 = ot.gromov.entropic_gromov_barycenters(n_samples, [C1, C2],
                                                [ot.unif(ns), ot.unif(nt)
                                                 ], ot.unif(n_samples), [.5, .5],
                                                'kl_loss', 2e-3,
                                                max_iter=100, tol=1e-3)
    np.testing.assert_allclose(Cb2.shape, (n_samples, n_samples))
def test_fgw():
    """Fused Gromov-Wasserstein on mirrored samples with mirrored features:
    marginals, the anti-diagonal plan, and near-zero distance are checked."""
    n_samples = 50  # nb samples
    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
    xs = ot.datasets.make_2D_samples_gauss(n_samples, mu_s, cov_s, random_state=42)
    xt = xs[::-1].copy()
    ys = np.random.randn(xs.shape[0], 2)
    yt = ys[::-1].copy()
    p = ot.unif(n_samples)
    q = ot.unif(n_samples)
    C1 = ot.dist(xs, xs)
    C2 = ot.dist(xt, xt)
    C1 /= C1.max()
    C2 /= C2.max()
    # Feature cost matrix for the fused term.
    M = ot.dist(ys, yt)
    M /= M.max()
    G, log = ot.gromov.fused_gromov_wasserstein(M, C1, C2, p, q, 'square_loss', alpha=0.5, log=True)
    # check constraints
    np.testing.assert_allclose(
        p, G.sum(1), atol=1e-04)  # cf convergence fgw
    np.testing.assert_allclose(
        q, G.sum(0), atol=1e-04)  # cf convergence fgw
    Id = (1 / (1.0 * n_samples)) * np.eye(n_samples, n_samples)
    np.testing.assert_allclose(
        G, np.flipud(Id), atol=1e-04)  # cf convergence gromov
    fgw, log = ot.gromov.fused_gromov_wasserstein2(M, C1, C2, p, q, 'square_loss', alpha=0.5, log=True)
    G = log['T']
    np.testing.assert_allclose(fgw, 0, atol=1e-1, rtol=1e-1)
    # check constraints
    np.testing.assert_allclose(
        p, G.sum(1), atol=1e-04)  # cf convergence gromov
    np.testing.assert_allclose(
        q, G.sum(0), atol=1e-04)  # cf convergence gromov
def test_fgw_barycenter():
    """FGW barycenter shapes under free/fixed structure and features."""
    np.random.seed(42)
    ns = 50
    nt = 60
    Xs, _ = ot.datasets.make_data_classif('3gauss', ns, random_state=42)
    Xt, _ = ot.datasets.make_data_classif('3gauss2', nt, random_state=42)
    ys = np.random.randn(Xs.shape[0], 2)
    yt = np.random.randn(Xt.shape[0], 2)
    C1 = ot.dist(Xs)
    C2 = ot.dist(Xt)
    k = 3  # barycenter size
    # Both structure and features free.
    X, C = ot.gromov.fgw_barycenters(k, [ys, yt], [C1, C2],
                                     [ot.unif(ns), ot.unif(nt)], [.5, .5], 0.5,
                                     fixed_structure=False, fixed_features=False,
                                     p=ot.unif(k), loss_fun='square_loss',
                                     max_iter=100, tol=1e-3)
    np.testing.assert_allclose(C.shape, (k, k))
    np.testing.assert_allclose(X.shape, (k, ys.shape[1]))
    # Structure fixed to a random distance matrix.
    rand_pts = np.random.randn(k, 2)
    init_C = ot.dist(rand_pts, rand_pts)
    X, C = ot.gromov.fgw_barycenters(k, [ys, yt], [C1, C2],
                                     ps=[ot.unif(ns), ot.unif(nt)],
                                     lambdas=[.5, .5], alpha=0.5,
                                     fixed_structure=True, init_C=init_C,
                                     fixed_features=False,
                                     p=ot.unif(k), loss_fun='square_loss',
                                     max_iter=100, tol=1e-3)
    np.testing.assert_allclose(C.shape, (k, k))
    np.testing.assert_allclose(X.shape, (k, ys.shape[1]))
    # Features fixed to a random init, with solver log.
    init_X = np.random.randn(k, ys.shape[1])
    X, C, log = ot.gromov.fgw_barycenters(k, [ys, yt], [C1, C2],
                                          [ot.unif(ns), ot.unif(nt)], [.5, .5], 0.5,
                                          fixed_structure=False,
                                          fixed_features=True, init_X=init_X,
                                          p=ot.unif(k), loss_fun='square_loss',
                                          max_iter=100, tol=1e-3, log=True)
    np.testing.assert_allclose(C.shape, (k, k))
    np.testing.assert_allclose(X.shape, (k, ys.shape[1]))
| [
"ot.unif",
"numpy.random.seed",
"numpy.eye",
"ot.datasets.make_data_classif",
"numpy.random.randn",
"ot.dist",
"ot.gromov.entropic_gromov_wasserstein",
"numpy.testing.assert_allclose",
"ot.gromov.fused_gromov_wasserstein",
"ot.gromov.fused_gromov_wasserstein2",
"numpy.flipud",
"ot.gromov.entro... | [((250, 266), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (258, 266), True, 'import numpy as np\n'), ((280, 306), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (288, 306), True, 'import numpy as np\n'), ((319, 392), 'ot.datasets.make_2D_samples_gauss', 'ot.datasets.make_2D_samples_gauss', (['n_samples', 'mu_s', 'cov_s'], {'random_state': '(4)'}), '(n_samples, mu_s, cov_s, random_state=4)\n', (352, 392), False, 'import ot\n'), ((432, 450), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (439, 450), False, 'import ot\n'), ((460, 478), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (467, 478), False, 'import ot\n'), ((491, 506), 'ot.dist', 'ot.dist', (['xs', 'xs'], {}), '(xs, xs)\n', (498, 506), False, 'import ot\n'), ((517, 532), 'ot.dist', 'ot.dist', (['xt', 'xt'], {}), '(xt, xt)\n', (524, 532), False, 'import ot\n'), ((586, 657), 'ot.gromov.gromov_wasserstein', 'ot.gromov.gromov_wasserstein', (['C1', 'C2', 'p', 'q', '"""square_loss"""'], {'verbose': '(True)'}), "(C1, C2, p, q, 'square_loss', verbose=True)\n", (614, 657), False, 'import ot\n'), ((1028, 1092), 'ot.gromov.gromov_wasserstein2', 'ot.gromov.gromov_wasserstein2', (['C1', 'C2', 'p', 'q', '"""kl_loss"""'], {'log': '(True)'}), "(C1, C2, p, q, 'kl_loss', log=True)\n", (1057, 1092), False, 'import ot\n'), ((1109, 1174), 'ot.gromov.gromov_wasserstein2', 'ot.gromov.gromov_wasserstein2', (['C1', 'C2', 'p', 'q', '"""kl_loss"""'], {'log': '(False)'}), "(C1, C2, p, q, 'kl_loss', log=False)\n", (1138, 1174), False, 'import ot\n'), ((1202, 1255), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['gw', '(0)'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(gw, 0, atol=0.1, rtol=0.1)\n', (1228, 1255), True, 'import numpy as np\n'), ((1265, 1323), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['gw', 'gw_val'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(gw, gw_val, atol=0.1, rtol=0.1)\n', (1291, 1323), True, 
'import numpy as np\n'), ((1635, 1651), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1643, 1651), True, 'import numpy as np\n'), ((1665, 1691), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (1673, 1691), True, 'import numpy as np\n'), ((1704, 1778), 'ot.datasets.make_2D_samples_gauss', 'ot.datasets.make_2D_samples_gauss', (['n_samples', 'mu_s', 'cov_s'], {'random_state': '(42)'}), '(n_samples, mu_s, cov_s, random_state=42)\n', (1737, 1778), False, 'import ot\n'), ((1818, 1836), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (1825, 1836), False, 'import ot\n'), ((1846, 1864), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (1853, 1864), False, 'import ot\n'), ((1877, 1892), 'ot.dist', 'ot.dist', (['xs', 'xs'], {}), '(xs, xs)\n', (1884, 1892), False, 'import ot\n'), ((1903, 1918), 'ot.dist', 'ot.dist', (['xt', 'xt'], {}), '(xt, xt)\n', (1910, 1918), False, 'import ot\n'), ((1972, 2073), 'ot.gromov.entropic_gromov_wasserstein', 'ot.gromov.entropic_gromov_wasserstein', (['C1', 'C2', 'p', 'q', '"""square_loss"""'], {'epsilon': '(0.0005)', 'verbose': '(True)'}), "(C1, C2, p, q, 'square_loss', epsilon=\n 0.0005, verbose=True)\n", (2009, 2073), False, 'import ot\n'), ((2306, 2398), 'ot.gromov.entropic_gromov_wasserstein2', 'ot.gromov.entropic_gromov_wasserstein2', (['C1', 'C2', 'p', 'q', '"""kl_loss"""'], {'epsilon': '(0.01)', 'log': '(True)'}), "(C1, C2, p, q, 'kl_loss', epsilon=\n 0.01, log=True)\n", (2344, 2398), False, 'import ot\n'), ((2431, 2484), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['gw', '(0)'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(gw, 0, atol=0.1, rtol=0.1)\n', (2457, 2484), True, 'import numpy as np\n'), ((2776, 2836), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss"""', 'ns'], {'random_state': '(42)'}), "('3gauss', ns, random_state=42)\n", (2805, 2836), False, 'import ot\n'), ((2851, 2912), 'ot.datasets.make_data_classif', 
'ot.datasets.make_data_classif', (['"""3gauss2"""', 'nt'], {'random_state': '(42)'}), "('3gauss2', nt, random_state=42)\n", (2880, 2912), False, 'import ot\n'), ((2925, 2936), 'ot.dist', 'ot.dist', (['Xs'], {}), '(Xs)\n', (2932, 2936), False, 'import ot\n'), ((2947, 2958), 'ot.dist', 'ot.dist', (['Xt'], {}), '(Xt)\n', (2954, 2958), False, 'import ot\n'), ((3362, 3422), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb.shape', '(n_samples, n_samples)'], {}), '(Cb.shape, (n_samples, n_samples))\n', (3388, 3422), True, 'import numpy as np\n'), ((3755, 3816), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb2.shape', '(n_samples, n_samples)'], {}), '(Cb2.shape, (n_samples, n_samples))\n', (3781, 3816), True, 'import numpy as np\n'), ((3903, 3963), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss"""', 'ns'], {'random_state': '(42)'}), "('3gauss', ns, random_state=42)\n", (3932, 3963), False, 'import ot\n'), ((3978, 4039), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss2"""', 'nt'], {'random_state': '(42)'}), "('3gauss2', nt, random_state=42)\n", (4007, 4039), False, 'import ot\n'), ((4052, 4063), 'ot.dist', 'ot.dist', (['Xs'], {}), '(Xs)\n', (4059, 4063), False, 'import ot\n'), ((4074, 4085), 'ot.dist', 'ot.dist', (['Xt'], {}), '(Xt)\n', (4081, 4085), False, 'import ot\n'), ((4540, 4600), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb.shape', '(n_samples, n_samples)'], {}), '(Cb.shape, (n_samples, n_samples))\n', (4566, 4600), True, 'import numpy as np\n'), ((4975, 5036), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Cb2.shape', '(n_samples, n_samples)'], {}), '(Cb2.shape, (n_samples, n_samples))\n', (5001, 5036), True, 'import numpy as np\n'), ((5108, 5124), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5116, 5124), True, 'import numpy as np\n'), ((5138, 5164), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), 
'([[1, 0], [0, 1]])\n', (5146, 5164), True, 'import numpy as np\n'), ((5177, 5251), 'ot.datasets.make_2D_samples_gauss', 'ot.datasets.make_2D_samples_gauss', (['n_samples', 'mu_s', 'cov_s'], {'random_state': '(42)'}), '(n_samples, mu_s, cov_s, random_state=42)\n', (5210, 5251), False, 'import ot\n'), ((5292, 5323), 'numpy.random.randn', 'np.random.randn', (['xs.shape[0]', '(2)'], {}), '(xs.shape[0], 2)\n', (5307, 5323), True, 'import numpy as np\n'), ((5361, 5379), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (5368, 5379), False, 'import ot\n'), ((5389, 5407), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (5396, 5407), False, 'import ot\n'), ((5420, 5435), 'ot.dist', 'ot.dist', (['xs', 'xs'], {}), '(xs, xs)\n', (5427, 5435), False, 'import ot\n'), ((5446, 5461), 'ot.dist', 'ot.dist', (['xt', 'xt'], {}), '(xt, xt)\n', (5453, 5461), False, 'import ot\n'), ((5515, 5530), 'ot.dist', 'ot.dist', (['ys', 'yt'], {}), '(ys, yt)\n', (5522, 5530), False, 'import ot\n'), ((5565, 5657), 'ot.gromov.fused_gromov_wasserstein', 'ot.gromov.fused_gromov_wasserstein', (['M', 'C1', 'C2', 'p', 'q', '"""square_loss"""'], {'alpha': '(0.5)', 'log': '(True)'}), "(M, C1, C2, p, q, 'square_loss', alpha=\n 0.5, log=True)\n", (5599, 5657), False, 'import ot\n'), ((6043, 6136), 'ot.gromov.fused_gromov_wasserstein2', 'ot.gromov.fused_gromov_wasserstein2', (['M', 'C1', 'C2', 'p', 'q', '"""square_loss"""'], {'alpha': '(0.5)', 'log': '(True)'}), "(M, C1, C2, p, q, 'square_loss', alpha=\n 0.5, log=True)\n", (6078, 6136), False, 'import ot\n'), ((6159, 6213), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fgw', '(0)'], {'atol': '(0.1)', 'rtol': '(0.1)'}), '(fgw, 0, atol=0.1, rtol=0.1)\n', (6185, 6213), True, 'import numpy as np\n'), ((6465, 6483), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6479, 6483), True, 'import numpy as np\n'), ((6528, 6588), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss"""', 
'ns'], {'random_state': '(42)'}), "('3gauss', ns, random_state=42)\n", (6557, 6588), False, 'import ot\n'), ((6603, 6664), 'ot.datasets.make_data_classif', 'ot.datasets.make_data_classif', (['"""3gauss2"""', 'nt'], {'random_state': '(42)'}), "('3gauss2', nt, random_state=42)\n", (6632, 6664), False, 'import ot\n'), ((6677, 6708), 'numpy.random.randn', 'np.random.randn', (['Xs.shape[0]', '(2)'], {}), '(Xs.shape[0], 2)\n', (6692, 6708), True, 'import numpy as np\n'), ((6719, 6750), 'numpy.random.randn', 'np.random.randn', (['Xt.shape[0]', '(2)'], {}), '(Xt.shape[0], 2)\n', (6734, 6750), True, 'import numpy as np\n'), ((6763, 6774), 'ot.dist', 'ot.dist', (['Xs'], {}), '(Xs)\n', (6770, 6774), False, 'import ot\n'), ((6785, 6796), 'ot.dist', 'ot.dist', (['Xt'], {}), '(Xt)\n', (6792, 6796), False, 'import ot\n'), ((7164, 7223), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['C.shape', '(n_samples, n_samples)'], {}), '(C.shape, (n_samples, n_samples))\n', (7190, 7223), True, 'import numpy as np\n'), ((7229, 7290), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X.shape', '(n_samples, ys.shape[1])'], {}), '(X.shape, (n_samples, ys.shape[1]))\n', (7255, 7290), True, 'import numpy as np\n'), ((7306, 7335), 'numpy.random.randn', 'np.random.randn', (['n_samples', '(2)'], {}), '(n_samples, 2)\n', (7321, 7335), True, 'import numpy as np\n'), ((7350, 7371), 'ot.dist', 'ot.dist', (['xalea', 'xalea'], {}), '(xalea, xalea)\n', (7357, 7371), False, 'import ot\n'), ((7751, 7810), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['C.shape', '(n_samples, n_samples)'], {}), '(C.shape, (n_samples, n_samples))\n', (7777, 7810), True, 'import numpy as np\n'), ((7816, 7877), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X.shape', '(n_samples, ys.shape[1])'], {}), '(X.shape, (n_samples, ys.shape[1]))\n', (7842, 7877), True, 'import numpy as np\n'), ((7894, 7933), 'numpy.random.randn', 'np.random.randn', (['n_samples', 
'ys.shape[1]'], {}), '(n_samples, ys.shape[1])\n', (7909, 7933), True, 'import numpy as np\n'), ((8326, 8385), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['C.shape', '(n_samples, n_samples)'], {}), '(C.shape, (n_samples, n_samples))\n', (8352, 8385), True, 'import numpy as np\n'), ((8391, 8452), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X.shape', '(n_samples, ys.shape[1])'], {}), '(X.shape, (n_samples, ys.shape[1]))\n', (8417, 8452), True, 'import numpy as np\n'), ((908, 936), 'numpy.eye', 'np.eye', (['n_samples', 'n_samples'], {}), '(n_samples, n_samples)\n', (914, 936), True, 'import numpy as np\n'), ((984, 997), 'numpy.flipud', 'np.flipud', (['Id'], {}), '(Id)\n', (993, 997), True, 'import numpy as np\n'), ((3148, 3166), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (3155, 3166), False, 'import ot\n'), ((3596, 3614), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (3603, 3614), False, 'import ot\n'), ((4302, 4320), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (4309, 4320), False, 'import ot\n'), ((4801, 4819), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (4808, 4819), False, 'import ot\n'), ((5897, 5925), 'numpy.eye', 'np.eye', (['n_samples', 'n_samples'], {}), '(n_samples, n_samples)\n', (5903, 5925), True, 'import numpy as np\n'), ((5973, 5986), 'numpy.flipud', 'np.flipud', (['Id'], {}), '(Id)\n', (5982, 5986), True, 'import numpy as np\n'), ((3080, 3091), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (3087, 3091), False, 'import ot\n'), ((3093, 3104), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (3100, 3104), False, 'import ot\n'), ((3527, 3538), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (3534, 3538), False, 'import ot\n'), ((3540, 3551), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (3547, 3551), False, 'import ot\n'), ((4225, 4236), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (4232, 4236), False, 'import ot\n'), ((4238, 4249), 'ot.unif', 'ot.unif', 
(['nt'], {}), '(nt)\n', (4245, 4249), False, 'import ot\n'), ((4723, 4734), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (4730, 4734), False, 'import ot\n'), ((4736, 4747), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (4743, 4747), False, 'import ot\n'), ((6888, 6899), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (6895, 6899), False, 'import ot\n'), ((6901, 6912), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (6908, 6912), False, 'import ot\n'), ((7053, 7071), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (7060, 7071), False, 'import ot\n'), ((7640, 7658), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (7647, 7658), False, 'import ot\n'), ((8011, 8022), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (8018, 8022), False, 'import ot\n'), ((8024, 8035), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (8031, 8035), False, 'import ot\n'), ((8200, 8218), 'ot.unif', 'ot.unif', (['n_samples'], {}), '(n_samples)\n', (8207, 8218), False, 'import ot\n'), ((7447, 7458), 'ot.unif', 'ot.unif', (['ns'], {}), '(ns)\n', (7454, 7458), False, 'import ot\n'), ((7460, 7471), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (7467, 7471), False, 'import ot\n')] |
import docker
import os
import glob
import json
import datetime
import numpy
# Low-level Docker API client talking to the local daemon via its Unix socket.
client = docker.APIClient(base_url='unix://var/run/docker.sock')
def mem_stats(container_id):
    """Return the top-level memory counters plus name for one container.

    Takes a single snapshot from the Docker daemon (stream=False) and keeps
    only 'usage' and 'max_usage'; the per-cgroup details under ['stats']
    (e.g. rss) are intentionally not included here.
    """
    docker_stats = client.stats(container_id, decode=True, stream=False)
    # Renamed from `mem_stats`, which shadowed this function's own name.
    memory = docker_stats['memory_stats']
    wanted_keys = ('usage', 'max_usage')
    # Some keys can be absent depending on daemon / cgroup version.
    container_mem_stats = {k: memory[k] for k in wanted_keys if k in memory}
    container_mem_stats['name'] = docker_stats['name']
    return container_mem_stats
def collect_stats(sockshop_containers, ntimes=1):
    """Sample memory stats for each container *ntimes* and print averages.

    For every sampling round, queries the Docker daemon once per container
    and accumulates usage / max_usage / rss / total_rss, then prints the
    per-metric means (in MiB) and the container maximizing each metric.
    """
    # Per-round sample shape: {"name": ..., "timestamp": ..., "docker_stats": ...}
    # {"name" : "component name", "timestamps": "datetime", "docker_stats":...}
    stats_average = dict()
    for c_id in sockshop_containers:
        stats_average[c_id] = {"usage": [],
                               "max_usage": [], "rss": [], "total_rss": []}
    for i in range(ntimes):
        stats = []
        for container_id in sockshop_containers:
            docker_stats = client.stats(
                container_id, decode=True, stream=False)
            container_stats = dict()
            container_stats["timestamp"] = datetime.datetime.now().isoformat()
            container_stats["name"] = docker_stats['name']
            container_stats["docker_stats"] = docker_stats
            stats.append(container_stats)
            # print(stats_average)
            stats_average[container_id]['name'] = docker_stats['name']
            stats_average[container_id]['usage'].append(
                docker_stats['memory_stats']['usage'])
            stats_average[container_id]['max_usage'].append(
                docker_stats['memory_stats']['max_usage'])
            stats_average[container_id]['rss'].append(
                docker_stats['memory_stats']['stats']['rss'])
            stats_average[container_id]['total_rss'].append(
                docker_stats['memory_stats']['stats']['total_rss'])
        # NOTE(review): file name depends on ntimes, not on i, so each round
        # overwrites the same file with only that round's samples — confirm
        # whether per-round files were intended.
        with open("tosker_{0}.log".format(ntimes), 'w') as outfile:
            json.dump(stats, outfile)
    # print(stats_average)
    # Reduce the collected samples to per-container averages.
    n_containers = 0
    for key, value in stats_average.items():
        stats_average[key]["avg_usage"] = numpy.mean(value['usage'])
        stats_average[key]["avg_max_usage"] = numpy.mean(value['max_usage'])
        stats_average[key]["avg_rss"] = numpy.mean(value['rss'])
        stats_average[key]["avg_total_rss"] = numpy.mean(value['total_rss'])
        n_containers += 1
    # Container id with the highest average value for each metric.
    container_usage = max(stats_average.keys(), key=(
        lambda k: stats_average[k]["avg_usage"]))
    container_max_usage = max(stats_average.keys(), key=(
        lambda k: stats_average[k]["avg_max_usage"]))
    container_rss = max(stats_average.keys(), key=(
        lambda k: stats_average[k]["avg_rss"]))
    container_total_rss = max(stats_average.keys(), key=(
        lambda k: stats_average[k]["avg_total_rss"]))
    avg_usage = []
    avg_max_usage = []
    avg_rss = []
    avg_total_rss = []
    for key, value in stats_average.items():
        avg_usage.append(value["avg_usage"])
        avg_max_usage.append(value["avg_max_usage"])
        avg_rss.append(value["avg_rss"])
        avg_total_rss.append(value["avg_total_rss"])
    # print(stats_average)
    # Fleet-wide means, converted from bytes to MiB.
    print(n_containers, "containers")
    print("Usage :{0},\nMax usage:{1},\nRss:{2},\nTotal rss:{3}".format(
        numpy.mean(avg_usage) / 1024 / 1024,
        numpy.mean(avg_max_usage) / 1024 / 1024,
        numpy.mean(avg_rss) / 1024 / 1024,
        numpy.mean(avg_total_rss) / 1024 / 1024
    ))
    # NOTE(review): container_max_usage / container_rss / container_total_rss
    # are computed, but every printed value indexes stats_average[container_usage]
    # — looks like a copy-paste slip; confirm intended keys before fixing.
    print("Max containers: \n name {0} usage:{1},\n name {2} Max usage:{3},Name {4} Rss:{5}, \n Name {6} Total rss:{7}".format(
        stats_average[container_usage]['name'], stats_average[container_usage]["avg_usage"] / 1024 / 1024,
        stats_average[container_max_usage]['name'], stats_average[container_usage]["avg_max_usage"] / 1024 / 1024,
        stats_average[container_rss]['name'], stats_average[container_usage]["avg_rss"] / 1024 / 1024,
        stats_average[container_usage]['name'], stats_average[container_usage]["avg_total_rss"] / 1024 / 1024
    ))
# '{p[first]} {p[last]}'.format(p=person)
# Locate the sockshop-app directory next to this script and collect its
# TOSCA/TosKer YAML files, then print memory stats for every running
# container whose name contains "sockshop".
sockshop_dir = os.path.join(os.path.dirname(
    os.path.abspath(__file__)), "sockshop-app")
print(sockshop_dir)
tosker_yaml_files = list()
os.chdir(sockshop_dir)
for tosker_file in glob.glob("*.yaml"):
    tosker_yaml_files.append(os.path.join(sockshop_dir, tosker_file))
# run tosker
# for tosker_yaml_file in tosker_yaml_files:
#     print("Starting {} yaml file ...".format(
#         os.path.basename(os.path.normpath(tosker_yaml_file))))
#     os.system("tosker {0} create start".format(tosker_yaml_file))
# Ids of running containers whose first name contains "sockshop".
sockshop_containers = [container['Id'] for container in client.containers(
) if "sockshop" in container['Names'][0]]
# collect_stats(sockshop_containers, ntimes=3)
for i in map(mem_stats, sockshop_containers):
    print(i)
# client.stats(client.containers()[0]['Id'], stream=False)['memory_stats']
# memory_stats :{
# 'limit': 8274780160,
# 'max_usage': 634081280,
# 'usage': 598896640
# 'stats': {'inactive_anon': 16384,
# 'rss': 53108736,
# 'total_rss': 53108736
# 'mapped_file': 4907008,
# 'dirty': 3379200,
# 'active_file': 395481088,
# 'unevictable': 0,
# 'total_writeback': 0,
# 'total_cache': 545763328, 'active_anon': 53432320, 'total_pgpgout': 1152618,
# 'total_active_file': 395481088,
# 'inactive_file': 149889024, 'writeback': 0, 'total_unevictable': 0,
# 'total_pgpgin': 1290140, 'rss_huge': 16777216, 'pgpgin': 1290140,
# 'total_dirty': 3379200, 'pgmajfault': 0, 'total_mapped_file': 4907008,
# 'cache': 545763328, 'total_inactive_file': 149889024,
# 'total_inactive_anon': 16384, 'total_pgfault': 1497954, 'total_active_anon': 53432320,
# 'total_rss_huge': 16777216, 'hierarchical_memory_limit': 9223372036854771712,
# 'total_pgmajfault': 0, 'pgfault': 1497954, 'pgpgout': 1152618
# },
# }
# {
# "storage_stats":{ },
# "memory_stats":{
# "name":"/sockshop_group-go.front-end-node",
# "cpu_stats":{ },
# "precpu_stats":{ },
# "read":"2017-11-10T09:39:58.567678054Z",
# "num_procs":0,
# "blkio_stats":{ },
# "networks":{ },
# "preread":"2017-11-10T09:39:57.56778472Z",
# "pids_stats":{ },
# "id":"b3b991051ff614137685ccd6cd57dd02e63aacfe1a22440b1d81024f7d644466"
# }
| [
"json.dump",
"os.path.abspath",
"docker.APIClient",
"datetime.datetime.now",
"numpy.mean",
"glob.glob",
"os.path.join",
"os.chdir"
] | [((87, 142), 'docker.APIClient', 'docker.APIClient', ([], {'base_url': '"""unix://var/run/docker.sock"""'}), "(base_url='unix://var/run/docker.sock')\n", (103, 142), False, 'import docker\n'), ((4274, 4296), 'os.chdir', 'os.chdir', (['sockshop_dir'], {}), '(sockshop_dir)\n', (4282, 4296), False, 'import os\n'), ((4316, 4335), 'glob.glob', 'glob.glob', (['"""*.yaml"""'], {}), "('*.yaml')\n", (4325, 4335), False, 'import glob\n'), ((2161, 2187), 'numpy.mean', 'numpy.mean', (["value['usage']"], {}), "(value['usage'])\n", (2171, 2187), False, 'import numpy\n'), ((2234, 2264), 'numpy.mean', 'numpy.mean', (["value['max_usage']"], {}), "(value['max_usage'])\n", (2244, 2264), False, 'import numpy\n'), ((2305, 2329), 'numpy.mean', 'numpy.mean', (["value['rss']"], {}), "(value['rss'])\n", (2315, 2329), False, 'import numpy\n'), ((2376, 2406), 'numpy.mean', 'numpy.mean', (["value['total_rss']"], {}), "(value['total_rss'])\n", (2386, 2406), False, 'import numpy\n'), ((4182, 4207), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4197, 4207), False, 'import os\n'), ((4366, 4405), 'os.path.join', 'os.path.join', (['sockshop_dir', 'tosker_file'], {}), '(sockshop_dir, tosker_file)\n', (4378, 4405), False, 'import os\n'), ((2000, 2025), 'json.dump', 'json.dump', (['stats', 'outfile'], {}), '(stats, outfile)\n', (2009, 2025), False, 'import json\n'), ((1138, 1161), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1159, 1161), False, 'import datetime\n'), ((3328, 3349), 'numpy.mean', 'numpy.mean', (['avg_usage'], {}), '(avg_usage)\n', (3338, 3349), False, 'import numpy\n'), ((3373, 3398), 'numpy.mean', 'numpy.mean', (['avg_max_usage'], {}), '(avg_max_usage)\n', (3383, 3398), False, 'import numpy\n'), ((3422, 3441), 'numpy.mean', 'numpy.mean', (['avg_rss'], {}), '(avg_rss)\n', (3432, 3441), False, 'import numpy\n'), ((3465, 3490), 'numpy.mean', 'numpy.mean', (['avg_total_rss'], {}), '(avg_total_rss)\n', (3475, 3490), False, 'import 
numpy\n')] |
from telewavesim import utils as ut
from telewavesim.rmat_f import plane as pw_f
from telewavesim import conf as cf
import numpy as np
import pyfftw
from conftest import load_params
def test_plane_obs(load_params):
    """OBS plane-wave response: energy dominates on Z, tangential is null."""
    spectra = pw_f.plane_obs(cf.nt, cf.nlay, np.array(cf.wvtype, dtype='c'))
    fft = pyfftw.interfaces.numpy_fft.fft
    ux, uy, uz = (np.real(fft(component)) for component in spectra)

    def peak(trace):
        return np.max(np.abs(trace))

    # seismogram should be maximized on vertical component
    assert peak(uz) > peak(ux) > peak(uy), \
        'Failed! Energy is not maximized on vertical component'
    # tangential component should all be close to zero
    assert np.allclose(uy, np.zeros(len(uy))), 'non-zero values in uy'
def test_plane_land(load_params):
    """Land plane-wave response: Z dominates, tangential null, zero-lag radial."""
    spectra = pw_f.plane_land(cf.nt, cf.nlay, np.array(cf.wvtype, dtype='c'))
    fft = pyfftw.interfaces.numpy_fft.fft
    ux, uy, uz = (np.real(fft(component)) for component in spectra)

    def peak(trace):
        return np.max(np.abs(trace))

    # seismogram should be maximized on vertical component
    assert peak(uz) > peak(ux) > peak(uy), \
        'Failed! Energy is not maximized on vertical component'
    # tangential component should all be close to zero
    assert np.allclose(uy, np.zeros(len(uy))), 'non-zero values in uy'
    # Transfer functions: zero-lag amplitude should peak on the radial trace.
    transfer = ut.tf_from_xyz(ut.get_trxyz(ux, uy, uz))
    mid = int(transfer[0].stats.npts / 2)
    assert transfer[0].data[mid] > transfer[1].data[mid], \
        'Failed! Zero-lag is not maximized on radial component'
| [
"numpy.abs",
"telewavesim.utils.get_trxyz",
"pyfftw.interfaces.numpy_fft.fft",
"telewavesim.utils.tf_from_xyz",
"numpy.array"
] | [((1404, 1428), 'telewavesim.utils.get_trxyz', 'ut.get_trxyz', (['ux', 'uy', 'uz'], {}), '(ux, uy, uz)\n', (1416, 1428), True, 'from telewavesim import utils as ut\n'), ((1439, 1460), 'telewavesim.utils.tf_from_xyz', 'ut.tf_from_xyz', (['trxyz'], {}), '(trxyz)\n', (1453, 1460), True, 'from telewavesim import utils as ut\n'), ((266, 296), 'numpy.array', 'np.array', (['cf.wvtype'], {'dtype': '"""c"""'}), "(cf.wvtype, dtype='c')\n", (274, 296), True, 'import numpy as np\n'), ((315, 350), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yx'], {}), '(yx)\n', (346, 350), False, 'import pyfftw\n'), ((369, 404), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yy'], {}), '(yy)\n', (400, 404), False, 'import pyfftw\n'), ((423, 458), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yz'], {}), '(yz)\n', (454, 458), False, 'import pyfftw\n'), ((871, 901), 'numpy.array', 'np.array', (['cf.wvtype'], {'dtype': '"""c"""'}), "(cf.wvtype, dtype='c')\n", (879, 901), True, 'import numpy as np\n'), ((920, 955), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yx'], {}), '(yx)\n', (951, 955), False, 'import pyfftw\n'), ((974, 1009), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yy'], {}), '(yy)\n', (1005, 1009), False, 'import pyfftw\n'), ((1028, 1063), 'pyfftw.interfaces.numpy_fft.fft', 'pyfftw.interfaces.numpy_fft.fft', (['yz'], {}), '(yz)\n', (1059, 1063), False, 'import pyfftw\n'), ((538, 548), 'numpy.abs', 'np.abs', (['uz'], {}), '(uz)\n', (544, 548), True, 'import numpy as np\n'), ((559, 569), 'numpy.abs', 'np.abs', (['ux'], {}), '(ux)\n', (565, 569), True, 'import numpy as np\n'), ((580, 590), 'numpy.abs', 'np.abs', (['uy'], {}), '(uy)\n', (586, 590), True, 'import numpy as np\n'), ((1143, 1153), 'numpy.abs', 'np.abs', (['uz'], {}), '(uz)\n', (1149, 1153), True, 'import numpy as np\n'), ((1164, 1174), 'numpy.abs', 'np.abs', (['ux'], {}), '(ux)\n', (1170, 
1174), True, 'import numpy as np\n'), ((1185, 1195), 'numpy.abs', 'np.abs', (['uy'], {}), '(uy)\n', (1191, 1195), True, 'import numpy as np\n')] |
import base64
import os
import re
import random
import threading
import cv2
import numpy as np
from flask import *
from auth.Auth import Auth
from dao.FollowDAO import FollowDAO
from dao.FollowerDAO import FollowerDAO
from dao.FollowingDAO import FollowingDAO
from dao.ImageDAO import WorkDAO
from dao.InformationDAO import InformationDAO
from dao.UserDAO import UserDAO
from dao.addressDAO import AddressDAO
from pojo.Image import Work
from pojo.Information import Information
from pojo.User import User
from operation.tricks import *
from operation.ai import *
from Result import *
from pojo.address import Address
# Flask application instance; every /user/* route below registers on it.
app = Flask(__name__)
# 登录账户
# 已修改
@app.route('/user/login', methods=['POST'])
def login():
    """Authenticate a user by phone + password and issue an auth token.

    Expects JSON {"phone": ..., "password": ...}. Returns a plain string
    for malformed input or DB failure, otherwise a JSON result:
    status -1 when no user matches, else the Auth.authorize payload.
    """
    data = request.get_json()
    if 'phone' not in data or 'password' not in data:
        return "信息缺失"
    phone = data['phone']
    password = data['password']
    # Guard against explicit JSON nulls for either credential.
    if phone is None:
        return "The phone number is empty!"
    if password is None:
        return "The password is empty!"
    user = User()
    user.set_phone(phone)
    user.set_password(password)
    try:
        user = UserDAO().retrieve(user)
    except Exception:  # narrowed from a bare except: don't swallow SystemExit etc.
        return "Server Failure!"
    if user is None:
        # No account with these credentials.
        return jsonify(return_status(-1))
    # Credentials OK: hand out the token payload.
    return jsonify(Auth.authorize(user))
# 注册账户
# 已修改
@app.route('/user/register', methods=['POST'])
def register():
    """Create a new account from JSON {"phone", "password"}.

    JSON result statuses: 0 success, -1 phone already used, -2 bad phone
    format. Malformed input or DB failure returns a plain string.
    """
    data = request.get_json()
    if 'phone' not in data or 'password' not in data:
        return "信息缺失"
    phone = data['phone']
    password = data['password']
    # Guard against explicit JSON nulls for either credential.
    if phone is None:
        return "The phone number is empty!"
    if password is None:
        return "The password is empty!"
    # Reject phones that already have an account.
    if verify_phone(phone):
        return jsonify(return_status(-1))
    # Reject badly formatted phone numbers.
    if verify_phone_format(phone):
        return jsonify(return_status(-2))
    user = User()
    user.set_phone(phone)
    user.set_password(password)
    try:
        UserDAO().add(user)
        return jsonify(return_status(0))  # registered
    except Exception:  # narrowed from a bare except
        return "Server failure!"
# 验证电话号码
def verify_phone(phone):
    # TODO(review): stub — always reports the phone as unused. Should query
    # the user table for an existing account with this phone number.
    return False
# 验证手机格式
def verify_phone_format(phone):
    """Return True when *phone* is NOT a valid mainland-China mobile number.

    The caller (register) treats True as "format incorrect" and rejects the
    registration with status -2. The previous stub always returned False,
    accepting any string as a phone number.
    """
    if not isinstance(phone, str):
        return True
    # 11 digits starting 13x-19x (current CN mobile prefixes).
    return re.fullmatch(r'1[3-9]\d{9}', phone) is None
# 退出账号
@app.route('/user/logout', methods=['GET'])
def logout():
    """Log the caller out; stateless endpoint that always reports success."""
    return jsonify(return_status(0))
# 获取个人信息
# 已修改
@app.route('/user/profile', methods=['GET'])
def getInformation():
    """Fetch a user's profile.

    With a ?userid= query parameter, returns that user's profile; otherwise
    the profile of the authenticated caller (id from the JWT).
    Statuses: -1 user not found, -2 bad token or server error.
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    if auth_user_id is None:
        # Missing or invalid Authorization header.
        return jsonify(return_status(-2))
    user_id = request.args.get('userid')
    information = Information()
    # Fall back to the token's user id when no explicit target is given.
    information.set_user_id(auth_user_id if user_id is None else user_id)
    try:
        information = InformationDAO().retrieve(information)
    except Exception:  # narrowed from a bare except
        return jsonify(return_status(-2))
    if information is None:
        # No such user.
        return jsonify(return_status(-1))
    return jsonify(return_Information(0, information))
# 修改个人信息
# 已修改
@app.route('/user/profile', methods=['POST'])
def modifyInformation():
    """Update the authenticated user's profile.

    Requires JSON with NickName, Avatar, Signature and BackgroundPhoto (all
    mandatory; values are stringified). Statuses: -1 user not found, -2 bad
    token or server error; otherwise whatever InformationDAO.update returns.
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    if auth_user_id is None:
        # Missing or invalid Authorization header.
        return jsonify(return_status(-2))
    # Make sure the account actually exists before touching the profile.
    user = User()
    user.set_user_id(auth_user_id)
    try:
        retrieve_user = UserDAO().retrieve(user)
    except Exception:  # narrowed from a bare except
        return jsonify(return_status(-2))
    if retrieve_user is None:
        return jsonify(return_status(-1))
    information = Information()
    information.set_user_id(auth_user_id)
    data = request.get_json()
    # Payload key -> Information setter; replaces four copy-pasted blocks.
    # Order preserved so the first missing key reported is the same as before.
    setters = (
        ('NickName', information.set_nick_name),
        ('Avatar', information.set_avatar),
        ('Signature', information.set_signature),
        ('BackgroundPhoto', information.set_background_photo),
    )
    for field, setter in setters:
        if field not in data:
            return "上传的信息不完整"
        setter(str(data[field]))
    result = InformationDAO().update(information)
    return jsonify(return_status(result))
# 创建文件夹
def mkdir(folder_path):
    """Create *folder_path* (including parents) if missing; return the path.

    Uses exist_ok=True, which removes the check-then-create race the
    previous os.path.exists + os.makedirs sequence had.
    """
    os.makedirs(folder_path, exist_ok=True)
    return folder_path
# 上传头像
@app.route('/user/avatar', methods=['POST'])
def upload_avatar():
    """Store the raw request body as the caller's avatar image.

    Writes the bytes to avatar/<user_id>/avatar.jpg and records that path
    in the user's profile. Statuses: -1 user not found, -2 bad token or
    server error; on success returns the stored path.
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    # Authorization header missing or invalid.
    if auth_user_id is None:
        result = return_status(-2)
        return jsonify(result)
    # Look up the account.
    user = User()
    user.set_user_id(auth_user_id)
    user_dao = UserDAO()
    try:
        retrieve_user = user_dao.retrieve(user)
    except:
        result = return_status(-2)
        return jsonify(result)
    # User does not exist.
    if retrieve_user is None:
        result = return_status(-1)
        return jsonify(result)
    # Build the per-user storage path.
    folder_path = 'avatar/' + str(auth_user_id)
    mkdir(folder_path)
    information = Information()
    information.set_user_id(auth_user_id)
    path = folder_path + '/avatar.jpg'
    information.set_avatar(path)
    # Read the avatar image from the raw request body.
    # NOTE(review): bytes are written unvalidated — presumably JPEG; confirm.
    try:
        avatar = request.get_data()
        if avatar is None:
            return "上传的图片为空"
        with open(path, 'wb') as f:
            f.write(avatar)
    except:
        result = return_status(-2)
        return jsonify(result)
    # Persist the new avatar path in the database.
    information_dao = InformationDAO()
    try:
        result = information_dao.update_avatar(information)
        if result is not None:
            result = return_homepage(result, path)
            return jsonify(result)
        else:
            result = return_status(-2)
            return jsonify(result)
    except:
        result = return_status(-2)
        return jsonify(result)
# 上传个人主页图
@app.route('/user/homepage', methods=['POST'])
def upload_homepage():
    """Upload the authenticated user's homepage background image.

    The raw request body holds the image bytes; they are written to
    ``background/<user_id>/background.jpg`` and that path is stored
    through InformationDAO. Returns a JSON status payload.
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
    if auth_user_id is None:
        result = return_status(-2)
        return jsonify(result)
    # Load the user
    user = User()
    user.set_user_id(auth_user_id)
    user_dao = UserDAO()
    try:
        retrieve_user = user_dao.retrieve(user)
    except Exception:
        result = return_status(-2)
        return jsonify(result)
    # User does not exist
    if retrieve_user is None:
        result = return_status(-1)
        return jsonify(result)
    # Build the storage path
    folder_path = 'background/' + str(auth_user_id)
    mkdir(folder_path)
    information = Information()
    path = folder_path + '/background.jpg'
    information.set_user_id(auth_user_id)
    information.set_background_photo(path)
    # Read the uploaded image bytes
    try:
        homepage = request.get_data()
        # BUG FIX: get_data() returns b'' (never None) for an empty body,
        # so the original `homepage is None` check could never fire.
        if not homepage:
            return "上传的图片为空"
        with open(path, 'wb') as f:
            f.write(homepage)
    except Exception:
        result = return_status(-2)
        return jsonify(result)
    # Persist the new background path
    information_dao = InformationDAO()
    try:
        result = information_dao.update_background_photo(information)
        if result is not None:
            result = return_homepage(result, path)
            return jsonify(result)
        else:
            result = return_status(-2)
            return jsonify(result)
    except Exception:
        result = return_status(-2)
        return jsonify(result)
# 获取我关注的列表
@app.route('/user/following', methods=['GET'])
def following():
    """Return the list of users the authenticated user follows."""
    token = request.headers.get('Authorization')
    auth_user_id = Auth.identify(token)
    if auth_user_id is None:
        # Bad Authorization header
        return jsonify(return_status(-2))
    # The caller must correspond to an existing user.
    user = User()
    user.set_user_id(auth_user_id)
    try:
        retrieve_user = UserDAO().retrieve(user)
    except:
        return jsonify(return_status(-2))
    if retrieve_user is None:
        # Unknown user
        return jsonify(return_status(-1))
    try:
        followings = FollowingDAO().retrieve(retrieve_user.get_user_id())
        return jsonify(return_following(followings))
    except:
        return jsonify(return_status(-2))
# 点击/取消关注
@app.route('/user/follow', methods=['POST'])
def follow():
    """Follow or unfollow a user, depending on the 'Cancel' flag."""
    token = request.headers.get('Authorization')
    auth_user_id = Auth.identify(token)
    if auth_user_id is None:
        # Bad Authorization header
        return jsonify(return_status(-2))
    # The caller must correspond to an existing user.
    user = User()
    user.set_user_id(auth_user_id)
    try:
        retrieve_user = UserDAO().retrieve(user)
    except:
        return jsonify(return_status(-2))
    if retrieve_user is None:
        # Unknown user
        return jsonify(return_status(-1))
    data = request.get_json()
    if 'UserID' not in data or 'Cancel' not in data:
        return "信息缺失"
    user_id = data['UserID']
    cancel_follow = data['Cancel']
    follow_dao = FollowDAO()
    # The client may send the flag as a bool or as a 'True'/'False' string.
    if cancel_follow == 'True' or cancel_follow == 'true' or cancel_follow is True:
        follow_dao.delete(user_id, auth_user_id)
        return jsonify(return_status(1))
    if cancel_follow == 'False' or cancel_follow == 'false' or cancel_follow is False:
        follow_dao.add(user_id, auth_user_id)
        return jsonify(return_status(0))
    else:
        # Unrecognized flag value
        return jsonify(return_status(-1))
# 获取关注我的列表
@app.route('/user/follower', methods=['GET'])
def follower():
    """Return the list of users that follow the authenticated user."""
    token = request.headers.get('Authorization')
    auth_user_id = Auth.identify(token)
    if auth_user_id is None:
        # Bad Authorization header
        return jsonify(return_status(-2))
    # The caller must correspond to an existing user.
    user = User()
    user.set_user_id(auth_user_id)
    try:
        retrieve_user = UserDAO().retrieve(user)
    except:
        return jsonify(return_status(-2))
    if retrieve_user is None:
        # Unknown user
        return jsonify(return_status(-1))
    try:
        followers = FollowerDAO().retrieve(retrieve_user.get_user_id())
        return jsonify(return_follower(followers))
    except:
        return jsonify(return_status(-2))
# 获取11位随机数
# Generate a random 11-digit numeric work id.
def get_work_id():
    """Return a random integer in [10**10, 10**11 - 1]."""
    return random.randrange(10 ** 10, 10 ** 11)
# 获取个人作品
@app.route('/illustration/mywork', methods=['GET'])
def get_myworks():
    """Return a user's own works in 'home' or 'detail' format.

    Query params: userid (required), type ('home'|'detail'), top
    ('true'/'True' selects a not-yet-implemented "top" listing).
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
    if auth_user_id is None:
        result = return_status(-2)
        return jsonify(result)
    user_id = request.args.get('userid')
    if user_id is None:
        return "信息不完整"
    # Load the target user
    user = User()
    user.set_user_id(user_id)
    user_dao = UserDAO()
    try:
        retrieve_user = user_dao.retrieve(user)
    except Exception:
        result = return_status(-2)
        return jsonify(result)
    # User does not exist
    if retrieve_user is None:
        result = return_status(-1)
        return jsonify(result)
    type = request.args.get('type')
    if type is None:
        return "信息不完整"
    type = str(type)
    top = request.args.get('top')
    if top is None:
        return "信息不完整"
    top = str(top)
    work_dao = WorkDAO()
    works = work_dao.retrieve(user_id)
    if type == 'home':
        # BUG FIX: the original compared with `top is 'true'` — identity
        # comparison against a string literal, which is unreliable for
        # strings built at runtime; use equality instead.
        if top == 'true' or top == 'True':
            pass  # "top" listing not implemented yet
        else:
            result = return_home(works)
            return jsonify(result)
    if type == 'detail':
        if top == 'true' or top == 'True':
            pass  # "top" listing not implemented yet
        else:
            result = return_detail(works)
            return jsonify(result)
    else:
        return "信息不正确"
# 获取作品图片
@app.route('/illustration/image', methods=['GET'])
def get_image():
    """Serve a work's image (sketch or colorized result) as JPEG bytes.

    Query params: id (required work id), size (only 'mid' or absent is
    accepted), type ('sketch' for the sketch image, absent for the
    colorized work image; anything else is rejected).
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
    if auth_user_id is None:
        result = return_status(-2)
        return jsonify(result)
    id = request.args.get('id')
    if id is None:
        return "信息不完整"
    id = int(id)
    size = request.args.get('size')
    size = None if size is None else str(size)
    type = request.args.get('type')
    type = None if type is None else str(type)
    path = WorkDAO().retrieve_address(id)
    # Only the medium size (or no size at all) is supported; both sizes
    # resolved to the same files in the original code. Debug prints removed.
    if size not in (None, 'mid'):
        return "信息不正确"
    # BUG FIX: the original re-tested `type == 'sketch'` inside the else
    # branch of an identical test — that condition was dead code and is
    # removed here; behavior is unchanged.
    if type == 'sketch':
        path = path + '/sketch.jpg'
    elif type is None:
        path = path + '/work.jpg'
    else:
        return "信息不正确"
    try:
        with open(path, 'rb') as f:
            image = f.read()
        response = Response(image, mimetype='image/jpg')
        return response
    except Exception:
        return "Server Failure"
# 获取收藏作品
@app.route('/illustration/mylike', methods=['GET'])
def get_mylike():
    """Return the works a user has liked, paginated, in 'home' or 'detail' format.

    Query params: userid, start, count, type ('home' | 'detail').
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
    if auth_user_id is None:
        result = return_status(-2)
        return jsonify(result)
    user_id = request.args.get('userid')
    if user_id is None:
        return "信息不完整"
    user_id = int(user_id)
    # Load the target user
    user = User()
    user.set_user_id(user_id)
    user_dao = UserDAO()
    try:
        retrieve_user = user_dao.retrieve(user)
    except:
        result = return_status(-2)
        return jsonify(result)
    # User does not exist
    if retrieve_user is None:
        result = return_status(-1)
        return jsonify(result)
    # Resolve liked work ids to full work records
    my_like_work_ids = user_dao.get_my_like(retrieve_user)
    my_like_works = WorkDAO().list(my_like_work_ids)
    # Pagination window: start index and item count are both required
    start = request.args.get('start')
    if start is None:
        return "信息不完整"
    start = int(start)
    count = request.args.get('count')
    if count is None:
        return "信息不完整"
    count = int(count)
    type = request.args.get('type')
    if type is None:
        return "信息不完整"
    type = str(type)
    if type == 'home':
        result = return_home_my_like(my_like_works, start, count)
        return jsonify(result)
    if type == 'detail':
        result = return_detail_my_like(my_like_works, start, count)
        return jsonify(result)
    else:
        return "信息不正确"
# 收藏作品
@app.route('/illustration/mylike', methods=['POST'])
def like():
    """Add or remove a work from the authenticated user's likes."""
    token = request.headers.get('Authorization')
    auth_user_id = Auth.identify(token)
    if auth_user_id is None:
        # Bad Authorization header
        return jsonify(return_status(-2))
    # The caller must correspond to an existing user.
    user = User()
    user.set_user_id(auth_user_id)
    try:
        retrieve_user = UserDAO().retrieve(user)
    except:
        return jsonify(return_status(-2))
    if retrieve_user is None:
        # Unknown user
        return jsonify(return_status(-1))
    data = request.get_json()
    if 'id' not in data or 'Cancel' not in data:
        return "信息缺失"
    work_id = data['id']
    cancel_like = data['Cancel']
    work_dao = WorkDAO()
    # The client may send the flag as a bool or as a 'True'/'False' string.
    if cancel_like == 'True' or cancel_like == 'true' or cancel_like is True:
        work_dao.delete_my_like(auth_user_id, work_id)
        return jsonify(return_status(1))
    if cancel_like == 'False' or cancel_like == 'false' or cancel_like is False:
        work_dao.add_my_like(auth_user_id, work_id)
        return jsonify(return_status(0))
    else:
        # Unrecognized flag value
        return jsonify(return_status(-1))
# 获取作品详情
@app.route('/illustration/sketchwork', methods=['GET'])
def get_sketchwork():
    """Return the detail information of a single work by its id."""
    token = request.headers.get('Authorization')
    auth_user_id = Auth.identify(token)
    if auth_user_id is None:
        # Bad Authorization header
        return jsonify(return_status(-2))
    work_id = request.args.get('id')
    if work_id is None:
        return "信息不完整"
    work_id = int(work_id)
    work_dao = WorkDAO()
    try:
        work = work_dao.retrieve_information(work_id)
        return jsonify(return_detail_work(work))
    except:
        return 'Server Failure'
# 搜索作品
@app.route('/illustration/search', methods=['GET'])
def search():
    """Search works by keywords (placeholder — not implemented yet)."""
    keywords = str(request.args.get('keywords'))
    return "search"
# 获取受欢迎的线稿
@app.route('/illustration/favorite_sketch', methods=['GET'])
def get_favorite_sketch():
    # Placeholder endpoint: the popular-sketches feature is not implemented yet.
    return 'get_favorite_sketch'
# 获取受欢迎的上色
@app.route('/illustration/favorite_colorization', methods=['GET'])
def get_favorite_colorization():
    # Placeholder endpoint: the popular-colorizations feature is not implemented yet.
    return 'get_favorite_colorization'
# 今日推荐作品
@app.route('/illustration/todays', methods=['GET'])
def get_todays():
    # Placeholder endpoint: today's recommendations feature is not implemented yet.
    return "get_todays"
# 发布作品
@app.route('/illustration/upload', methods=['POST'])
def upload():
    """Publish a work: build a Work and an Address record from the JSON body
    and persist both through WorkDAO. Returns a JSON status payload.
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
    if auth_user_id is None:
        result = return_status(-2)
        return jsonify(result)
    # Load the user
    user = User()
    user.set_user_id(auth_user_id)
    user_dao = UserDAO()
    try:
        retrieve_user = user_dao.retrieve(user)
    except:
        result = return_status(-2)
        return jsonify(result)
    # User does not exist
    if retrieve_user is None:
        result = return_status(-1)
        return jsonify(result)
    data = request.get_json()
    # All work fields are mandatory
    if 'name' not in data or 'created' not in data or 'description' not in data or 'tags' not in data or 'allow_download' not in data or 'allow_sketch' not in data or 'allow_fork' not in data or 'original_image' not in data or 'colorization_image' not in data:
        return '信息不完整'
    work = Work()
    work.set_artist(auth_user_id)
    name = data['name']
    name = str(name)
    work.set_name(name)
    created_time = data['created']
    created_time = str(created_time)
    work.set_created(created_time)
    description = data['description']
    description = str(description)
    work.set_description(description)
    tags = data['tags']
    work.set_tags(tags)
    allow_downloaded = data['allow_download']
    allow_downloaded = bool(allow_downloaded)
    # NOTE(review): this stores the *download* flag via set_allow_fork and is
    # then overwritten by set_allow_fork(allow_fork) below — the download flag
    # is effectively lost. Likely should call a set_allow_download-style
    # setter; confirm against Work's API before changing.
    work.set_allow_fork(allow_downloaded)
    allow_sketch = data['allow_sketch']
    allow_sketch = bool(allow_sketch)
    work.set_allow_sketch(allow_sketch)
    allow_fork = data['allow_fork']
    allow_fork = bool(allow_fork)
    work.set_allow_fork(allow_fork)
    original_image = data['original_image']
    original_image = str(original_image)
    colorization_image = data['colorization_image']
    colorization_image = str(colorization_image)
    # Image locations live in a separate Address record
    address = Address()
    address.set_original_image(original_image)
    address.set_colorization_image(colorization_image)
    work_dao = WorkDAO()
    try:
        work_dao.add_work(work, address)
        result = return_status(0)
        return jsonify(result)
    except:
        result = return_status(-2)
        return jsonify(result)
def get_request_image(image):
    """Decode a base64 (optionally data-URL prefixed) image string.

    Parameters
    ----------
    image : str
        Base64 image payload, optionally prefixed with
        ``data:image/...;base64,``.

    Returns
    -------
    np.ndarray
        Image decoded by OpenCV (BGR/BGRA as stored).
    """
    image = re.sub('^data:image/.+;base64,', '', image)
    image = base64.urlsafe_b64decode(image)
    # np.fromstring is deprecated (and removed for binary input in newer
    # NumPy); np.frombuffer is the supported equivalent.
    image = np.frombuffer(image, dtype=np.uint8)
    image = cv2.imdecode(image, -1)
    return image
# pool = []
# lock = 1
#
#
def handle_colorization(pool):
    """Run the sketch-cleanup and colorization pipeline, writing the result.

    pool is a (sketch, points, path) triple: the uploaded sketch image,
    the user's color-hint points, and the output file path. All the
    go_* / *_resize / *_norm helpers are model functions defined
    elsewhere in the project; their exact semantics are not visible here.
    """
    # mutex = threading.Lock()
    # lock
    # mutex.acquire(lock)
    # print(len(pool))
    # if len(pool) > 0:
    #     sketch, points, path = pool[0]
    #     del pool[0]
    # else:
    #     return
    # release
    # mutex.release()
    sketch, points, path = pool
    # Stage 1: clean up the input sketch (resize, denoise, line refinement).
    improved_sketch = sketch.copy()
    improved_sketch = min_resize(improved_sketch, 512)
    improved_sketch = cv_denoise(improved_sketch)
    improved_sketch = sensitive(improved_sketch, s=5.0)
    improved_sketch = go_tail(improved_sketch)
    std = cal_std(improved_sketch)
    # Extra cleanup pass for noisy sketches — threshold presumably tuned
    # empirically; confirm before changing.
    if std > 100.0:
        improved_sketch = go_passline(improved_sketch)
        improved_sketch = min_k_down_c(improved_sketch, 2)
        improved_sketch = cv_denoise(improved_sketch)
        improved_sketch = go_tail(improved_sketch)
        improved_sketch = sensitive(improved_sketch, s=5.0)
    improved_sketch = min_black(improved_sketch)
    improved_sketch = cv2.cvtColor(improved_sketch, cv2.COLOR_BGR2GRAY)
    # Stage 2: build multi-resolution sketches for the model.
    sketch_1024 = k_resize(improved_sketch, 64)
    sketch_256 = mini_norm(k_resize(min_k_down(sketch_1024, 2), 16))
    sketch_128 = hard_norm(sk_resize(min_k_down(sketch_1024, 4), 32))
    # Stage 3: coarse colorization ("baby") from the low-res sketch + hints.
    baby = go_baby(sketch_128, opreate_normal_hint(ini_hint(sketch_128), points, type=0, length=1))
    baby = de_line(baby, sketch_128)
    for _ in range(16):
        baby = blur_line(baby, sketch_128)
    baby = go_tail(baby)
    baby = clip_15(baby)
    # Stage 4: mid-res composition guided by the coarse result.
    composition = go_gird(sketch=sketch_256, latent=d_resize(baby, sketch_256.shape), hint=ini_hint(sketch_256))
    composition = go_tail(composition)
    # Stage 5: final high-res painting; no reference image is used here,
    # so the reference branch of the arguments is effectively disabled.
    painting_function = go_head
    reference = None
    alpha = 0
    result = painting_function(
        sketch=sketch_1024,
        global_hint=k_resize(composition, 14),
        local_hint=opreate_normal_hint(ini_hint(sketch_1024), points, type=2, length=2),
        global_hint_x=k_resize(reference, 14) if reference is not None else k_resize(composition, 14),
        alpha=(1 - alpha) if reference is not None else 1
    )
    result = go_tail(result)
    cv2.imwrite(path, result)
    return
# 提交上色请求
@app.route('/illustration/colorization', methods=['POST'])
def colorization():
    """Accept a colorization request: save the sketch, register an Address
    record with a receipt token, and start the colorization pipeline in a
    background thread. Returns the receipt payload immediately.
    """
    auth = request.headers.get('Authorization')
    auth_user_id = Auth.identify(auth)
    # Invalid Authorization header
    if auth_user_id is None:
        result = return_status(-2)
        return jsonify(result)
    # Load the user
    user = User()
    user.set_user_id(auth_user_id)
    user_dao = UserDAO()
    try:
        retrieve_user = user_dao.retrieve(user)
    except:
        result = return_status(-2)
        return jsonify(result)
    # User does not exist
    if retrieve_user is None:
        result = return_status(-1)
        return jsonify(result)
    # Read the request payload
    data = request.get_json()
    if 'image' not in data or 'points' not in data:
        return "信息不完整"
    image = data['image']
    points = data['points']
    # Flip the y coordinate of each hint point — the client presumably uses
    # a bottom-left origin while the pipeline expects top-left; confirm.
    for _ in range(len(points)):
        points[_][1] = 1 - points[_][1]
    # data = datas['data']
    #
    # anchor = data['anchor']
    # anchor_x = anchor['x']
    # anchor_y = anchor['y']
    # anchor_color = anchor['color']
    # print(anchor_x + ' ' + anchor_y + ' ' + anchor_color)
    #
    # hint = data['hint']
    # Decode the image
    try:
        image = get_request_image(image)
        image = from_png_to_jpg(image)
    except:
        result = return_status(-1)
        return jsonify(result)
    # Generate a work id and persist the sketch
    id = get_work_id()
    path = 'works/' + str(auth_user_id) + '/' + str(id)
    path = mkdir(path)
    cv2.imwrite(path + '/sketch.jpg', image)
    # Register the work's storage locations and a receipt token that the
    # client polls with GET /illustration/colorization.
    address = Address()
    address.set_work_id(id)
    address.set_user_id(auth_user_id)
    address.set_path(path)
    original_image = str(auth_user_id) + str(id) + '0'
    address.set_original_image(original_image)
    colorization_image = str(auth_user_id) + str(id) + '1'
    address.set_colorization_image(colorization_image)
    receipt = str(id) + 'r' + str(auth_user_id)
    address.set_receipt(receipt)
    address_dao = AddressDAO()
    address_dao.add(address)
    path = path + '/result.jpg'
    pool = [image, points, path]
    # Run the heavy pipeline off the request thread; completion is signalled
    # by result.jpg appearing on disk (see get_receipt).
    threading.Thread(target=handle_colorization, args=(pool, )).start()
    # cv2.imwrite(path, image)
    result = return_receipt(0, address)
    return jsonify(result)
# 查询上色请求
@app.route('/illustration/colorization', methods=['GET'])
def get_receipt():
    """Poll a colorization request by receipt; return the result once ready."""
    token = request.headers.get('Authorization')
    auth_user_id = Auth.identify(token)
    if auth_user_id is None:
        # Bad Authorization header
        return jsonify(return_status(-2))
    receipt = request.args.get('receipt')
    if receipt is None:
        return '信息不完整'
    # Look up the Address record registered when the job was submitted.
    address = Address()
    address.set_receipt(str(receipt))
    address = AddressDAO().retrieve(address)
    if address is None:
        return jsonify(return_status(-1))
    # The background worker signals completion by writing result.jpg.
    path = address.get_path() + '/result.jpg'
    if os.path.exists(path):
        return jsonify(return_load(0, address))
    else:
        return jsonify(return_status(1))
# def handle_threading():
# while True:
# try:
# handle_colorization()
# except Exception as e:
# print(e)
# threading.Thread(target=handle_threading).start()
if __name__ == '__main__':
    # Development server: listen on all interfaces, port 8080, threaded
    # so the colorization background work does not block other requests.
    app.run(host='0.0.0.0', port=8080, threaded=True)
| [
"cv2.imdecode",
"pojo.User.User",
"pojo.Image.Work",
"auth.Auth.Auth.identify",
"random.randint",
"dao.addressDAO.AddressDAO",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"base64.urlsafe_b64decode",
"pojo.Information.Information",
"re.sub",
"numpy.fromstring",
"dao.FollowerDAO.Followe... | [((1059, 1065), 'pojo.User.User', 'User', ([], {}), '()\n', (1063, 1065), False, 'from pojo.User import User\n'), ((1342, 1362), 'auth.Auth.Auth.authorize', 'Auth.authorize', (['user'], {}), '(user)\n', (1356, 1362), False, 'from auth.Auth import Auth\n'), ((2141, 2147), 'pojo.User.User', 'User', ([], {}), '()\n', (2145, 2147), False, 'from pojo.User import User\n'), ((2778, 2797), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (2791, 2797), False, 'from auth.Auth import Auth\n'), ((2985, 2998), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (2996, 2998), False, 'from pojo.Information import Information\n'), ((3740, 3759), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (3753, 3759), False, 'from auth.Auth import Auth\n'), ((3909, 3915), 'pojo.User.User', 'User', ([], {}), '()\n', (3913, 3915), False, 'from pojo.User import User\n'), ((3966, 3975), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (3973, 3975), False, 'from dao.UserDAO import UserDAO\n'), ((4239, 4252), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (4250, 4252), False, 'from pojo.Information import Information\n'), ((5032, 5048), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (5046, 5048), False, 'from dao.InformationDAO import InformationDAO\n'), ((5207, 5234), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (5221, 5234), False, 'import os\n'), ((5452, 5471), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (5465, 5471), False, 'from auth.Auth import Auth\n'), ((5621, 5627), 'pojo.User.User', 'User', ([], {}), '()\n', (5625, 5627), False, 'from pojo.User import User\n'), ((5678, 5687), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (5685, 5687), False, 'from dao.UserDAO import UserDAO\n'), ((6034, 6047), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (6045, 6047), False, 'from 
pojo.Information import Information\n'), ((6454, 6470), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (6468, 6470), False, 'from dao.InformationDAO import InformationDAO\n'), ((6972, 6991), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (6985, 6991), False, 'from auth.Auth import Auth\n'), ((7141, 7147), 'pojo.User.User', 'User', ([], {}), '()\n', (7145, 7147), False, 'from pojo.User import User\n'), ((7198, 7207), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (7205, 7207), False, 'from dao.UserDAO import UserDAO\n'), ((7558, 7571), 'pojo.Information.Information', 'Information', ([], {}), '()\n', (7569, 7571), False, 'from pojo.Information import Information\n'), ((7998, 8014), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (8012, 8014), False, 'from dao.InformationDAO import InformationDAO\n'), ((8521, 8540), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (8534, 8540), False, 'from auth.Auth import Auth\n'), ((8690, 8696), 'pojo.User.User', 'User', ([], {}), '()\n', (8694, 8696), False, 'from pojo.User import User\n'), ((8747, 8756), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (8754, 8756), False, 'from dao.UserDAO import UserDAO\n'), ((9022, 9036), 'dao.FollowingDAO.FollowingDAO', 'FollowingDAO', ([], {}), '()\n', (9034, 9036), False, 'from dao.FollowingDAO import FollowingDAO\n'), ((9414, 9433), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (9427, 9433), False, 'from auth.Auth import Auth\n'), ((9583, 9589), 'pojo.User.User', 'User', ([], {}), '()\n', (9587, 9589), False, 'from pojo.User import User\n'), ((9640, 9649), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (9647, 9649), False, 'from dao.UserDAO import UserDAO\n'), ((10083, 10094), 'dao.FollowDAO.FollowDAO', 'FollowDAO', ([], {}), '()\n', (10092, 10094), False, 'from dao.FollowDAO import FollowDAO\n'), ((10708, 10727), 'auth.Auth.Auth.identify', 
'Auth.identify', (['auth'], {}), '(auth)\n', (10721, 10727), False, 'from auth.Auth import Auth\n'), ((10877, 10883), 'pojo.User.User', 'User', ([], {}), '()\n', (10881, 10883), False, 'from pojo.User import User\n'), ((10934, 10943), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (10941, 10943), False, 'from dao.UserDAO import UserDAO\n'), ((11208, 11221), 'dao.FollowerDAO.FollowerDAO', 'FollowerDAO', ([], {}), '()\n', (11219, 11221), False, 'from dao.FollowerDAO import FollowerDAO\n'), ((11500, 11540), 'random.randint', 'random.randint', (['(10000000000)', '(99999999999)'], {}), '(10000000000, 99999999999)\n', (11514, 11540), False, 'import random\n'), ((11690, 11709), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (11703, 11709), False, 'from auth.Auth import Auth\n'), ((11948, 11954), 'pojo.User.User', 'User', ([], {}), '()\n', (11952, 11954), False, 'from pojo.User import User\n'), ((12000, 12009), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (12007, 12009), False, 'from dao.UserDAO import UserDAO\n'), ((12469, 12478), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (12476, 12478), False, 'from dao.ImageDAO import WorkDAO\n'), ((13044, 13063), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (13057, 13063), False, 'from auth.Auth import Auth\n'), ((14867, 14886), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (14880, 14886), False, 'from auth.Auth import Auth\n'), ((15152, 15158), 'pojo.User.User', 'User', ([], {}), '()\n', (15156, 15158), False, 'from pojo.User import User\n'), ((15204, 15213), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (15211, 15213), False, 'from dao.UserDAO import UserDAO\n'), ((16307, 16326), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (16320, 16326), False, 'from auth.Auth import Auth\n'), ((16476, 16482), 'pojo.User.User', 'User', ([], {}), '()\n', (16480, 16482), False, 'from pojo.User import User\n'), ((16533, 
16542), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (16540, 16542), False, 'from dao.UserDAO import UserDAO\n'), ((16959, 16968), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (16966, 16968), False, 'from dao.ImageDAO import WorkDAO\n'), ((17587, 17606), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (17600, 17606), False, 'from auth.Auth import Auth\n'), ((18197, 18206), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (18204, 18206), False, 'from dao.ImageDAO import WorkDAO\n'), ((19084, 19103), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (19097, 19103), False, 'from auth.Auth import Auth\n'), ((19253, 19259), 'pojo.User.User', 'User', ([], {}), '()\n', (19257, 19259), False, 'from pojo.User import User\n'), ((19310, 19319), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (19317, 19319), False, 'from dao.UserDAO import UserDAO\n'), ((19891, 19897), 'pojo.Image.Work', 'Work', ([], {}), '()\n', (19895, 19897), False, 'from pojo.Image import Work\n'), ((20835, 20844), 'pojo.address.Address', 'Address', ([], {}), '()\n', (20842, 20844), False, 'from pojo.address import Address\n'), ((20963, 20972), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (20970, 20972), False, 'from dao.ImageDAO import WorkDAO\n'), ((21210, 21253), 're.sub', 're.sub', (['"""^data:image/.+;base64,"""', '""""""', 'image'], {}), "('^data:image/.+;base64,', '', image)\n", (21216, 21253), False, 'import re\n'), ((21266, 21297), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['image'], {}), '(image)\n', (21290, 21297), False, 'import base64\n'), ((21310, 21346), 'numpy.fromstring', 'np.fromstring', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (21323, 21346), True, 'import numpy as np\n'), ((21359, 21382), 'cv2.imdecode', 'cv2.imdecode', (['image', '(-1)'], {}), '(image, -1)\n', (21371, 21382), False, 'import cv2\n'), ((22379, 22428), 'cv2.cvtColor', 'cv2.cvtColor', (['improved_sketch', 
'cv2.COLOR_BGR2GRAY'], {}), '(improved_sketch, cv2.COLOR_BGR2GRAY)\n', (22391, 22428), False, 'import cv2\n'), ((23488, 23513), 'cv2.imwrite', 'cv2.imwrite', (['path', 'result'], {}), '(path, result)\n', (23499, 23513), False, 'import cv2\n'), ((23682, 23701), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (23695, 23701), False, 'from auth.Auth import Auth\n'), ((23851, 23857), 'pojo.User.User', 'User', ([], {}), '()\n', (23855, 23857), False, 'from pojo.User import User\n'), ((23908, 23917), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (23915, 23917), False, 'from dao.UserDAO import UserDAO\n'), ((24960, 25000), 'cv2.imwrite', 'cv2.imwrite', (["(path + '/sketch.jpg')", 'image'], {}), "(path + '/sketch.jpg', image)\n", (24971, 25000), False, 'import cv2\n'), ((25016, 25025), 'pojo.address.Address', 'Address', ([], {}), '()\n', (25023, 25025), False, 'from pojo.address import Address\n'), ((25435, 25447), 'dao.addressDAO.AddressDAO', 'AddressDAO', ([], {}), '()\n', (25445, 25447), False, 'from dao.addressDAO import AddressDAO\n'), ((25869, 25888), 'auth.Auth.Auth.identify', 'Auth.identify', (['auth'], {}), '(auth)\n', (25882, 25888), False, 'from auth.Auth import Auth\n'), ((26147, 26156), 'pojo.address.Address', 'Address', ([], {}), '()\n', (26154, 26156), False, 'from pojo.address import Address\n'), ((26209, 26221), 'dao.addressDAO.AddressDAO', 'AddressDAO', ([], {}), '()\n', (26219, 26221), False, 'from dao.addressDAO import AddressDAO\n'), ((26415, 26435), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (26429, 26435), False, 'import os\n'), ((2235, 2244), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (2242, 2244), False, 'from dao.UserDAO import UserDAO\n'), ((5262, 5286), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (5273, 5286), False, 'import os\n'), ((13908, 13917), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (13915, 13917), False, 'from dao.ImageDAO import 
WorkDAO\n'), ((15539, 15548), 'dao.ImageDAO.WorkDAO', 'WorkDAO', ([], {}), '()\n', (15546, 15548), False, 'from dao.ImageDAO import WorkDAO\n'), ((25547, 25605), 'threading.Thread', 'threading.Thread', ([], {'target': 'handle_colorization', 'args': '(pool,)'}), '(target=handle_colorization, args=(pool,))\n', (25563, 25605), False, 'import threading\n'), ((1149, 1158), 'dao.UserDAO.UserDAO', 'UserDAO', ([], {}), '()\n', (1156, 1158), False, 'from dao.UserDAO import UserDAO\n'), ((3208, 3224), 'dao.InformationDAO.InformationDAO', 'InformationDAO', ([], {}), '()\n', (3222, 3224), False, 'from dao.InformationDAO import InformationDAO\n')] |
# ------------------------------------------------------------------------
# Copyright (c) 2021 4669 (for eccv submission only). All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016-2020 <NAME> <EMAIL>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import os
import numpy as np
import random
import argparse
import torchvision.transforms.functional as F
import torch
import cv2
from tqdm import tqdm
from pathlib import Path
from PIL import Image, ImageDraw
from models import build_model
from util.tool import load_model
from main import get_args_parser
from torch.nn.functional import interpolate
from typing import List
from util.evaluation import Evaluator
import motmetrics as mm
import shutil
from models.structures import Instances
from torch.utils.data import Dataset, DataLoader
np.random.seed(2020)  # fixed seed so evaluation runs are reproducible
# Fixed palette of color tuples; draw_bboxes picks one per track via
# COLORS_10[id % len(COLORS_10)].
COLORS_10 = [(144, 238, 144), (178, 34, 34), (221, 160, 221), (0, 255, 0), (0, 128, 0), (210, 105, 30), (220, 20, 60),
             (192, 192, 192), (255, 228, 196), (50, 205, 50), (139, 0, 139), (100, 149, 237), (138, 43, 226),
             (238, 130, 238),
             (255, 0, 255), (0, 100, 0), (127, 255, 0), (255, 0, 255), (0, 0, 205), (255, 140, 0), (255, 239, 213),
             (199, 21, 133), (124, 252, 0), (147, 112, 219), (106, 90, 205), (176, 196, 222), (65, 105, 225),
             (173, 255, 47),
             (255, 20, 147), (219, 112, 147), (186, 85, 211), (199, 21, 133), (148, 0, 211), (255, 99, 71),
             (144, 238, 144),
             (255, 255, 0), (230, 230, 250), (0, 0, 255), (128, 128, 0), (189, 183, 107), (255, 255, 224),
             (128, 128, 128),
             (105, 105, 105), (64, 224, 208), (205, 133, 63), (0, 128, 128), (72, 209, 204), (139, 69, 19),
             (255, 245, 238),
             (250, 240, 230), (152, 251, 152), (0, 255, 255), (135, 206, 235), (0, 191, 255), (176, 224, 230),
             (0, 250, 154),
             (245, 255, 250), (240, 230, 140), (245, 222, 179), (0, 139, 139), (143, 188, 143), (255, 0, 0),
             (240, 128, 128),
             (102, 205, 170), (60, 179, 113), (46, 139, 87), (165, 42, 42), (178, 34, 34), (175, 238, 238),
             (255, 248, 220),
             (218, 165, 32), (255, 250, 240), (253, 245, 230), (244, 164, 96), (210, 105, 30)]
def plot_one_box(x, img, color=None, label=None, score=None, line_thickness=None):
    """Draw one bounding box (plus optional label and score text) on img.

    Parameters
    ----------
    x : sequence
        Box corners (x1, y1, x2, y2); cast to int.
    img : np.ndarray
        Image drawn on in place (also returned for chaining).
    color : sequence, optional
        Box color; a random color is picked when omitted.
    label : str, optional
        Text drawn just above the top-left corner, on a filled background.
    score : str, optional
        Extra text drawn below the top-left corner.
    line_thickness : int, optional
        Box line thickness; defaults to 2.
    """
    # BUG FIX: line_thickness was accepted but ignored (tl hard-coded to 2);
    # honor it while keeping 2 as the default.
    tl = line_thickness or 2
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    # BUG FIX: tf was only assigned inside the `if label:` branch, so passing
    # a score without a label raised NameError; hoist it.
    tf = max(tl - 1, 1)  # font thickness
    if label:
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1)  # filled label background
        cv2.putText(img,
                    label, (c1[0], c1[1] - 2),
                    0,
                    tl / 3, [225, 255, 255],
                    thickness=tf,
                    lineType=cv2.LINE_AA)
    if score is not None:
        cv2.putText(img, score, (c1[0], c1[1] + 30), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
    return img
'''
deep sort 中的画图方法,在原图上进行作画
'''
def draw_bboxes(ori_img, bbox, identities=None, offset=(0, 0), cvt_color=False):
    """Draw one labelled box per detection on ori_img and return it.

    Each box is (x1, y1, x2, y2[, score]); the track identity (if given)
    selects a stable color from COLORS_10 and is used as the label text.
    """
    if cvt_color:
        ori_img = cv2.cvtColor(np.asarray(ori_img), cv2.COLOR_RGB2BGR)
    img = ori_img
    for idx, box in enumerate(bbox):
        x1, y1, x2, y2 = (int(v) for v in box[:4])
        x1, x2 = x1 + offset[0], x2 + offset[0]
        y1, y2 = y1 + offset[1], y2 + offset[1]
        # Optional confidence score in the fifth slot
        score = '{:.2f}'.format(box[4]) if len(box) > 4 else None
        track_id = int(identities[idx]) if identities is not None else 0
        color = COLORS_10[track_id % len(COLORS_10)]
        label = '{:d}'.format(track_id)
        img = plot_one_box([x1, y1, x2, y2], img, color, label, score=score)
    return img
def draw_points(img: np.ndarray, points: np.ndarray, color=(255, 255, 255)) -> np.ndarray:
    """Draw each (x, y) of points as a small filled circle on img and return it.

    Points from index 300 onward are drawn in green instead of the given color
    (matching the original behavior of switching color once and keeping it).
    """
    assert len(points.shape) == 2 and points.shape[1] == 2, 'invalid points shape: {}'.format(points.shape)
    for pt_idx, (px, py) in enumerate(points):
        draw_color = color if pt_idx < 300 else (0, 255, 0)
        cv2.circle(img, (int(px), int(py)), 2, color=draw_color, thickness=2)
    return img
def tensor_to_numpy(tensor: torch.Tensor) -> np.ndarray:
    """Return tensor as a numpy array on host memory, detached from autograd."""
    detached = tensor.detach()
    return detached.cpu().numpy()
class Track(object):
    """A single tracked object: its latest box plus miss bookkeeping.

    Ids are handed out from a class-wide counter, so every Track created in a
    process gets a unique, monotonically increasing id.
    """
    track_cnt = 0  # shared counter for unique ids

    def __init__(self, box):
        self.box = box
        self.time_since_update = 0
        # take the next free id and advance the shared counter
        self.id = Track.track_cnt
        Track.track_cnt += 1
        self.miss = 0  # consecutive frames without a matching detection

    def miss_one_frame(self):
        """Record one more frame in which this track was not matched."""
        self.miss += 1

    def clear_miss(self):
        """Reset the consecutive-miss counter."""
        self.miss = 0

    def update(self, box):
        """Store the newest box and mark the track as freshly matched."""
        self.box = box
        self.clear_miss()
class MOTR(object):
    """Bookkeeping tracker that turns per-frame MOTR detections into id'd tracks.

    Tracks are keyed by the detector-assigned object index and kept in two
    pools: ``active_trackers`` (matched with label 0 this frame) and
    ``inactive_trackers`` (occluded or missed). A track missing for more than
    10 consecutive frames is dropped for good.
    """
    def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
        """
        Sets key parameters for SORT.

        NOTE(review): max_age, min_hits and iou_threshold are stored but never
        read by update() below — they appear to be leftovers from SORT.
        """
        self.max_age = max_age
        self.min_hits = min_hits
        self.iou_threshold = iou_threshold
        self.trackers = []
        self.frame_count = 0
        self.active_trackers = {}    # obj_idx -> Track, matched this frame
        self.inactive_trackers = {}  # obj_idx -> Track, occluded/missed
        self.disappeared_tracks = []
    def _remove_track(self, slot_id):
        # Permanently drop an inactive track and remember its id as disappeared.
        self.inactive_trackers.pop(slot_id)
        self.disappeared_tracks.append(slot_id)
    def clear_disappeared_track(self):
        # Reset the per-sequence list of permanently removed track ids.
        self.disappeared_tracks = []
    def update(self, dt_instances: Instances):
        """Advance the tracker by one frame of detections.

        Params:
          dt_instances - an Instances holding per-detection obj_idxes, boxes,
          scores and labels (label 0 = visible/positive, label 1 = occluded).
          Must be called once per frame, even when there are no detections.
        Returns a (N, 6) array [x1, y1, x2, y2, score, id + 1] for the label-0
        detections of this frame (+1 because the MOT benchmark requires
        positive ids), or an empty (0, 6) array.
        """
        self.frame_count += 1
        # get predicted locations from existing trackers.
        dt_idxes = set(dt_instances.obj_idxes.tolist())
        track_idxes = set(self.active_trackers.keys()).union(set(self.inactive_trackers.keys()))
        matched_idxes = dt_idxes.intersection(track_idxes)
        unmatched_tracker = track_idxes - matched_idxes
        for track_id in unmatched_tracker:
            # miss in this frame, move to inactive_trackers.
            if track_id in self.active_trackers:
                self.inactive_trackers[track_id] = self.active_trackers.pop(track_id)
            self.inactive_trackers[track_id].miss_one_frame()
            # drop tracks that have been missing for more than 10 frames
            if self.inactive_trackers[track_id].miss > 10:
                self._remove_track(track_id)
        for i in range(len(dt_instances)):
            idx = dt_instances.obj_idxes[i]
            # box row is [x1, y1, x2, y2, score]
            bbox = np.concatenate([dt_instances.boxes[i], dt_instances.scores[i:i+1]], axis=-1)
            label = dt_instances.labels[i]
            if label == 0:
                # get a positive track.
                if idx in self.inactive_trackers:
                    # set state of track active.
                    self.active_trackers[idx] = self.inactive_trackers.pop(idx)
                if idx not in self.active_trackers:
                    # create a new track.
                    self.active_trackers[idx] = Track(idx)
                self.active_trackers[idx].update(bbox)
            elif label == 1:
                # get an occluded track.
                if idx in self.active_trackers:
                    # set state of track inactive.
                    self.inactive_trackers[idx] = self.active_trackers.pop(idx)
                if idx not in self.inactive_trackers:
                    # It's strange to obtain a new occluded track.
                    # TODO: think more rational disposal.
                    self.inactive_trackers[idx] = Track(idx)
                self.inactive_trackers[idx].miss_one_frame()
                if self.inactive_trackers[idx].miss > 10:
                    self._remove_track(idx)
        # Emit only the label-0 (visible) detections of this frame.
        ret = []
        for i in range(len(dt_instances)):
            label = dt_instances.labels[i]
            if label == 0:
                id = dt_instances.obj_idxes[i]
                box_with_score = np.concatenate([dt_instances.boxes[i], dt_instances.scores[i:i+1]], axis=-1)
                ret.append(np.concatenate((box_with_score, [id + 1])).reshape(1, -1))  # +1 as MOT benchmark requires positive
        if len(ret) > 0:
            return np.concatenate(ret)
        return np.empty((0, 6))
def load_label(label_path: str, img_size: tuple) -> dict:
    """Read a labels_with_ids txt file and return boxes/labels/area arrays.

    Each input row is [class, obj_id, cx, cy, bw, bh] with coords normalized to
    the image; img_size is (height, width). Boxes are converted to pixel
    (x1, y1, x2, y2) and only the first occurrence of each obj_id is kept.
    """
    raw = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
    h, w = img_size
    # Normalized center/width-height to pixel xyxy format
    labels = raw.copy()
    labels[:, 2] = w * (raw[:, 2] - raw[:, 4] / 2)
    labels[:, 3] = h * (raw[:, 3] - raw[:, 5] / 2)
    labels[:, 4] = w * (raw[:, 2] + raw[:, 4] / 2)
    labels[:, 5] = h * (raw[:, 3] + raw[:, 5] / 2)
    targets = {'boxes': [], 'labels': [], 'area': []}
    seen_ids = set()
    for row in labels:
        obj_id = row[1]
        if obj_id in seen_ids:
            # duplicate object id: keep only the first annotation
            continue
        seen_ids.add(obj_id)
        targets['boxes'].append(row[2:6].tolist())
        # NOTE(review): after the conversion above row[4], row[5] are x2, y2
        # pixel corners, so this "area" is x2 * y2, not width * height —
        # preserved as-is; confirm against consumers.
        targets['area'].append(row[4] * row[5])
        targets['labels'].append(0)
    targets['boxes'] = np.asarray(targets['boxes'])
    targets['area'] = np.asarray(targets['area'])
    targets['labels'] = np.asarray(targets['labels'])
    return targets
def filter_pub_det(res_file, pub_det_file, filter_iou=False):
    """Rewrite res_file in place, dropping tracks whose first box has no match
    in the public detections of pub_det_file.

    Both files are MOT-style CSV: frame, id, x1, y1, w, h, ... A track is kept
    from its first line onward once that first box either (filter_iou=True)
    overlaps a public detection with IoU >= 0.5, or (default) contains the
    center of at least one public detection. Lines of already-accepted ids are
    always written through.
    """
    # Index the public detections by frame as pixel xyxy boxes.
    frame_boxes = {}
    with open(pub_det_file, 'r') as f:
        lines = f.readlines()
    for line in lines:
        if len(line) == 0:
            continue
        elements = line.strip().split(',')
        frame_id = int(elements[0])
        x1, y1, w, h = elements[2:6]
        x1, y1, w, h = float(x1), float(y1), float(w), float(h)
        # convert width/height to inclusive corner coordinates
        x2 = x1 + w - 1
        y2 = y1 + h - 1
        if frame_id not in frame_boxes:
            frame_boxes[frame_id] = []
        frame_boxes[frame_id].append([x1, y1, x2, y2])
    for frame, boxes in frame_boxes.items():
        frame_boxes[frame] = np.array(boxes)
    ids = {}  # ids whose initialization passed the filter
    num_filter_box = 0
    # Read all result lines first, then reopen for writing (truncates the file).
    with open(res_file, 'r') as f:
        lines = f.readlines()
    with open(res_file, 'w') as f:
        for line in lines:
            if len(line) == 0:
                continue
            elements = line.strip().split(',')
            frame_id, obj_id = elements[:2]
            frame_id = int(frame_id)
            obj_id = int(obj_id)
            x1, y1, w, h = elements[2:6]
            x1, y1, w, h = float(x1), float(y1), float(w), float(h)
            x2 = x1 + w - 1
            y2 = y1 + h - 1
            if obj_id not in ids:
                # track initialization.
                if frame_id not in frame_boxes:
                    # no public detections at all for this frame: reject the track
                    num_filter_box += 1
                    print("filter init box {} {}".format(frame_id, obj_id))
                    continue
                pub_dt_boxes = frame_boxes[frame_id]
                dt_box = np.array([[x1, y1, x2, y2]])
                if filter_iou:
                    max_iou = bbox_iou(dt_box, pub_dt_boxes).max()
                    if max_iou < 0.5:
                        num_filter_box += 1
                        print("filter init box {} {}".format(frame_id, obj_id))
                        continue
                else:
                    # keep the track if the center of any public box lies inside it
                    pub_dt_centers = (pub_dt_boxes[:, :2] + pub_dt_boxes[:, 2:4]) * 0.5
                    x_inside = (dt_box[0, 0] <= pub_dt_centers[:, 0]) & (dt_box[0, 2] >= pub_dt_centers[:, 0])
                    y_inside = (dt_box[0, 1] <= pub_dt_centers[:, 1]) & (dt_box[0, 3] >= pub_dt_centers[:, 1])
                    center_inside:np.ndarray = x_inside & y_inside
                    if not center_inside.any():
                        num_filter_box += 1
                        print("filter init box {} {}".format(frame_id, obj_id))
                        continue
                print("save init track {} {}".format(frame_id, obj_id))
                ids[obj_id] = True
            f.write(line)
    print("totally {} boxes are filtered.".format(num_filter_box))
class ListImgDataset(Dataset):
    """Dataset over a list of image paths; each item is (normalized tensor, original RGB image)."""
    def __init__(self, img_list) -> None:
        super().__init__()
        self.img_list = img_list
        '''
        common settings
        '''
        self.img_height = 800
        self.img_width = 1536
        self.mean = [0.485, 0.456, 0.406]  # ImageNet normalization
        self.std = [0.229, 0.224, 0.225]
    def load_img_from_file(self, f_path):
        """Load the image at f_path as RGB plus, when present, its labels_with_ids targets."""
        label_path = f_path.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
        cur_img = cv2.imread(f_path)
        assert cur_img is not None, f_path
        cur_img = cv2.cvtColor(cur_img, cv2.COLOR_BGR2RGB)
        targets = load_label(label_path, cur_img.shape[:2]) if os.path.exists(label_path) else None
        return cur_img, targets
    def init_img(self, img):
        """Resize so the short side is img_height (capped by img_width on the long side),
        normalize, and add a leading batch dimension. Returns (tensor, original image)."""
        ori_img = img.copy()
        self.seq_h, self.seq_w = img.shape[:2]
        short_side = min(self.seq_h, self.seq_w)
        long_side = max(self.seq_h, self.seq_w)
        scale = self.img_height / short_side
        if long_side * scale > self.img_width:
            # long side would overflow: rescale to fit the width budget instead
            scale = self.img_width / long_side
        target_h = int(self.seq_h * scale)
        target_w = int(self.seq_w * scale)
        resized = cv2.resize(img, (target_w, target_h))
        tensor = F.normalize(F.to_tensor(resized), self.mean, self.std)
        return tensor.unsqueeze(0), ori_img
    def __len__(self):
        return len(self.img_list)
    def __getitem__(self, index):
        # NOTE: the annotation targets loaded here are discarded; only the image is used
        img, targets = self.load_img_from_file(self.img_list[index])
        return self.init_img(img)
class Detector(object):
    """Runs a MOTR model over one DanceTrack test sequence and writes MOT-format results."""
    def __init__(self, args, model=None, seq_num=2):
        """
        Args:
            args: parsed namespace; needs mot_path, output_dir and exp_name.
            model: the MOTR network used for inference.
            seq_num: sequence directory name under DanceTrack/test (despite the
                int default, callers pass the directory name string).
        """
        self.args = args
        self.detr = model
        self.seq_num = seq_num
        img_list = os.listdir(os.path.join(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1'))
        img_list = [os.path.join(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1', _) for _ in img_list if
                    ('jpg' in _) or ('png' in _)]
        self.img_list = sorted(img_list)
        self.img_len = len(self.img_list)
        self.tr_tracker = MOTR()
        self.save_path = os.path.join(self.args.output_dir, 'results/{}'.format(seq_num))
        os.makedirs(self.save_path, exist_ok=True)
        self.predict_path = os.path.join(self.args.output_dir, args.exp_name)
        os.makedirs(self.predict_path, exist_ok=True)
        # start each sequence from a clean per-sequence result file
        if os.path.exists(os.path.join(self.predict_path, f'{self.seq_num}.txt')):
            os.remove(os.path.join(self.predict_path, f'{self.seq_num}.txt'))
    @staticmethod
    def filter_dt_by_score(dt_instances: Instances, prob_threshold: float) -> Instances:
        """Keep only detections whose score exceeds prob_threshold."""
        keep = dt_instances.scores > prob_threshold
        return dt_instances[keep]
    @staticmethod
    def filter_dt_by_area(dt_instances: Instances, area_threshold: float) -> Instances:
        """Keep only detections whose box area (w * h) exceeds area_threshold."""
        wh = dt_instances.boxes[:, 2:4] - dt_instances.boxes[:, 0:2]
        areas = wh[:, 0] * wh[:, 1]
        keep = areas > area_threshold
        return dt_instances[keep]
    @staticmethod
    def write_results(txt_path, frame_id, bbox_xyxy, identities):
        """Append one frame of tracks to txt_path in MOT challenge format."""
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
        with open(txt_path, 'a') as f:
            for xyxy, track_id in zip(bbox_xyxy, identities):
                # bugfix: test for None before comparing — `None < 0` raises
                # TypeError in Python 3, so the original order could never
                # reach the None check.
                if track_id is None or track_id < 0:
                    continue
                x1, y1, x2, y2 = xyxy
                w, h = x2 - x1, y2 - y1
                line = save_format.format(frame=int(frame_id), id=int(track_id), x1=x1, y1=y1, w=w, h=h)
                f.write(line)
    def eval_seq(self):
        """Evaluate the written gt.txt results for this sequence with the MOT Evaluator."""
        data_root = os.path.join(self.args.mot_path, 'MOT15/images/train')
        result_filename = os.path.join(self.predict_path, 'gt.txt')
        evaluator = Evaluator(data_root, self.seq_num)
        accs = evaluator.eval_file(result_filename)
        return accs
    @staticmethod
    def visualize_img_with_bbox(img_path, img, dt_instances: Instances, ref_pts=None, gt_boxes=None):
        """Render detections (and optional reference points / GT boxes) and save to img_path."""
        if dt_instances.has('scores'):
            img_show = draw_bboxes(img, np.concatenate([dt_instances.boxes, dt_instances.scores.reshape(-1, 1)], axis=-1), dt_instances.obj_idxes)
        else:
            img_show = draw_bboxes(img, dt_instances.boxes, dt_instances.obj_idxes)
        if ref_pts is not None:
            img_show = draw_points(img_show, ref_pts)
        if gt_boxes is not None:
            # id -1 marks ground-truth boxes so they get a distinct look
            img_show = draw_bboxes(img_show, gt_boxes, identities=np.ones((len(gt_boxes), )) * -1)
        cv2.imwrite(img_path, img_show)
    def detect(self, prob_threshold=0.7, area_threshold=100, vis=False):
        """Run frame-by-frame inference over the sequence, propagating track state,
        and append per-frame results to the sequence's txt file."""
        total_dts = 0
        total_occlusion_dts = 0  # NOTE(review): never incremented below — reported as 0
        track_instances = None
        loader = DataLoader(ListImgDataset(self.img_list), 1, num_workers=2)
        # truncate the shared gt.txt before the run
        with open(os.path.join(self.predict_path, 'gt.txt'), 'w'):
            pass
        for i, (cur_img, ori_img) in enumerate(tqdm(loader)):
            cur_img, ori_img = cur_img[0], ori_img[0]
            # track_instances = None
            if track_instances is not None:
                # boxes/labels are recomputed each frame by the model
                track_instances.remove('boxes')
                track_instances.remove('labels')
            seq_h, seq_w, _ = ori_img.shape
            res = self.detr.inference_single_image(cur_img.cuda().float(), (seq_h, seq_w), track_instances)
            track_instances = res['track_instances']
            all_ref_pts = tensor_to_numpy(res['ref_pts'][0, :, :2])
            dt_instances = track_instances.to(torch.device('cpu'))
            # filter det instances by score.
            dt_instances = self.filter_dt_by_score(dt_instances, prob_threshold)
            dt_instances = self.filter_dt_by_area(dt_instances, area_threshold)
            total_dts += len(dt_instances)
            if vis:
                # for visual
                cur_vis_img_path = os.path.join(self.save_path, 'frame_{}.jpg'.format(i))
                gt_boxes = None
                self.visualize_img_with_bbox(cur_vis_img_path, ori_img, dt_instances, ref_pts=all_ref_pts, gt_boxes=gt_boxes)
            tracker_outputs = self.tr_tracker.update(dt_instances)
            # tracker output columns: [x1, y1, x2, y2, score, id]
            self.write_results(txt_path=os.path.join(self.predict_path, f'{self.seq_num}.txt'),
                               frame_id=(i + 1),
                               bbox_xyxy=tracker_outputs[:, :4],
                               identities=tracker_outputs[:, 5])
        print("totally {} dts {} occlusion dts".format(total_dts, total_occlusion_dts))
if __name__ == '__main__':
    parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    # load model and weights
    detr, _, _ = build_model(args)
    # NOTE(review): `checkpoint` is loaded but never used — load_model below
    # re-reads args.resume itself; confirm whether this line can be dropped.
    checkpoint = torch.load(args.resume, map_location='cpu')
    detr = load_model(detr, args.resume)
    detr.eval()
    detr = detr.cuda()
    # '''for MOT17 submit'''
    sub_dir = 'DanceTrack/test'
    seq_nums = os.listdir(os.path.join(args.mot_path, sub_dir))
    # run detection + tracking independently on every test sequence
    for seq_num in seq_nums:
        det = Detector(args, model=detr, seq_num=seq_num)
        det.detect()
| [
"numpy.random.seed",
"torchvision.transforms.functional.to_tensor",
"numpy.empty",
"pathlib.Path",
"cv2.rectangle",
"torch.device",
"os.path.join",
"random.randint",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"os.path.exists",
"numpy.loadtxt",
"util.tool.load_model",
"models.build_mode... | [((1980, 2000), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (1994, 2000), True, 'import numpy as np\n'), ((3804, 3851), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color'], {'thickness': 'tl'}), '(img, c1, c2, color, thickness=tl)\n', (3817, 3851), False, 'import cv2\n'), ((10835, 10863), 'numpy.asarray', 'np.asarray', (["targets['boxes']"], {}), "(targets['boxes'])\n", (10845, 10863), True, 'import numpy as np\n'), ((10886, 10913), 'numpy.asarray', 'np.asarray', (["targets['area']"], {}), "(targets['area'])\n", (10896, 10913), True, 'import numpy as np\n'), ((10938, 10967), 'numpy.asarray', 'np.asarray', (["targets['labels']"], {}), "(targets['labels'])\n", (10948, 10967), True, 'import numpy as np\n'), ((20491, 20508), 'models.build_model', 'build_model', (['args'], {}), '(args)\n', (20502, 20508), False, 'from models import build_model\n'), ((20526, 20569), 'torch.load', 'torch.load', (['args.resume'], {'map_location': '"""cpu"""'}), "(args.resume, map_location='cpu')\n", (20536, 20569), False, 'import torch\n'), ((20581, 20610), 'util.tool.load_model', 'load_model', (['detr', 'args.resume'], {}), '(detr, args.resume)\n', (20591, 20610), False, 'from util.tool import load_model\n'), ((4052, 4089), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color', '(-1)'], {}), '(img, c1, c2, color, -1)\n', (4065, 4089), False, 'import cv2\n'), ((4108, 4219), 'cv2.putText', 'cv2.putText', (['img', 'label', '(c1[0], c1[1] - 2)', '(0)', '(tl / 3)', '[225, 255, 255]'], {'thickness': 'tf', 'lineType': 'cv2.LINE_AA'}), '(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255],\n thickness=tf, lineType=cv2.LINE_AA)\n', (4119, 4219), False, 'import cv2\n'), ((9940, 9956), 'numpy.empty', 'np.empty', (['(0, 6)'], {}), '((0, 6))\n', (9948, 9956), True, 'import numpy as np\n'), ((11697, 11712), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (11705, 11712), True, 'import numpy as np\n'), ((14231, 14249), 
'cv2.imread', 'cv2.imread', (['f_path'], {}), '(f_path)\n', (14241, 14249), False, 'import cv2\n'), ((14311, 14351), 'cv2.cvtColor', 'cv2.cvtColor', (['cur_img', 'cv2.COLOR_BGR2RGB'], {}), '(cur_img, cv2.COLOR_BGR2RGB)\n', (14323, 14351), False, 'import cv2\n'), ((14882, 14919), 'cv2.resize', 'cv2.resize', (['img', '(target_w, target_h)'], {}), '(img, (target_w, target_h))\n', (14892, 14919), False, 'import cv2\n'), ((15896, 15938), 'os.makedirs', 'os.makedirs', (['self.save_path'], {'exist_ok': '(True)'}), '(self.save_path, exist_ok=True)\n', (15907, 15938), False, 'import os\n'), ((15968, 16017), 'os.path.join', 'os.path.join', (['self.args.output_dir', 'args.exp_name'], {}), '(self.args.output_dir, args.exp_name)\n', (15980, 16017), False, 'import os\n'), ((16026, 16071), 'os.makedirs', 'os.makedirs', (['self.predict_path'], {'exist_ok': '(True)'}), '(self.predict_path, exist_ok=True)\n', (16037, 16071), False, 'import os\n'), ((17305, 17359), 'os.path.join', 'os.path.join', (['self.args.mot_path', '"""MOT15/images/train"""'], {}), "(self.args.mot_path, 'MOT15/images/train')\n", (17317, 17359), False, 'import os\n'), ((17386, 17427), 'os.path.join', 'os.path.join', (['self.predict_path', '"""gt.txt"""'], {}), "(self.predict_path, 'gt.txt')\n", (17398, 17427), False, 'import os\n'), ((17448, 17482), 'util.evaluation.Evaluator', 'Evaluator', (['data_root', 'self.seq_num'], {}), '(data_root, self.seq_num)\n', (17457, 17482), False, 'from util.evaluation import Evaluator\n'), ((18186, 18217), 'cv2.imwrite', 'cv2.imwrite', (['img_path', 'img_show'], {}), '(img_path, img_show)\n', (18197, 18217), False, 'import cv2\n'), ((20739, 20775), 'os.path.join', 'os.path.join', (['args.mot_path', 'sub_dir'], {}), '(args.mot_path, sub_dir)\n', (20751, 20775), False, 'import os\n'), ((3698, 3720), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3712, 3720), False, 'import random\n'), ((3929, 3986), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 
'(0)'], {'fontScale': '(tl / 3)', 'thickness': 'tf'}), '(label, 0, fontScale=tl / 3, thickness=tf)\n', (3944, 3986), False, 'import cv2\n'), ((4358, 4470), 'cv2.putText', 'cv2.putText', (['img', 'score', '(c1[0], c1[1] + 30)', '(0)', '(tl / 3)', '[225, 255, 255]'], {'thickness': 'tf', 'lineType': 'cv2.LINE_AA'}), '(img, score, (c1[0], c1[1] + 30), 0, tl / 3, [225, 255, 255],\n thickness=tf, lineType=cv2.LINE_AA)\n', (4369, 4470), False, 'import cv2\n'), ((4648, 4667), 'numpy.asarray', 'np.asarray', (['ori_img'], {}), '(ori_img)\n', (4658, 4667), True, 'import numpy as np\n'), ((8219, 8297), 'numpy.concatenate', 'np.concatenate', (['[dt_instances.boxes[i], dt_instances.scores[i:i + 1]]'], {'axis': '(-1)'}), '([dt_instances.boxes[i], dt_instances.scores[i:i + 1]], axis=-1)\n', (8233, 8297), True, 'import numpy as np\n'), ((9905, 9924), 'numpy.concatenate', 'np.concatenate', (['ret'], {}), '(ret)\n', (9919, 9924), True, 'import numpy as np\n'), ((10031, 10071), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.float32'}), '(label_path, dtype=np.float32)\n', (10041, 10071), True, 'import numpy as np\n'), ((14415, 14441), 'os.path.exists', 'os.path.exists', (['label_path'], {}), '(label_path)\n', (14429, 14441), False, 'import os\n'), ((14946, 14962), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['img'], {}), '(img)\n', (14957, 14962), True, 'import torchvision.transforms.functional as F\n'), ((15437, 15510), 'os.path.join', 'os.path.join', (['self.args.mot_path', '"""DanceTrack/test"""', 'self.seq_num', '"""img1"""'], {}), "(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1')\n", (15449, 15510), False, 'import os\n'), ((15532, 15608), 'os.path.join', 'os.path.join', (['self.args.mot_path', '"""DanceTrack/test"""', 'self.seq_num', '"""img1"""', '_'], {}), "(self.args.mot_path, 'DanceTrack/test', self.seq_num, 'img1', _)\n", (15544, 15608), False, 'import os\n'), ((16098, 16152), 'os.path.join', 'os.path.join', 
(['self.predict_path', 'f"""{self.seq_num}.txt"""'], {}), "(self.predict_path, f'{self.seq_num}.txt')\n", (16110, 16152), False, 'import os\n'), ((18619, 18631), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (18623, 18631), False, 'from tqdm import tqdm\n'), ((9656, 9734), 'numpy.concatenate', 'np.concatenate', (['[dt_instances.boxes[i], dt_instances.scores[i:i + 1]]'], {'axis': '(-1)'}), '([dt_instances.boxes[i], dt_instances.scores[i:i + 1]], axis=-1)\n', (9670, 9734), True, 'import numpy as np\n'), ((12604, 12632), 'numpy.array', 'np.array', (['[[x1, y1, x2, y2]]'], {}), '([[x1, y1, x2, y2]])\n', (12612, 12632), True, 'import numpy as np\n'), ((16177, 16231), 'os.path.join', 'os.path.join', (['self.predict_path', 'f"""{self.seq_num}.txt"""'], {}), "(self.predict_path, f'{self.seq_num}.txt')\n", (16189, 16231), False, 'import os\n'), ((18506, 18547), 'os.path.join', 'os.path.join', (['self.predict_path', '"""gt.txt"""'], {}), "(self.predict_path, 'gt.txt')\n", (18518, 18547), False, 'import os\n'), ((19188, 19207), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (19200, 19207), False, 'import torch\n'), ((20304, 20321), 'main.get_args_parser', 'get_args_parser', ([], {}), '()\n', (20319, 20321), False, 'from main import get_args_parser\n'), ((20387, 20408), 'pathlib.Path', 'Path', (['args.output_dir'], {}), '(args.output_dir)\n', (20391, 20408), False, 'from pathlib import Path\n'), ((19866, 19920), 'os.path.join', 'os.path.join', (['self.predict_path', 'f"""{self.seq_num}.txt"""'], {}), "(self.predict_path, f'{self.seq_num}.txt')\n", (19878, 19920), False, 'import os\n'), ((9760, 9802), 'numpy.concatenate', 'np.concatenate', (['(box_with_score, [id + 1])'], {}), '((box_with_score, [id + 1]))\n', (9774, 9802), True, 'import numpy as np\n')] |
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize
from pyddem.volint_tools import neff_circ, std_err
import functools
import matplotlib.ticker as mtick
from mpl_toolkits.axes_grid.inset_locator import inset_axes
# Global matplotlib styling shared by all panels of the figure below.
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
# Aggregated spatial-correlation (variogram) results of GP-interpolated,
# deseasonalized elevation time series.
all_csv = '/home/atom/ongoing/work_worldwide/validation/tcorr/tinterp_corr_deseas_agg_all.csv'
# all_csv = '/home/atom/ongoing/work_worldwide/validation/tinterp_corr_agg_all.csv'
df = pd.read_csv(all_csv)
# df = df[df.reg==5]
# Distinct variogram cutoff distances and time lags present in the table.
cutoffs = list(set(list(df.cutoff)))
dts = sorted(list(set(list(df.nb_dt))))
# One color per correlation range/category, reused across all panels.
col = ['tab:orange','tab:blue','tab:olive','tab:red','tab:cyan','tab:brown','tab:gray','tab:pink','tab:purple']
#plot covar by lag
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(df_c.bins.values[1],df_c.exp.values[1],color=col[dts.index(dt)],label=str(dt))
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# elif cutoff == 100000:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# else:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
#
# plt.ylim([0,50])
# plt.xscale('log')
# plt.legend()
#plot covar by dt
# Drop a few time lags, then average the empirical variogram variance in bin
# windows around chosen lags for each cutoff level. arr_res holds the mean
# variance per (time lag, spatial-range slot 0..6); arr_count the mean pair
# counts used to mask unreliable estimates.
dts = sorted(dts)
dts.remove(540.)
dts.remove(900.)
dts.remove(1750.)
dts.remove(2250.)
arr_res = np.zeros((len(dts),7))
arr_count = np.zeros((len(dts),7))
for dt in dts:
    df_dt = df[df.nb_dt == dt]
    for cutoff in cutoffs:
        df_c = df_dt[df_dt.cutoff == cutoff]
        if cutoff == 10000:
            # short-range variogram: slots 0-2 (~0.15 km, ~2 km, ~5 km)
            arr_res[dts.index(dt),0]=np.nanmean(df_c.exp.values[1:2])
            arr_count[dts.index(dt),0]=np.nanmean(df_c['count'].values[1:2])
            arr_res[dts.index(dt), 1] = np.nanmean(df_c.exp.values[20 - 10:20 + 10])
            arr_count[dts.index(dt), 1] = np.nanmean(df_c['count'].values[20 - 10:20 + 10])
            arr_res[dts.index(dt), 2] = np.nanmean(df_c.exp.values[50 - 10:50 + 10])
            arr_count[dts.index(dt), 2] = np.nanmean(df_c['count'].values[50 - 10:50 + 10])
        elif cutoff == 100000:
            # mid-range variogram: slots 3-4 (~20 km, ~50 km)
            # NOTE(review): the variance window here (20-5:20+20) is asymmetric
            # and differs from its count window (20-10:20+10) — confirm intended.
            arr_res[dts.index(dt),3]=np.nanmean(df_c.exp.values[20-5:20+20])
            arr_count[dts.index(dt),3]=np.nanmean(df_c['count'].values[20-10:20+10])
            arr_res[dts.index(dt),4]=np.nanmean(df_c.exp.values[50-10:50+10])
            arr_count[dts.index(dt),4]=np.nanmean(df_c['count'].values[50-10:50+10])
        elif cutoff == 1000000:
            # long-range variogram: slots 5-6 (~200 km, ~500 km)
            arr_res[dts.index(dt),5]=np.nanmean(df_c.exp.values[20-10:20+30])
            arr_count[dts.index(dt),5]=np.nanmean(df_c['count'].values[20-10:20+30])
            arr_res[dts.index(dt),6]=np.nanmean(df_c.exp.values[50-40:50+40])
            arr_count[dts.index(dt),6]=np.nanmean(df_c['count'].values[50-40:50+40])
# Mask variance estimates backed by fewer than 100 pairs.
arr_res[arr_count<100]=np.nan
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(dt,df_c.exp.values[1],color=col[0])
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[1])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[2])
# elif cutoff == 100000:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[3])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[4])
# else:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[5])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[6])
# Figure layout: 8x13 grid; the top row holds the three variogram panels (a).
fig = plt.figure(figsize=(7.2,9.3))
# plt.subplots_adjust(hspace=0.3)
grid = plt.GridSpec(8, 13, wspace=0.05, hspace=0.5)
ax = fig.add_subplot(grid[:2,:2])
# ax = fig.add_subplot(2, 1, 1)
# Build one composite empirical variogram at dt = 720 days by stitching the
# three cutoff levels: fine bins < 3 km, mid 3-30 km, coarse > 30 km.
vario = df[df.nb_dt == 720.]
vec_bins = []
vec_exp = []
vgm1 = vario[vario.cutoff == 10000]
vgm1 = vgm1[vgm1.bins<3000]
for i in range(6):
    # average fine-scale bins in groups of 5 to reduce noise
    vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
    vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
# vec_bins += vgm1.bins.tolist()
# vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 100000]
vgm1 = vgm1[np.logical_and(vgm1.bins>3000,vgm1.bins<30000)]
vec_bins += vgm1.bins.tolist()
vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 1000000]
vgm1 = vgm1[vgm1.bins>30000]
for i in range(18):
    # same 5-bin averaging for the coarse-scale bins
    vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
    vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
vec_bins = np.array(vec_bins)
vec_exp=np.array(vec_exp)
def sph_var(c0, c1, a1, h):
    """Spherical variogram model evaluated at scalar lag h.

    c0 is the nugget, c1 the partial sill and a1 the range; beyond the range
    the model is flat at the total sill c0 + c1.
    """
    if h >= a1:
        return c0 + c1
    ratio = h / a1
    return c0 + c1 * (1.5 * ratio - 0.5 * ratio ** 3)
# Lag vector (m) for evaluating the fitted sum-of-spherical model: dense below
# 3 km, coarser steps at mid and long range.
vect = np.array(list(np.arange(0,3000,1)) + list(np.arange(3000,30000,10)) + list(np.arange(30000,3000000,100)))
mod = []
# Cumulative empirical sills at dt = 720 days (one per range, prepended zero)
# and the corresponding correlation ranges in km.
c1s = [0] + list(arr_res[dts.index(720.),:])
a1s = [0.2,2,5,20,50,200]
#find unbiased sills
# Work from the longest range down, removing the contribution of the
# longer-range spherical components already determined.
list_c = []
for j in range(len(a1s)):
    print('Range:' + str(a1s[-1 - j]))
    c = c1s[-2 - j] - c1s[-3 - j]
    print(c)
    for k in range(j):
        # c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
        # NOTE(review): j > 5 can never hold since j < len(a1s) == 6, so only
        # the j == 5 branch ever subtracts here — confirm this is intended.
        if j>5:
            c -= (sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000) - sph_var(0,list_c[k], a1s[-1-k]*1000,a1s[-2-j]*1000))
        elif j==5:
            c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
    # partial sills cannot be negative
    c = max(0, c)
    list_c.append(c)
list_c.reverse()
#compute variogram
# Evaluate the sum of all spherical components at every lag of vect.
for i in range(len(vect)):
    val = 0
    for j in range(len(a1s)):
        val += sph_var(0,list_c[j],a1s[j]*1000,vect[i])
    mod.append(val)
mod = np.array(mod)
# Panel (a), left subpanel: empirical variance vs lag up to 3 km, with the
# fitted sum-of-spherical model and vertical markers at the sills s0, s1.
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,3))
ax.set_ylim((0,50))
ax.set_xticks([0,1,2])
ax.text(0.075, 0.975, 'a', transform=ax.transAxes,
        fontsize=8, fontweight='bold', va='top', ha='left')
ax.vlines(0.15,0,60,color=col[0],linewidth=0.5)
ax.text(0.4,c1s[1]-5,'$s_0$',color=col[0],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(2,0,60,color=col[1],linewidth=0.5)
ax.text(2.2,c1s[2]-5,'$s_1$',color=col[1],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.set_ylabel('Variance of elevation differences (m$^2$)')
ax.tick_params(width=0.35,length=2.5)
# Middle subpanel: same data up to 30 km, sills s2 and s3.
ax = fig.add_subplot(grid[:2,2:4])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,30))
ax.set_ylim((0,50))
ax.set_xticks([0,10,20])
# ax.text(0.075, 0.975, 'B', transform=ax.transAxes,
#         fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(5,0,60,color=col[2],linewidth=0.5)
ax.text(6,c1s[3]-5,'$s_2$',color=col[2],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(20,0,60,color=col[3],linewidth=0.5)
ax.text(21,c1s[4]-5,'$s_3$',color=col[3],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted',label='Global mean variance')
ax.set_yticks([])
ax.set_xlabel('Spatial lag (km)')
ax.tick_params(width=0.35,length=2.5)
# Right subpanel: full range up to 550 km, sills s4-s6, plus the shared legend.
ax = fig.add_subplot(grid[:2,4:6])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,550))
ax.set_ylim((0,50))
ax.set_xticks([0,100,200,300,400,500])
# ax.text(0.075, 0.975, 'C', transform=ax.transAxes,
#         fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(50,0,60,colors=[col[4]],linewidth=0.5)
ax.text(70,c1s[5]-5,'$s_4$',color=col[4],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(200,0,60,colors=[col[5]],linewidth=0.5)
ax.text(220,c1s[6]-7,'$s_5$',color=col[5],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(500,0,60,colors=[col[6]],linewidth=0.5)
ax.text(480,c1s[6]-7,'$s_6$',color=col[6],ha='right',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.tick_params(width=0.35,length=2.5)
# Empty artists below exist only to populate the legend entries.
ax.plot([],[],color='grey',linestyle='dashed',label='Sum of spherical models')
ax.scatter([],[],color='black',marker='x',label='Empirical variance')
ax.vlines([],[],[],color=col[0],label='0.15 km',linewidth=0.5)
ax.vlines([],[],[],color=col[1],label='2 km',linewidth=0.5)
ax.vlines([],[],[],color=col[2],label='5 km',linewidth=0.5)
ax.vlines([],[],[],color=col[3],label='20 km',linewidth=0.5)
ax.vlines([],[],[],color=col[4],label='50 km',linewidth=0.5)
ax.vlines([],[],[],color=col[5],label='200 km',linewidth=0.5)
ax.vlines([],[],[],color=col[6],label='500 km',linewidth=0.5)
ax.legend(loc='lower right',ncol=3,title='Spatial correlations of GP elevation at $\Delta t$ = 720 days',title_fontsize=6,columnspacing=0.5)
ax.set_yticks([])
# Panel (b): variance vs time lag to closest observation, one curve per
# correlation range. A linear model of the standard deviation vs dt is fitted
# for each range, then squared (minus a seasonal term) for display.
ax = fig.add_subplot(grid[2:4,:6])
coefs_list = []
y = None
# arr_res[0:1,4]=25
# arr_res[arr_res>25] = 25.
# arr_res[4,2]=np.nan
# arr_res[3:,3]=np.nan
# arr_res[0,3]=25.
# arr_res[0,3:] = np.nan
for i in [0,1,2,3,4,5,6]:
    # i=0
    # arr_res[-1,0]=np.nan
    # Fit std = a*dt + b on the non-NaN entries of this range slot.
    coefs , _ = scipy.optimize.curve_fit(lambda t,a,b:a*t+b, np.array(dts)[~np.isnan(arr_res[:,i])], np.sqrt(arr_res[:,i][~np.isnan(arr_res[:,i])]))
    coefs_list.append(coefs)
    x = np.arange(0, 3000, 1)
    # y0 keeps the previous (shorter-range) curve so the band between
    # consecutive ranges can be shaded.
    if y is not None:
        y0 = y
    else:
        y0 = x*0
    y = coefs[0]*x+coefs[1] #- 2*np.sin(x/365.2224*np.pi)**2
    # y[y>25]=25.
    # y[y<y0]=y0[y<y0]
    y = y
    # plot variance (std squared) minus a small annual seasonal modulation
    ax.plot(x,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color=col[i])
    ax.fill_between(x,y0**2 -2*np.sin(x/365.2224*2*np.pi)**2,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color = col[i],alpha=0.2)
# ax.fill_between(x,40*np.ones(len(x)),y,color='tab:gray')
# arr_res[0,3:]=25.
# Overlay the empirical variance points used for the fits.
for i in [0,1,2,3,4,5,6]:
    ax.scatter(dts,arr_res[:,i],color=col[i])
# ax.hlines(25,0,3000,linestyles='dashed',color='tab:gray')
# Empty artists only to populate the legend.
ax.plot([],[],color='black',label='Model fit')
ax.fill_between([],[],color=col[0],label='0.15 km')
ax.fill_between([],[],color=col[1],label='2 km')
ax.fill_between([],[],color=col[2],label='5 km')
ax.fill_between([],[],color=col[3],label='20 km')
ax.scatter([],[],color='black',label='Empirical\nvariance')
ax.fill_between([],[],color=col[4],label='50 km')
ax.fill_between([],[],color=col[5],label='200 km')
ax.fill_between([],[],color=col[6],label='500 km')
ax.set_xlim([0,1370])
ax.set_ylim([0,78])
ax.set_ylabel('Variance of elevation differences (m$^{2}$)')
ax.set_xlabel('Days to closest observation $\Delta t$')
# Mark dt = 720 days, the lag used for the variograms of panel (a).
ax.vlines(720,0,100,colors='black',linestyles='dashed')
ax.text(740,5,'$\overline{s_{0}(\Delta t)}$: correlated until 0.15 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:orange')
ax.text(800,22,'$s_{1}(\Delta t)$: correlated until 2 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:blue')
ax.text(1150,35,'$s_{3}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:red')
ax.text(1250,48,'$s_{5}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:brown')
# ax.text(1000,22,'Fully correlated = Systematic',bbox= dict(boxstyle='round', facecolor='white', alpha=0.5),color='dimgrey')
# plt.xscale('log')
ax.legend(loc='upper left',bbox_to_anchor=(0.0625,0,0.9375,1),title='Spatial correlations of\nGP elevation with\ntime lag to observation',title_fontsize=6,ncol=2,columnspacing=0.5)
ax.text(0.025, 0.975, 'b', transform=ax.transAxes,
        fontsize=8, fontweight='bold', va='top', ha='left')
ax.text(740,45,'panel (a)',fontweight='bold',va='bottom',ha='left')
# plt.savefig('/home/atom/ongoing/work_worldwide/figures/Figure_S12.png',dpi=360)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[4:6,:6])
# Correlation ranges (m) and, for each, the (slope, intercept) of the linear
# std-vs-dt fits from the panel above (last entry is the fully-correlated fit).
corr_ranges = [150, 2000, 5000, 20000, 50000]
coefs = [np.array([1.26694247e-03, 3.03486839e+00]),
         np.array([1.35708936e-03, 4.05065698e+00]),
         np.array([1.42572733e-03, 4.20851582e+00]),
         np.array([1.82537137e-03, 4.28515920e+00]),
         np.array([1.87250755e-03, 4.31311254e+00]),
         np.array([2.06249620e-03, 4.33582812e+00])]
# thresh: minimum dt (days) before a range contributes; ind: index offset of
# the reference fit used to anchor each range's sill fraction.
thresh = [0, 0, 0, 180, 180]
ind = [1, 1, 1, 2, 1]
def sill_frac(t, a, b, c, d):
    """Partial sill at time lag ``t`` for one correlation range.

    Computes the squared total error (last linear fit in the module-level
    ``coefs``) minus the squared error of this range's own fit ``a*t + b``,
    offset so the difference vanishes at the threshold lag ``c``; ``d``
    selects the reference fit ``coefs[-1 - d]`` for the offset.
    Returns 0 below the threshold lag.
    """
    if t < c:
        return 0
    slope_tot, icpt_tot = coefs[-1]
    slope_ref, icpt_ref = coefs[-1 - d]
    total_sq = (slope_tot * t + icpt_tot) ** 2
    own_sq = (a * t + b) ** 2
    # Offset anchoring the partial sill to zero at t == c.
    anchor = (icpt_tot + c * slope_tot) ** 2 - (icpt_ref + c * slope_ref) ** 2
    return total_sq - own_sq - anchor
# One partial-sill function per correlation range, with the fit parameters
# frozen via functools.partial (only the time lag remains free).
corr_std_dt = [functools.partial(sill_frac,a=coefs[i][0],b=coefs[i][1],c=thresh[i],d=ind[i]) for i in range(len(corr_ranges))]
# Synthetic glacier areas, doubling each step (units appear to be m^2; the
# plot below divides by 1e6 to get km^2).
list_areas = [100*2**i for i in np.arange(3,31)]
list_df=[]
for area in list_areas:
    # Time lags (days) to the closest observation, and the assumed fraction
    # of the glacier area observed at each lag.
    dt = [180,540,900,1260]
    perc_area = [0.5,0.2,0.2,0.1]
    dx=100.  # NOTE(review): assigned but not used in this loop
    nsamp_dt = np.zeros(len(dt)) * np.nan
    err_corr = np.zeros((len(dt), len(corr_ranges) + 1)) * np.nan
    for j in np.arange(len(dt)):
        # Total per-pixel error budget (m) to partition among the components.
        final_num_err_dt = 10.
        nsamp_dt[j] = perc_area[j]*area
        sum_var = 0
        # Partition the total variance into the successive correlated
        # components (longest range first); the leftover variance goes to
        # the last bin. max(0, ...) guards against negative partial sills.
        for k in range(len(corr_ranges)+1):
            if k != len(corr_ranges):
                err_corr[j,k] = np.sqrt(max(0,corr_std_dt[len(corr_ranges)-1-k](dt[j]) - sum_var))
                sum_var += err_corr[j,k] ** 2
            else:
                err_corr[j, k]=np.sqrt(max(0,final_num_err_dt**2-sum_var))
    final_num_err_corr, int_err_corr = (np.zeros( len(corr_ranges) + 1) * np.nan for i in range(2))
    for k in range(len(corr_ranges) + 1):
        # Area-weighted mean error over the time-lag classes.
        final_num_err_corr[k] = np.sqrt(np.nansum(err_corr[:, k] * nsamp_dt) / np.nansum(nsamp_dt))
        if k == 0:
            # Fully correlated component: fixed 200 km range.
            tmp_length = 200000
        else:
            tmp_length = corr_ranges[len(corr_ranges) - k]
        if final_num_err_corr[k] == 0:
            int_err_corr[k] = 0
        else:
            # Standard error after spatial integration over a disc of `area`,
            # assuming a spherical variogram model of range `tmp_length`.
            int_err_corr[k] = std_err(final_num_err_corr[k],
                                      neff_circ(area, [(tmp_length, 'Sph', final_num_err_corr[k] ** 2)]))
    # One-row frame: integrated error per correlation range, plus the area.
    df_int = pd.DataFrame()
    for i in range(len(corr_ranges)):
        df_int['err_corr_'+str(corr_ranges[i])] =[int_err_corr[len(corr_ranges)-i]]
    df_int['err_corr_200000'] =[int_err_corr[0]]
    df_int['area']=area
    list_df.append(df_int)
df = pd.concat(list_df)
#First panel: sources for volume change
col = ['tab:orange','tab:blue','tab:olive','tab:red','tab:cyan','tab:brown','tab:gray','tab:pink','tab:purple']
tmp_y = np.zeros(len(list_areas))
tmp_y_next = np.zeros(len(list_areas))
# Stack the squared 95% uncertainties ((2*sigma)^2) of the six components as
# cumulative filled bands vs glacier area (x converted m^2 -> km^2).
for i in range(6):
    tmp_y = tmp_y_next
    tmp_y_next = tmp_y + (2*df.iloc[:len(list_areas),i])**2
    ax.fill_between(x=np.array(list_areas)/1000000,y1=tmp_y,y2=tmp_y_next,interpolate=True,color=col[i],alpha=0.5,edgecolor=None)
    if i == 0:
        # Dashed line separating GP-regression sources from the
        # spatial-correlation sources stacked above.
        ax.plot(np.array(list_areas)/1000000,tmp_y_next,color='black',linestyle='--')
# Empty-data proxy artists for the legend.
ax.fill_between([],[],color=col[0],label='0.15 km',alpha=0.5)
ax.fill_between([],[],color=col[1],label='2 km',alpha=0.5)
ax.fill_between([],[],color=col[2],label='5 km',alpha=0.5)
ax.fill_between([],[],color=col[3],label='20 km',alpha=0.5)
ax.fill_between([],[],color=col[4],label='50 km',alpha=0.5)
ax.fill_between([],[],color=col[5],label='200 km',alpha=0.5)
ax.plot([],[],color='black',linestyle='--',label='Limit GP/spatial\ncorrelation sources')
ax.set_xscale('log')
ax.set_xlabel('Glacier area (km²)')
ax.set_ylabel('Squared uncertainties of\nspecific volume change (m²)')
ax.set_ylim((0,30))
ax.set_xlim((0.005,7.5*10**10/1000000))
handles, labels = ax.get_legend_handles_labels()
# sort both labels and handles by labels
labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
print(labels[0:2])
# Reorder so the dashed-line entry sits between the two legend groups.
ax.legend(handles[0:2]+(handles[-1],)+handles[2:-1], labels[0:2]+(labels[-1],)+labels[2:-1],title='Uncertainty sources for specific volume change\n(i.e. mean elevation change)',title_fontsize=6,ncol=3,columnspacing=0.5)
ax.text(0.023,4*1.2,'Uncertainty \nsources from\npixel-wise\nGP regression\n(0.15 km)',color=plt.cm.Greys(0.8),va='center',ha='center')
ax.text(5,4*2,'Uncertainty sources from \nshort- to long-\nrange correlations\n(2 km - 200 km)',color=plt.cm.Greys(0.8),va='center',ha='center')
ax.text(0.025, 0.95, 'c', transform=ax.transAxes, fontsize=8, fontweight='bold', va='top', ha='left')
ax.tick_params(width=0.35,length=2.5)
# NOTE(review): the re-imports below suggest this file concatenates several
# standalone scripts; these names were already imported earlier in the file.
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from pybob.ddem_tools import nmad
# Glacier-wide dh/dt integrated from the GP method (this analysis) and from
# high-resolution DEM differencing (validation data).
df_gp = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv')
df_hr = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.category.values=='matthias')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.area.values<1000000.)
# Keep glaciers with more than 70% measured area in the HR data.
ind = df_hr.perc_meas>0.70
list_rgiid = list(df_hr[ind].rgiid)
list_area = list(df_hr[df_hr.rgiid.isin(list_rgiid)].area)
# Sort glacier IDs by decreasing area so larger glaciers are drawn first.
list_rgiid = [rgiid for _, rgiid in sorted(zip(list_area,list_rgiid),reverse=True)]
list_area = sorted(list_area,reverse=True)
### Panel (d): per-glacier GP vs high-resolution dh/dt, markers sized by area.
ax = fig.add_subplot(grid[:2, 7:])
kval = 3.5  # base marker-size scale factor
# sites=np.unique(data['Site'])
# colors=['b','g','r','c','m','y','k','grey']
colors = ['tab:blue','tab:orange','tab:red','tab:grey']
# sites=sites.tolist()
# 1:1 reference line.
ax.plot([-3, 0.5], [-3, 0.5], color='k', linestyle='-', linewidth=0.75)
label_list=[]
diff2 = []
list_area2 = []
for rgiid in list_rgiid:
    df_gp_rgiid = df_gp[df_gp.rgiid==rgiid]
    df_hr_rgiid = df_hr[df_hr.rgiid==rgiid]
    # Pick a color per region from the dataset category / site.
    # NOTE(review): if none of the branches below match, `col` silently keeps
    # its value from the previous iteration — confirm all sites are covered.
    if df_hr_rgiid.category.values[0]=='matthias':
        col = colors[0]
    elif df_hr_rgiid.category.values[0]=='brian':
        col = colors[1]
    else:
        if df_hr_rgiid.site.values[0] in ['Chhota','Gangotri','Abramov','Mera']:
            col = colors[2]
        elif df_hr_rgiid.site.values[0] == 'Yukon':
            col=colors[3]
        elif df_hr_rgiid.site.values[0] == 'MontBlanc':
            col=colors[0]
    # Marker size scales with sqrt(area in km^2).
    ax.errorbar(df_hr_rgiid.dhdt.values[0], df_gp_rgiid.dhdt.values[0],
                xerr=df_hr_rgiid.err_dhdt.values[0],
                yerr=df_gp_rgiid.err_dhdt.values[0],marker='o',mec='k',
                ms=kval*(df_hr_rgiid.area.values[0]/1000000)**0.5/3, mew=0.25,elinewidth=0.25,ecolor=col,mfc=col,alpha=0.9)
    #,ecolor=colors[sites.index(data['Site'][value])]mfc=colors[sites.index(data['Site'][value])],alpha=0.5)
    diff2.append(df_hr_rgiid.dhdt.values[0]-df_gp_rgiid.dhdt.values[0])
    list_area2.append(df_hr_rgiid.area.values[0])
# Mean bias annotation: mean difference +/- 2*NMAD/sqrt(N).
ax.text(-1.9,0,'Mean bias:\n'+str(np.round(np.nanmean(diff2),2))+'$\pm$'+str(np.round(2*nmad(diff2)/np.sqrt(len(diff2)),2))+' m yr$^{-1}$',ha='center',va='center',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
print(np.nanmean(diff2))
# Area-weighted mean difference.
print(np.nansum(np.array(diff2)*np.array(list_area2))/np.nansum(np.array(list_area2)))
ax.set_ylabel('Specific volume change (m yr$^{-1}$)')
ax.set_xlabel('High-resolution specific volume change (m yr$^{-1}$)')
#plt.legend(loc='upper left')
ax.set_xlim([-2.95, 0.5])
ax.set_ylim([-2.95, 0.5])
#mask = ~np.isnan(b_dot_anomaly) & ~np.isnan(dP)
# slope, intercept, r_value, p_value, std_err = stats.linregress(data['MB GEOD'], data['MB ASTER'])
# print(slope)
# print("r-squared:", r_value**2)
# print('std err:', std_err)
# plt.text(-320, -1250, 'Slope:' + str(np.round(slope, 2)))
# plt.text(-320, -1300, 'r$^{2}$:' + str(np.round(r_value**2, 2)))
## add symbols to show relative size of glaciers
ax.errorbar(-2500/1000,-150/1000,ms = kval*(5.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k',marker='o')
ax.errorbar(-2500/1000,-500/1000,ms = kval*(50.0**0.5)/3, xerr=0.0001, yerr=0.0001,color='k',marker='o')
ax.errorbar(-2500/1000,-1250/1000,ms = kval*(500.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k', marker='o')
ax.text(-2500/1000, -220/1000,'5 km$^2$',va='top',ha='center')
ax.text(-2500/1000, -650/1000,'50 km$^2$',va='top',ha='center')
ax.text(-2500/1000, -1730/1000,'500 km$^2$',va='top',ha='center')
ax.text(0.025,0.966,'d',transform=ax.transAxes,
        fontsize=8, fontweight='bold', va='top', ha='left')
# Empty-data proxy artists for the legend.
ax.plot([],[],color=colors[0],label='Alps',lw=1)
ax.plot([],[],color=colors[1],label='Western NA',lw=1)
ax.plot([],[],color=colors[2],label='High Mountain Asia',lw=1)
ax.plot([],[],color=colors[3],label='Alaska',lw=1)
ax.plot([],[],color='k',label='1:1 line',lw=0.5)
ax.legend(loc='lower right',title='Validation of volume changes with high-resolution DEMs',title_fontsize=6,ncol=3)
ax.tick_params(width=0.35,length=2.5)
### Panel (f): empirical vs theoretical volume-change uncertainty, binned by
### the theoretical uncertainty magnitude.
ax = fig.add_subplot(grid[4:6, 7:])
ax.text(0.025,0.966,'f',transform=ax.transAxes,
        fontsize=8, fontweight='bold', va='top', ha='left')
# Bin edges (m yr^-1) of the theoretical dh/dt uncertainty.
vec_err_dhdt=[0.1,0.2,0.4,0.6,0.8,1,1.5,2]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_err_dhdt)-1):
    # Glaciers whose theoretical uncertainty falls in this bin.
    ind = np.logical_and(df_gp.err_dhdt < vec_err_dhdt[i+1],df_gp.err_dhdt>=vec_err_dhdt[i])
    list_rgiid = list(df_gp[ind].rgiid)
    diff_dhdt = []
    err_dhdt = []
    ci_size = []
    for rgiid in list_rgiid:
        diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
        # Combined theoretical uncertainty (HR and GP in quadrature).
        err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
        err_dhdt.append(err)
        diff_dhdt.append(diff)
        # Do the two 95% (2-sigma) confidence intervals intersect?
        if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
                df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
            ci_too_small = 0
        elif ~np.isnan(diff):  # numpy bitwise-not on np.bool_: True when diff is not NaN
            ci_too_small = 1
        else:
            ci_too_small = np.nan
        ci_size.append(ci_too_small)
    # Per-bin statistics: empirical spread (NMAD of differences), median
    # theoretical uncertainty, bin center, fraction of intersecting CIs,
    # and the number of glaciers.
    list_err_emp.append(nmad(diff_dhdt))
    list_err_the.append(np.nanmedian(err_dhdt))
    bin_err.append(np.mean((vec_err_dhdt[i+1],vec_err_dhdt[i])))
    nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
    nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
    # Annotation placement per bin.
    if i < 2:
        va_text = 'bottom'
        y_off = 0.1
        if i == 0:
            x_off = -0.05
        else:
            x_off = 0
    else:
        va_text = 'top'
        y_off = -0.1
        # FIX: x_off was previously left unassigned in this branch and
        # silently reused the stale value from iteration i == 1 (and would
        # raise NameError if the first bins were removed). Set it explicitly.
        x_off = 0
    ax.text(bin_err[i]+x_off, list_err_emp[i] + y_off, str(nb_gla[i]) + ' gla.\n' + str(np.round(nb_95ci[i] * 100, 0)) + '%',
            va=va_text, ha='center')
ax.plot([0,2],[0,2],color='k',label='1:1 line',lw=0.5)
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Error (1$\sigma$) comparison to HR elevation differences\n(printed: glacier number and $\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xlabel('Theoretical specific volume change uncertainty (m yr$^{-1}$)')
ax.set_ylabel('Empirical specific volume\nchange uncertainty (m yr$^{-1}$)')
ax.set_ylim((0,1.4))
ax.legend(loc='upper right',title='Validation of volume change uncertainties\nwith varying uncertainty size',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
### Panel (e): empirical vs theoretical volume-change uncertainty, binned by
### glacier area (same statistics as panel f but with area bins).
ax = fig.add_subplot(grid[2:4, 7:])
ax.text(0.025,0.966,'e',transform=ax.transAxes,
        fontsize=8, fontweight='bold', va='top', ha='left')
# Area bin edges (km^2).
vec_area=[0.01,0.05,0.2,1,5,20,200,1500]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_area)-1):
    # Glaciers whose area (converted m^2 -> km^2) falls in this bin.
    ind = np.logical_and(df_gp.area.values/1000000 < vec_area[i+1],df_gp.area.values/1000000>=vec_area[i])
    list_rgiid = list(df_gp[ind].rgiid)
    diff_dhdt = []
    err_dhdt = []
    ci_size = []
    for rgiid in list_rgiid:
        diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
        # Combined theoretical uncertainty (HR and GP in quadrature).
        err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
        diff_dhdt.append(diff)
        err_dhdt.append(err)
        # Do the two 95% (2-sigma) confidence intervals intersect?
        if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
                df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
            ci_too_small = 0
        elif ~np.isnan(diff):  # numpy bitwise-not on np.bool_: True when diff is not NaN
            ci_too_small = 1
        else:
            ci_too_small = np.nan
        ci_size.append(ci_too_small)
    # Per-bin statistics: empirical spread (NMAD), median theoretical
    # uncertainty, bin center, fraction of intersecting CIs, glacier count.
    list_err_emp.append(nmad(diff_dhdt))
    list_err_the.append(np.nanmedian(err_dhdt))
    bin_err.append(np.mean((vec_area[i+1],vec_area[i])))
    nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
    nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
    # Annotation placement per bin.
    if i <2:
        va_text = 'top'
        y_off = -0.1
    else:
        va_text = 'bottom'
        y_off = 0.1
    ax.text(bin_err[i],list_err_emp[i]+y_off,str(nb_gla[i])+' gla.\n'+str(np.round(nb_95ci[i]*100,0))+'%',va=va_text,ha='center')
ax.plot(bin_err,list_err_the,color='black',label='Theoretical uncertainty (1$\sigma$):\nspatially integrated variograms',marker='x')
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Empirical uncertainty (1$\sigma$):\ncomparison to HR elevation differences\n(printed: glacier number and\n$\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xscale('log')
ax.set_xlabel('Glacier area (km$^{2}$)')
ax.set_ylabel('Specific volume\nchange uncertainty (m yr$^{-1}$)')
ax.set_ylim([0,1.4])
ax.legend(loc='upper right',title='Validation of volume change uncertainties\nwith varying glaciers area',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
### Panel (g) data: per-region specific mass-change rates and squared
### uncertainty contributions over 2000-2019.
ax2 = fig.add_subplot(grid[6:,:])
reg_dir = '/home/atom/ongoing/work_worldwide/vol/final'
# One integrated-volume-change CSV per RGI region 01-19.
list_fn_reg = [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg.csv') for i in np.arange(1,20)]
list_df_out = []
for fn_reg in list_fn_reg:
    df = pd.read_csv(fn_reg)
    mult_ann = 20  # number of years in the 2000-01-01 to 2020-01-01 period
    area = df.area.values[0]
    # Cumulative volume change between the two period endpoints.
    dvol = (df[df.time == '2000-01-01'].dvol.values - df[df.time == '2020-01-01'].dvol.values)[0]
    dh = dvol / area
    # Endpoint elevation-change errors combined in quadrature.
    err_dh = np.sqrt(
        df[df.time == '2000-01-01'].err_dh.values[0] ** 2 +
        df[df.time == '2020-01-01'].err_dh.values[0] ** 2)
    # Volume-change error: elevation-change term + outline (area) term
    # (perc_err_cont is a percentage contribution).
    err_dvol = np.sqrt((err_dh * area) ** 2 + (dh * df.perc_err_cont.values[0] / 100. * area) ** 2)
    dvoldt = dvol / mult_ann
    err_dvoldt = err_dvol / mult_ann
    # Mass change (Gt yr^-1): density conversion factor 0.85 with a 0.06
    # uncertainty (presumably 850 +/- 60 kg m^-3 — consistent with the
    # 'Density conversion' legend entry below).
    dmdt = dvol * 0.85 / 10 ** 9 / mult_ann
    err_dmdt = np.sqrt((err_dvol * 0.85 / 10 ** 9) ** 2 + (
        dvol * 0.06 / 10 ** 9) ** 2) / mult_ann
    # Squared uncertainty contributions to the specific mass-change rate,
    # split by source (elevation change, outlines, density).
    sq_err_dmdt_fromdh = (err_dh*area)**2 * (0.85 / mult_ann)**2 /area**2
    sq_err_dmdt_fromarea = (dh * df.perc_err_cont.values[0] / 100. * area) ** 2 * (0.85 / mult_ann)**2 /area**2
    sq_err_dmdt_fromdensity = (dvol * 0.06) ** 2 / mult_ann**2 / area**2
    dmdtda = dmdt/area*10**9
    df_out = pd.DataFrame()
    df_out['region']=[df.reg.values[0]]
    df_out['dmdtda'] = [dmdtda]
    df_out['sq_err_fromdh'] = [sq_err_dmdt_fromdh]
    df_out['sq_err_fromarea'] = [sq_err_dmdt_fromarea]
    df_out['sq_err_fromdensity'] = [sq_err_dmdt_fromdensity]
    df_out['area'] = [area]
    list_df_out.append(df_out)
df_all = pd.concat(list_df_out)
# Global aggregate (region code 21): area-weighted mean rate; squared errors
# weighted by area^2 (i.e. fully correlated across regions).
df_g = pd.DataFrame()
df_g['region']=[21]
df_g['dmdtda'] = [np.nansum(df_all.dmdtda.values*df_all.area.values)/np.nansum(df_all.area.values)]
df_g['sq_err_fromdh'] = [np.nansum(df_all.sq_err_fromdh.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['sq_err_fromarea'] = [np.nansum(df_all.sq_err_fromarea.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['sq_err_fromdensity'] = [np.nansum(df_all.sq_err_fromdensity.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['area'] = [np.nansum(df_all.area.values)]
# Same aggregate excluding regions 5 and 19 (region code 20).
df_noper = pd.DataFrame()
ind = ~df_all.region.isin([5,19])
df_noper['region']=[20]
df_noper['dmdtda'] = [np.nansum(df_all[ind].dmdtda.values*df_all[ind].area.values)/np.nansum(df_all[ind].area.values)]
# NOTE(review): the next three assignments pass scalars (no list wrapper),
# unlike df_g above; this still works because the frame already has a row.
df_noper['sq_err_fromdh'] = np.nansum(df_all[ind].sq_err_fromdh.values * df_all[ind].area.values **2)/np.nansum(df_all[ind].area.values)**2
df_noper['sq_err_fromarea'] = np.nansum(df_all[ind].sq_err_fromarea.values * df_all[ind].area.values **2)/np.nansum(df_all[ind].area.values)**2
df_noper['sq_err_fromdensity'] = np.nansum(df_all[ind].sq_err_fromdensity.values * df_all[ind].area.values **2)/np.nansum(df_all[ind].area.values)**2
df_noper['area'] = [np.nansum(df_all[ind].area.values)]
df_all = pd.concat([df_all,df_noper,df_g])
# X tick labels: the 19 RGI regions plus the two aggregates (codes 20 and 21).
ticks = ['Alaska (01)','Western Canada\nand USA (02)','Arctic Canada\nNorth (03)','Arctic Canada\nSouth (04)','Greenland\nPeriphery (05)', 'Iceland (06)','Svalbard and\nJan Mayen (07)', 'Scandinavia (08)','Russian\nArctic (09)','North Asia (10)','Central\nEurope (11)','Caucasus and\nMiddle East (12)','Central\nAsia (13)','South Asia\nWest (14)','South Asia\nEast (15)','Low\nLatitudes (16)','Southern\nAndes (17)','New\nZealand (18)','Antarctic and\nSubantarctic (19)','Global excl.\n 05 and 19','Global']
x_shift = 0
# Stacked bars of the squared 95% uncertainties (4*sq_err = (2*sigma)^2) per
# source; the two aggregate bars (codes >= 20) are shifted 2 units right.
for i in np.arange(1,22):
    if i==20:
        x_shift+=2
    df_tmp = df_all[df_all.region==i]
    y1 = 4*df_tmp.sq_err_fromdh.values[0]
    y2 = y1 + 4*df_tmp.sq_err_fromarea.values[0]
    y3 = y2 + 4*df_tmp.sq_err_fromdensity.values[0]
    ax2.fill_between(x_shift+np.array((i,i+1)),(0,0),(y1,y1),color='tab:red',edgecolor='white')
    ax2.fill_between(x_shift+np.array((i,i+1)),(y1,y1),(y2,y2),color='tab:blue',edgecolor='white')
    ax2.fill_between(x_shift+np.array((i,i+1)),(y2,y2),(y3,y3),color='tab:pink',edgecolor='white')
# Empty-data proxy artists for the legend.
ax2.fill_between([],[],color='tab:red',label='Elevation change')
ax2.fill_between([],[],color='tab:blue',label='Glacier outlines')
ax2.fill_between([],[],color='tab:pink',label='Density conversion')
ax2.text(0.025, 0.95, 'g', transform=ax2.transAxes, fontsize=8, fontweight='bold', va='top', ha='left')
ax2.set_ylabel('Squared uncertainties of\nspecific mass change rate (m² w.e. yr$^{-2}$)')
ax2.set_xlabel('RGI region')
ax2.legend(title='Uncertainty sources for\nspecific mass change\nduring 2000-2019',loc='upper right',bbox_to_anchor=(0.3,1),title_fontsize=6)
# Ticks at bar centers; the two aggregate bars sit at 22.5 and 23.5.
ax2.set_xticks(list(np.arange(1.5,20.5))+[22.5,23.5])
ax2.set_xticklabels(ticks,rotation=90)
ax2.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
# Rectangle marking the region magnified in the panel (h) inset.
ax2.fill_between((22,24),(-0.00001,-0.00001),(4*0.000275,4*0.000275),facecolor='None',edgecolor='black')
ax2.text(23,4*0.0005,'panel (h)',fontweight='bold',va='bottom',ha='center')
ax2.tick_params(width=0.35,length=2.5)
### Panel (h): inset zooming on the two aggregate bars of panel (g).
ax3 = inset_axes(ax2,width="15%",height='50%',loc='upper right')
x_shift=0
# Redraw only the two aggregate bars (region codes 20 and 21), with the same
# 2-unit shift and stacking as in panel (g).
for i in np.arange(20,22):
    if i==20:
        x_shift+=2
    df_tmp = df_all[df_all.region==i]
    y1 = 4*df_tmp.sq_err_fromdh.values[0]
    y2 = y1 + 4*df_tmp.sq_err_fromarea.values[0]
    y3 = y2 + 4*df_tmp.sq_err_fromdensity.values[0]
    ax3.fill_between(x_shift+np.array((i,i+1)),(0,0),(y1,y1),color='tab:red',edgecolor='white')
    ax3.fill_between(x_shift+np.array((i,i+1)),(y1,y1),(y2,y2),color='tab:blue',edgecolor='white')
    ax3.fill_between(x_shift+np.array((i,i+1)),(y2,y2),(y3,y3),color='tab:pink',edgecolor='white')
# Axis limits match the rectangle drawn on panel (g).
ax3.set_xlim((22,24))
ax3.set_xticks([22.5,23.5])
ax3.set_ylim((-0.00001,4*0.000275))
ax3.set_xticklabels(ticks[-2:],rotation=90)
ax3.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
ax3.text(0.9, 0.95, 'h', transform=ax3.transAxes, fontsize=8, fontweight='bold', va='top', ha='right')
ax3.tick_params(width=0.35,length=2.5)
plt.savefig('/home/atom/ongoing/work_worldwide/figures/final/ED_Figure_5.jpg',dpi=500,bbox_inches='tight')
| [
"numpy.abs",
"numpy.nanmedian",
"pandas.read_csv",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"pyddem.volint_tools.neff_circ",
"numpy.sqrt",
"numpy.round",
"numpy.nanmean",
"pandas.DataFrame",
"matplotlib.pyplot.rcParams.update",
"matplotlib.tic... | [((272, 309), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 5}"], {}), "({'font.size': 5})\n", (291, 309), True, 'import matplotlib.pyplot as plt\n'), ((310, 356), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'lines.linewidth': 0.35}"], {}), "({'lines.linewidth': 0.35})\n", (329, 356), True, 'import matplotlib.pyplot as plt\n'), ((356, 401), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'axes.linewidth': 0.35}"], {}), "({'axes.linewidth': 0.35})\n", (375, 401), True, 'import matplotlib.pyplot as plt\n'), ((401, 447), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'lines.markersize': 2.5}"], {}), "({'lines.markersize': 2.5})\n", (420, 447), True, 'import matplotlib.pyplot as plt\n'), ((447, 490), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'axes.labelpad': 1.5}"], {}), "({'axes.labelpad': 1.5})\n", (466, 490), True, 'import matplotlib.pyplot as plt\n'), ((676, 696), 'pandas.read_csv', 'pd.read_csv', (['all_csv'], {}), '(all_csv)\n', (687, 696), True, 'import pandas as pd\n'), ((4219, 4249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7.2, 9.3)'}), '(figsize=(7.2, 9.3))\n', (4229, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4335), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(8)', '(13)'], {'wspace': '(0.05)', 'hspace': '(0.5)'}), '(8, 13, wspace=0.05, hspace=0.5)\n', (4303, 4335), True, 'import matplotlib.pyplot as plt\n'), ((5106, 5124), 'numpy.array', 'np.array', (['vec_bins'], {}), '(vec_bins)\n', (5114, 5124), True, 'import numpy as np\n'), ((5133, 5150), 'numpy.array', 'np.array', (['vec_exp'], {}), '(vec_exp)\n', (5141, 5150), True, 'import numpy as np\n'), ((6221, 6234), 'numpy.array', 'np.array', (['mod'], {}), '(mod)\n', (6229, 6234), True, 'import numpy as np\n'), ((15049, 15067), 'pandas.concat', 'pd.concat', (['list_df'], {}), '(list_df)\n', (15058, 15067), True, 'import pandas as pd\n'), 
((17253, 17319), 'pandas.read_csv', 'pd.read_csv', (['"""/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv"""'], {}), "('/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv')\n", (17264, 17319), True, 'import pandas as pd\n'), ((17328, 17394), 'pandas.read_csv', 'pd.read_csv', (['"""/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv"""'], {}), "('/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv')\n", (17339, 17394), True, 'import pandas as pd\n'), ((27493, 27515), 'pandas.concat', 'pd.concat', (['list_df_out'], {}), '(list_df_out)\n', (27502, 27515), True, 'import pandas as pd\n'), ((27524, 27538), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (27536, 27538), True, 'import pandas as pd\n'), ((28101, 28115), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28113, 28115), True, 'import pandas as pd\n'), ((28793, 28828), 'pandas.concat', 'pd.concat', (['[df_all, df_noper, df_g]'], {}), '([df_all, df_noper, df_g])\n', (28802, 28828), True, 'import pandas as pd\n'), ((29359, 29375), 'numpy.arange', 'np.arange', (['(1)', '(22)'], {}), '(1, 22)\n', (29368, 29375), True, 'import numpy as np\n'), ((30838, 30899), 'mpl_toolkits.axes_grid.inset_locator.inset_axes', 'inset_axes', (['ax2'], {'width': '"""15%"""', 'height': '"""50%"""', 'loc': '"""upper right"""'}), "(ax2, width='15%', height='50%', loc='upper right')\n", (30848, 30899), False, 'from mpl_toolkits.axes_grid.inset_locator import inset_axes\n'), ((30916, 30933), 'numpy.arange', 'np.arange', (['(20)', '(22)'], {}), '(20, 22)\n', (30925, 30933), True, 'import numpy as np\n'), ((31812, 31924), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/home/atom/ongoing/work_worldwide/figures/final/ED_Figure_5.jpg"""'], {'dpi': '(500)', 'bbox_inches': '"""tight"""'}), "('/home/atom/ongoing/work_worldwide/figures/final/ED_Figure_5.jpg',\n dpi=500, bbox_inches='tight')\n", (31823, 31924), True, 'import matplotlib.pyplot as plt\n'), ((4779, 4830), 'numpy.logical_and', 'np.logical_and', (['(vgm1.bins > 3000)', 
'(vgm1.bins < 30000)'], {}), '(vgm1.bins > 3000, vgm1.bins < 30000)\n', (4793, 4830), True, 'import numpy as np\n'), ((10128, 10149), 'numpy.arange', 'np.arange', (['(0)', '(3000)', '(1)'], {}), '(0, 3000, 1)\n', (10137, 10149), True, 'import numpy as np\n'), ((12701, 12738), 'numpy.array', 'np.array', (['[0.00126694247, 3.03486839]'], {}), '([0.00126694247, 3.03486839])\n', (12709, 12738), True, 'import numpy as np\n'), ((12754, 12791), 'numpy.array', 'np.array', (['[0.00135708936, 4.05065698]'], {}), '([0.00135708936, 4.05065698])\n', (12762, 12791), True, 'import numpy as np\n'), ((12807, 12844), 'numpy.array', 'np.array', (['[0.00142572733, 4.20851582]'], {}), '([0.00142572733, 4.20851582])\n', (12815, 12844), True, 'import numpy as np\n'), ((12860, 12896), 'numpy.array', 'np.array', (['[0.00182537137, 4.2851592]'], {}), '([0.00182537137, 4.2851592])\n', (12868, 12896), True, 'import numpy as np\n'), ((12913, 12950), 'numpy.array', 'np.array', (['[0.00187250755, 4.31311254]'], {}), '([0.00187250755, 4.31311254])\n', (12921, 12950), True, 'import numpy as np\n'), ((12966, 13002), 'numpy.array', 'np.array', (['[0.0020624962, 4.33582812]'], {}), '([0.0020624962, 4.33582812])\n', (12974, 13002), True, 'import numpy as np\n'), ((13336, 13422), 'functools.partial', 'functools.partial', (['sill_frac'], {'a': 'coefs[i][0]', 'b': 'coefs[i][1]', 'c': 'thresh[i]', 'd': 'ind[i]'}), '(sill_frac, a=coefs[i][0], b=coefs[i][1], c=thresh[i], d=\n ind[i])\n', (13353, 13422), False, 'import functools\n'), ((14805, 14819), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14817, 14819), True, 'import pandas as pd\n'), ((19462, 19479), 'numpy.nanmean', 'np.nanmean', (['diff2'], {}), '(diff2)\n', (19472, 19479), True, 'import numpy as np\n'), ((21540, 21631), 'numpy.logical_and', 'np.logical_and', (['(df_gp.err_dhdt < vec_err_dhdt[i + 1])', '(df_gp.err_dhdt >= vec_err_dhdt[i])'], {}), '(df_gp.err_dhdt < vec_err_dhdt[i + 1], df_gp.err_dhdt >=\n vec_err_dhdt[i])\n', (21554, 
21631), True, 'import numpy as np\n'), ((23920, 24030), 'numpy.logical_and', 'np.logical_and', (['(df_gp.area.values / 1000000 < vec_area[i + 1])', '(df_gp.area.values / 1000000 >= vec_area[i])'], {}), '(df_gp.area.values / 1000000 < vec_area[i + 1], df_gp.area.\n values / 1000000 >= vec_area[i])\n', (23934, 24030), True, 'import numpy as np\n'), ((26210, 26229), 'pandas.read_csv', 'pd.read_csv', (['fn_reg'], {}), '(fn_reg)\n', (26221, 26229), True, 'import pandas as pd\n'), ((26413, 26527), 'numpy.sqrt', 'np.sqrt', (["(df[df.time == '2000-01-01'].err_dh.values[0] ** 2 + df[df.time ==\n '2020-01-01'].err_dh.values[0] ** 2)"], {}), "(df[df.time == '2000-01-01'].err_dh.values[0] ** 2 + df[df.time ==\n '2020-01-01'].err_dh.values[0] ** 2)\n", (26420, 26527), True, 'import numpy as np\n'), ((26556, 26645), 'numpy.sqrt', 'np.sqrt', (['((err_dh * area) ** 2 + (dh * df.perc_err_cont.values[0] / 100.0 * area) ** 2)'], {}), '((err_dh * area) ** 2 + (dh * df.perc_err_cont.values[0] / 100.0 *\n area) ** 2)\n', (26563, 26645), True, 'import numpy as np\n'), ((27169, 27183), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (27181, 27183), True, 'import pandas as pd\n'), ((28058, 28087), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (28067, 28087), True, 'import numpy as np\n'), ((28321, 28395), 'numpy.nansum', 'np.nansum', (['(df_all[ind].sq_err_fromdh.values * df_all[ind].area.values ** 2)'], {}), '(df_all[ind].sq_err_fromdh.values * df_all[ind].area.values ** 2)\n', (28330, 28395), True, 'import numpy as np\n'), ((28463, 28539), 'numpy.nansum', 'np.nansum', (['(df_all[ind].sq_err_fromarea.values * df_all[ind].area.values ** 2)'], {}), '(df_all[ind].sq_err_fromarea.values * df_all[ind].area.values ** 2)\n', (28472, 28539), True, 'import numpy as np\n'), ((28610, 28689), 'numpy.nansum', 'np.nansum', (['(df_all[ind].sq_err_fromdensity.values * df_all[ind].area.values ** 2)'], {}), '(df_all[ind].sq_err_fromdensity.values * 
df_all[ind].area.values ** 2)\n', (28619, 28689), True, 'import numpy as np\n'), ((28747, 28781), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28756, 28781), True, 'import numpy as np\n'), ((30577, 30609), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.0e"""'], {}), "('%.0e')\n", (30601, 30609), True, 'import matplotlib.ticker as mtick\n'), ((4564, 4613), 'numpy.nanmean', 'np.nanmean', (['vgm1.bins.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.bins.values[0 + i * 5:5 + i * 5])\n', (4574, 4613), True, 'import numpy as np\n'), ((4623, 4671), 'numpy.nanmean', 'np.nanmean', (['vgm1.exp.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.exp.values[0 + i * 5:5 + i * 5])\n', (4633, 4671), True, 'import numpy as np\n'), ((4993, 5042), 'numpy.nanmean', 'np.nanmean', (['vgm1.bins.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.bins.values[0 + i * 5:5 + i * 5])\n', (5003, 5042), True, 'import numpy as np\n'), ((5052, 5100), 'numpy.nanmean', 'np.nanmean', (['vgm1.exp.values[0 + i * 5:5 + i * 5]'], {}), '(vgm1.exp.values[0 + i * 5:5 + i * 5])\n', (5062, 5100), True, 'import numpy as np\n'), ((13481, 13497), 'numpy.arange', 'np.arange', (['(3)', '(31)'], {}), '(3, 31)\n', (13490, 13497), True, 'import numpy as np\n'), ((16768, 16785), 'matplotlib.pyplot.cm.Greys', 'plt.cm.Greys', (['(0.8)'], {}), '(0.8)\n', (16780, 16785), True, 'import matplotlib.pyplot as plt\n'), ((16913, 16930), 'matplotlib.pyplot.cm.Greys', 'plt.cm.Greys', (['(0.8)'], {}), '(0.8)\n', (16925, 16930), True, 'import matplotlib.pyplot as plt\n'), ((21860, 21979), 'numpy.sqrt', 'np.sqrt', (['(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.rgiid ==\n rgiid].err_dhdt.values[0] ** 2)'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.\n rgiid == rgiid].err_dhdt.values[0] ** 2)\n', (21867, 21979), True, 'import numpy as np\n'), ((22390, 22405), 'pybob.ddem_tools.nmad', 'nmad', (['diff_dhdt'], {}), 
'(diff_dhdt)\n', (22394, 22405), False, 'from pybob.ddem_tools import nmad\n'), ((22431, 22453), 'numpy.nanmedian', 'np.nanmedian', (['err_dhdt'], {}), '(err_dhdt)\n', (22443, 22453), True, 'import numpy as np\n'), ((22474, 22521), 'numpy.mean', 'np.mean', (['(vec_err_dhdt[i + 1], vec_err_dhdt[i])'], {}), '((vec_err_dhdt[i + 1], vec_err_dhdt[i]))\n', (22481, 22521), True, 'import numpy as np\n'), ((24254, 24373), 'numpy.sqrt', 'np.sqrt', (['(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.rgiid ==\n rgiid].err_dhdt.values[0] ** 2)'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0] ** 2 + df_gp[df_gp.\n rgiid == rgiid].err_dhdt.values[0] ** 2)\n', (24261, 24373), True, 'import numpy as np\n'), ((24784, 24799), 'pybob.ddem_tools.nmad', 'nmad', (['diff_dhdt'], {}), '(diff_dhdt)\n', (24788, 24799), False, 'from pybob.ddem_tools import nmad\n'), ((24825, 24847), 'numpy.nanmedian', 'np.nanmedian', (['err_dhdt'], {}), '(err_dhdt)\n', (24837, 24847), True, 'import numpy as np\n'), ((24868, 24907), 'numpy.mean', 'np.mean', (['(vec_area[i + 1], vec_area[i])'], {}), '((vec_area[i + 1], vec_area[i]))\n', (24875, 24907), True, 'import numpy as np\n'), ((26138, 26154), 'numpy.arange', 'np.arange', (['(1)', '(20)'], {}), '(1, 20)\n', (26147, 26154), True, 'import numpy as np\n'), ((26768, 26840), 'numpy.sqrt', 'np.sqrt', (['((err_dvol * 0.85 / 10 ** 9) ** 2 + (dvol * 0.06 / 10 ** 9) ** 2)'], {}), '((err_dvol * 0.85 / 10 ** 9) ** 2 + (dvol * 0.06 / 10 ** 9) ** 2)\n', (26775, 26840), True, 'import numpy as np\n'), ((27577, 27629), 'numpy.nansum', 'np.nansum', (['(df_all.dmdtda.values * df_all.area.values)'], {}), '(df_all.dmdtda.values * df_all.area.values)\n', (27586, 27629), True, 'import numpy as np\n'), ((27628, 27657), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (27637, 27657), True, 'import numpy as np\n'), ((27684, 27748), 'numpy.nansum', 'np.nansum', (['(df_all.sq_err_fromdh.values * df_all.area.values ** 
2)'], {}), '(df_all.sq_err_fromdh.values * df_all.area.values ** 2)\n', (27693, 27748), True, 'import numpy as np\n'), ((27809, 27875), 'numpy.nansum', 'np.nansum', (['(df_all.sq_err_fromarea.values * df_all.area.values ** 2)'], {}), '(df_all.sq_err_fromarea.values * df_all.area.values ** 2)\n', (27818, 27875), True, 'import numpy as np\n'), ((27939, 28008), 'numpy.nansum', 'np.nansum', (['(df_all.sq_err_fromdensity.values * df_all.area.values ** 2)'], {}), '(df_all.sq_err_fromdensity.values * df_all.area.values ** 2)\n', (27948, 28008), True, 'import numpy as np\n'), ((28196, 28258), 'numpy.nansum', 'np.nansum', (['(df_all[ind].dmdtda.values * df_all[ind].area.values)'], {}), '(df_all[ind].dmdtda.values * df_all[ind].area.values)\n', (28205, 28258), True, 'import numpy as np\n'), ((28257, 28291), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28266, 28291), True, 'import numpy as np\n'), ((28395, 28429), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28404, 28429), True, 'import numpy as np\n'), ((28539, 28573), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28548, 28573), True, 'import numpy as np\n'), ((28689, 28723), 'numpy.nansum', 'np.nansum', (['df_all[ind].area.values'], {}), '(df_all[ind].area.values)\n', (28698, 28723), True, 'import numpy as np\n'), ((31626, 31658), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.0e"""'], {}), "('%.0e')\n", (31650, 31658), True, 'import matplotlib.ticker as mtick\n'), ((2225, 2257), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[1:2]'], {}), '(df_c.exp.values[1:2])\n', (2235, 2257), True, 'import numpy as np\n'), ((2297, 2334), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[1:2]"], {}), "(df_c['count'].values[1:2])\n", (2307, 2334), True, 'import numpy as np\n'), ((2375, 2419), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[20 - 10:20 
+ 10]'], {}), '(df_c.exp.values[20 - 10:20 + 10])\n', (2385, 2419), True, 'import numpy as np\n'), ((2462, 2511), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[20 - 10:20 + 10]"], {}), "(df_c['count'].values[20 - 10:20 + 10])\n", (2472, 2511), True, 'import numpy as np\n'), ((2552, 2596), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[50 - 10:50 + 10]'], {}), '(df_c.exp.values[50 - 10:50 + 10])\n', (2562, 2596), True, 'import numpy as np\n'), ((2639, 2688), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[50 - 10:50 + 10]"], {}), "(df_c['count'].values[50 - 10:50 + 10])\n", (2649, 2688), True, 'import numpy as np\n'), ((5385, 5415), 'numpy.arange', 'np.arange', (['(30000)', '(3000000)', '(100)'], {}), '(30000, 3000000, 100)\n', (5394, 5415), True, 'import numpy as np\n'), ((10001, 10014), 'numpy.array', 'np.array', (['dts'], {}), '(dts)\n', (10009, 10014), True, 'import numpy as np\n'), ((19545, 19565), 'numpy.array', 'np.array', (['list_area2'], {}), '(list_area2)\n', (19553, 19565), True, 'import numpy as np\n'), ((22539, 22557), 'numpy.nansum', 'np.nansum', (['ci_size'], {}), '(ci_size)\n', (22548, 22557), True, 'import numpy as np\n'), ((24925, 24943), 'numpy.nansum', 'np.nansum', (['ci_size'], {}), '(ci_size)\n', (24934, 24943), True, 'import numpy as np\n'), ((27748, 27777), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (27757, 27777), True, 'import numpy as np\n'), ((27875, 27904), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (27884, 27904), True, 'import numpy as np\n'), ((28008, 28037), 'numpy.nansum', 'np.nansum', (['df_all.area.values'], {}), '(df_all.area.values)\n', (28017, 28037), True, 'import numpy as np\n'), ((29623, 29643), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (29631, 29643), True, 'import numpy as np\n'), ((29719, 29739), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (29727, 29739), True, 'import 
numpy as np\n'), ((29818, 29838), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (29826, 29838), True, 'import numpy as np\n'), ((30474, 30494), 'numpy.arange', 'np.arange', (['(1.5)', '(20.5)'], {}), '(1.5, 20.5)\n', (30483, 30494), True, 'import numpy as np\n'), ((31181, 31201), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (31189, 31201), True, 'import numpy as np\n'), ((31277, 31297), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (31285, 31297), True, 'import numpy as np\n'), ((31376, 31396), 'numpy.array', 'np.array', (['(i, i + 1)'], {}), '((i, i + 1))\n', (31384, 31396), True, 'import numpy as np\n'), ((2757, 2800), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[20 - 5:20 + 20]'], {}), '(df_c.exp.values[20 - 5:20 + 20])\n', (2767, 2800), True, 'import numpy as np\n'), ((2836, 2885), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[20 - 10:20 + 10]"], {}), "(df_c['count'].values[20 - 10:20 + 10])\n", (2846, 2885), True, 'import numpy as np\n'), ((2919, 2963), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[50 - 10:50 + 10]'], {}), '(df_c.exp.values[50 - 10:50 + 10])\n', (2929, 2963), True, 'import numpy as np\n'), ((2999, 3048), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[50 - 10:50 + 10]"], {}), "(df_c['count'].values[50 - 10:50 + 10])\n", (3009, 3048), True, 'import numpy as np\n'), ((5324, 5345), 'numpy.arange', 'np.arange', (['(0)', '(3000)', '(1)'], {}), '(0, 3000, 1)\n', (5333, 5345), True, 'import numpy as np\n'), ((5352, 5378), 'numpy.arange', 'np.arange', (['(3000)', '(30000)', '(10)'], {}), '(3000, 30000, 10)\n', (5361, 5378), True, 'import numpy as np\n'), ((10016, 10039), 'numpy.isnan', 'np.isnan', (['arr_res[:, i]'], {}), '(arr_res[:, i])\n', (10024, 10039), True, 'import numpy as np\n'), ((14350, 14386), 'numpy.nansum', 'np.nansum', (['(err_corr[:, k] * nsamp_dt)'], {}), '(err_corr[:, k] * nsamp_dt)\n', (14359, 14386), True, 'import numpy as np\n'), ((14389, 
14408), 'numpy.nansum', 'np.nansum', (['nsamp_dt'], {}), '(nsamp_dt)\n', (14398, 14408), True, 'import numpy as np\n'), ((14723, 14789), 'pyddem.volint_tools.neff_circ', 'neff_circ', (['area', "[(tmp_length, 'Sph', final_num_err_corr[k] ** 2)]"], {}), "(area, [(tmp_length, 'Sph', final_num_err_corr[k] ** 2)])\n", (14732, 14789), False, 'from pyddem.volint_tools import neff_circ, std_err\n'), ((15422, 15442), 'numpy.array', 'np.array', (['list_areas'], {}), '(list_areas)\n', (15430, 15442), True, 'import numpy as np\n'), ((15561, 15581), 'numpy.array', 'np.array', (['list_areas'], {}), '(list_areas)\n', (15569, 15581), True, 'import numpy as np\n'), ((19497, 19512), 'numpy.array', 'np.array', (['diff2'], {}), '(diff2)\n', (19505, 19512), True, 'import numpy as np\n'), ((19513, 19533), 'numpy.array', 'np.array', (['list_area2'], {}), '(list_area2)\n', (19521, 19533), True, 'import numpy as np\n'), ((22236, 22250), 'numpy.isnan', 'np.isnan', (['diff'], {}), '(diff)\n', (22244, 22250), True, 'import numpy as np\n'), ((22632, 22649), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (22640, 22649), True, 'import numpy as np\n'), ((24630, 24644), 'numpy.isnan', 'np.isnan', (['diff'], {}), '(diff)\n', (24638, 24644), True, 'import numpy as np\n'), ((25018, 25035), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (25026, 25035), True, 'import numpy as np\n'), ((3114, 3158), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[20 - 10:20 + 30]'], {}), '(df_c.exp.values[20 - 10:20 + 30])\n', (3124, 3158), True, 'import numpy as np\n'), ((3194, 3243), 'numpy.nanmean', 'np.nanmean', (["df_c['count'].values[20 - 10:20 + 30]"], {}), "(df_c['count'].values[20 - 10:20 + 30])\n", (3204, 3243), True, 'import numpy as np\n'), ((3277, 3321), 'numpy.nanmean', 'np.nanmean', (['df_c.exp.values[50 - 40:50 + 40]'], {}), '(df_c.exp.values[50 - 40:50 + 40])\n', (3287, 3321), True, 'import numpy as np\n'), ((3357, 3406), 'numpy.nanmean', 'np.nanmean', 
(["df_c['count'].values[50 - 40:50 + 40]"], {}), "(df_c['count'].values[50 - 40:50 + 40])\n", (3367, 3406), True, 'import numpy as np\n'), ((10064, 10087), 'numpy.isnan', 'np.isnan', (['arr_res[:, i]'], {}), '(arr_res[:, i])\n', (10072, 10087), True, 'import numpy as np\n'), ((10350, 10382), 'numpy.sin', 'np.sin', (['(x / 365.2224 * 2 * np.pi)'], {}), '(x / 365.2224 * 2 * np.pi)\n', (10356, 10382), True, 'import numpy as np\n'), ((10426, 10458), 'numpy.sin', 'np.sin', (['(x / 365.2224 * 2 * np.pi)'], {}), '(x / 365.2224 * 2 * np.pi)\n', (10432, 10458), True, 'import numpy as np\n'), ((10464, 10496), 'numpy.sin', 'np.sin', (['(x / 365.2224 * 2 * np.pi)'], {}), '(x / 365.2224 * 2 * np.pi)\n', (10470, 10496), True, 'import numpy as np\n'), ((22036, 22048), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (22042, 22048), True, 'import numpy as np\n'), ((22116, 22170), 'numpy.abs', 'np.abs', (['df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]'], {}), '(df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0])\n', (22122, 22170), True, 'import numpy as np\n'), ((22576, 22593), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (22584, 22593), True, 'import numpy as np\n'), ((22938, 22967), 'numpy.round', 'np.round', (['(nb_95ci[i] * 100)', '(0)'], {}), '(nb_95ci[i] * 100, 0)\n', (22946, 22967), True, 'import numpy as np\n'), ((24430, 24442), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (24436, 24442), True, 'import numpy as np\n'), ((24510, 24564), 'numpy.abs', 'np.abs', (['df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]'], {}), '(df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0])\n', (24516, 24564), True, 'import numpy as np\n'), ((24962, 24979), 'numpy.isnan', 'np.isnan', (['ci_size'], {}), '(ci_size)\n', (24970, 24979), True, 'import numpy as np\n'), ((25228, 25257), 'numpy.round', 'np.round', (['(nb_95ci[i] * 100)', '(0)'], {}), '(nb_95ci[i] * 100, 0)\n', (25236, 25257), True, 'import numpy as np\n'), ((22055, 22109), 'numpy.abs', 'np.abs', (['df_hr[df_hr.rgiid == 
rgiid].err_dhdt.values[0]'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0])\n', (22061, 22109), True, 'import numpy as np\n'), ((24449, 24503), 'numpy.abs', 'np.abs', (['df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]'], {}), '(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0])\n', (24455, 24503), True, 'import numpy as np\n'), ((19260, 19277), 'numpy.nanmean', 'np.nanmean', (['diff2'], {}), '(diff2)\n', (19270, 19277), True, 'import numpy as np\n'), ((19305, 19316), 'pybob.ddem_tools.nmad', 'nmad', (['diff2'], {}), '(diff2)\n', (19309, 19316), False, 'from pybob.ddem_tools import nmad\n')] |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt

# Figure 1: sine and cosine over two full periods.
plt.figure(figsize=(6.6, 3), dpi=90)
# np.linspace requires an integer sample count; the original 1e3 (a float)
# raises TypeError on modern NumPy.
x = np.linspace(-2*np.pi, 2*np.pi, 1000)
plt.plot(x, np.sin(x), label=r"$\sin(x)$")
# Fix copy-paste bug: the cosine curve was labelled "$\sin(x)$".
plt.plot(x, np.cos(x), label=r"$\cos(x)$")
plt.xlim((np.min(x), np.max(x)))
plt.ylim((-1.1, 1.1))
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.savefig('trig.png', bbox_inches='tight', dpi=300)

# Figure 2: histogram of 100 standard-normal draws.
plt.figure(figsize=(6.6, 3), dpi=90)
data = np.random.normal(0, 1, 100)
plt.hist(data, bins=10)
plt.xlabel(r"Data")
plt.ylabel(r"$\#$")
plt.savefig('hist.pgf', bbox_inches='tight', dpi=200)
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.min",
"numpy.max",
"numpy.linspace",
"numpy.random.normal",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((96, 132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.6, 3)', 'dpi': '(90)'}), '(figsize=(6.6, 3), dpi=90)\n', (106, 132), True, 'import matplotlib.pyplot as plt\n'), ((137, 179), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(1000.0)'], {}), '(-2 * np.pi, 2 * np.pi, 1000.0)\n', (148, 179), True, 'import numpy as np\n'), ((292, 313), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1, 1.1)'], {}), '((-1.1, 1.1))\n', (300, 313), True, 'import matplotlib.pyplot as plt\n'), ((315, 332), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (325, 332), True, 'import matplotlib.pyplot as plt\n'), ((333, 350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (343, 350), True, 'import matplotlib.pyplot as plt\n'), ((351, 404), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""trig.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('trig.png', bbox_inches='tight', dpi=300)\n", (362, 404), True, 'import matplotlib.pyplot as plt\n'), ((406, 442), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.6, 3)', 'dpi': '(90)'}), '(figsize=(6.6, 3), dpi=90)\n', (416, 442), True, 'import matplotlib.pyplot as plt\n'), ((451, 478), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (467, 478), True, 'import numpy as np\n'), ((480, 503), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'bins': '(10)'}), '(data, bins=10)\n', (488, 503), True, 'import matplotlib.pyplot as plt\n'), ((505, 523), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data"""'], {}), "('Data')\n", (515, 523), True, 'import matplotlib.pyplot as plt\n'), ((525, 544), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\#$"""'], {}), "('$\\\\#$')\n", (535, 544), True, 'import matplotlib.pyplot as plt\n'), ((545, 598), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hist.pgf"""'], {'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('hist.pgf', bbox_inches='tight', 
dpi=200)\n", (556, 598), True, 'import matplotlib.pyplot as plt\n'), ((186, 195), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (192, 195), True, 'import numpy as np\n'), ((228, 237), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (234, 237), True, 'import numpy as np\n'), ((269, 278), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (275, 278), True, 'import numpy as np\n'), ((280, 289), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (286, 289), True, 'import numpy as np\n')] |
"""
This module provides utility methods.
"""
import datetime
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from plotnine import *
from statsmodels.tsa.statespace.kalman_smoother import SmootherResults
from statsmodels.tsa.statespace.mlemodel import MLEResults, MLEResultsWrapper
from state_space import SSMS
def plot_states(filtered_results: MLEResultsWrapper, smoothed_results: SmootherResults, regions: list, z_names: list,
                save_path: str):
    """
    Plots states (all variables specified in z_names) and saves it in save_path.
    The dataframe states contains all the states (mu, nu, z_names) over time.
    :param filtered_results: filtered results from a SSMS class
    :param smoothed_results: smoothed results from a SSMS class; pass an MLEResultsWrapper (e.g. filtered_results
        itself) if you don't want smoothed states, in which case the filtered states are plotted instead
    :param regions: list of region names
    :param z_names: a list of column names of the independent variables to be placed in the Z (design) matrix
    :param save_path: save path for plots
    :return:
    """
    n_regions = len(regions)
    n_betas = len(z_names)
    # Passing an MLEResultsWrapper as smoothed_results signals that the filtered (not smoothed) states are
    # wanted; the flag is reused below when naming the output files and plots.
    use_filtered = isinstance(smoothed_results, MLEResultsWrapper)
    if use_filtered:
        states = np.transpose(filtered_results.filtered_state)
    else:
        states = np.transpose(smoothed_results.smoothed_state)
    # Create confidence intervals for states (first n_regions*3 parameters are for the variances of y, mu and nu).
    # cis holds n_betas lower bounds, n_betas upper bounds and n_betas significance indicators.
    cis = np.zeros((states.shape[0], n_betas * 3))
    # We use the state_cov (covariance matrix of state equation Q) to calculate the ci's
    bound = 1.96 * np.sqrt(filtered_results.params[n_regions * 3:])
    for i in range(n_betas):
        cis[:, i] = states[:, n_regions * 2 + i] - bound[i]
        cis[:, i + n_betas] = states[:, n_regions * 2 + i] + bound[i]
        # The product of lower and upper bound is positive iff 0 lies outside the CI,
        # so the third group of columns becomes a 0/1 significance indicator.
        cis[:, i + n_betas * 2] = np.multiply(cis[:, i], cis[:, i + n_betas])
        cis[:, i + n_betas * 2][cis[:, i + n_betas * 2] < 0] = 0
        cis[:, i + n_betas * 2][cis[:, i + n_betas * 2] > 0] = 1
    # Create list cols with columns names for states Dataframe
    cols = []
    for i in range(states.shape[1] + n_betas * 3):
        if i < n_regions:
            cols.append('nu_' + regions[i])
        elif n_regions <= i < n_regions * 2:
            cols.append('mu_' + regions[i - n_regions])
        elif n_regions * 2 <= i < n_regions * 2 + n_betas:
            cols.append(z_names[i - n_regions * 2])
        elif n_regions * 2 + n_betas <= i < n_regions * 2 + n_betas * 2:
            cols.append(z_names[i - (n_regions * 2 + n_betas)] + '_lb')
        elif n_regions * 2 + n_betas * 2 <= i < n_regions * 2 + n_betas * 3:
            cols.append(z_names[i - (n_regions * 2 + n_betas * 2)] + '_ub')
        else:
            cols.append(z_names[i - (n_regions * 2 + n_betas * 3)] + '_significant')
    states_df = pd.DataFrame(np.concatenate((states, cis), axis=1), columns=cols)
    states_df['Date'] = pd.date_range(start='1/7/2018', periods=len(states_df), freq='W')
    states_df_01 = states_df.iloc[:, -(n_betas * 4 + 1):]
    states_df_01['Date'] = states_df_01['Date'].dt.strftime('%G%V')
    if use_filtered:
        states_df_01.to_excel(os.path.join(save_path, 'states_filtered.xlsx'))
    else:
        states_df_01.to_excel(os.path.join(save_path, 'states_smoothed.xlsx'))
    # The first 5 observations are removed for nice graphs
    states_df = states_df.iloc[5:, :]
    # Important events are the first intelligent lockdown and relaxation of rules
    events = [datetime.datetime.strptime('2020-11-7', '%G-%V-%u'), datetime.datetime.strptime('2020-27-7', '%G-%V-%u')]
    events_full = [*events, *[datetime.datetime.strptime('2020-51-7', '%G-%V-%u'),
                              datetime.datetime.strptime('2021-25-7', '%G-%V-%u')]]
    for i in range(n_betas):
        if i == z_names.index('StringencyIndex'):
            # Remove 0-values when plotting StringencyIndex
            states_df_02 = states_df[108:]
        else:
            states_df_02 = states_df
        p = ggplot(states_df_02, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(states_df_02, 8)[0],
                                                                  labels=get_ticks(states_df_02, 8)[1]) + geom_ribbon(
            aes(ymin=states_df_02.iloc[:, n_regions * 2 + n_betas + i],
                ymax=states_df_02.iloc[:, n_regions * 2 + n_betas * 2 + i], color='"95% CI"'), alpha=0.1) + geom_line(
            aes(y=states_df_02.columns[n_regions * 2 + i], color='"State"')) + geom_vline(xintercept=events_full,
                                                                                           linetype="dotted") + \
            geom_vline(
                xintercept=[datetime.datetime.strptime('2020-50-7', '%G-%V-%u')], linetype="solid") + scale_color_manual(
            values=['#dedede', '#4472c4']) + labs(x='Date', y='State', color='Legend')
        if use_filtered:
            ggsave(plot=p, filename='coefficient_for_filtered_' + z_names[i], path=save_path, verbose=False, dpi=600)
        else:
            ggsave(plot=p, filename='coefficient_for_smoothed_' + z_names[i], path=save_path, verbose=False,
                   dpi=600)  # print(p)
def forecast_error(results: MLEResults, regions: list, save_path: str, first=int, last=int, ci=bool, tp=str, n_plots=4):
    """
    Computes forecast error with one-step ahead forecasts for each region and saves it in save_path. Moreover, plots
    forecasts, actual sales and errors of the n_plots best/worst MASE/MdASE regions (only MASE plots are saved).
    :param results: (extended) results (from prepare_forecast())
    :param regions: list of region names,
        the order of the names should be exactly the same as the order of the regions in the model
    :param save_path: save path for plots
    :param first: the time index from where your plots should start
    :param last: this time index should exactly be equal to the time index-1 where the sample of the model ends
    :param ci: whether to plot a confidence interval (True) or not (False),
        if the CI's become too big set ci=False otherwise the sales will be plotted as straight lines
    :param tp: specify the type of data (e.g. one_step_ahead_forecast) you want to plot,
        use _ instead of spaces for tp, since the name of the plots/excel files will also have this name
    :param n_plots: the number of regions to plot, 4 (default) implies plotting the forecasts, actual sales
        and errors of the 4 best/worst MASE/MdASE regions (= 3 * 4 * 2 * 2 = 48 plots)
    :return:

    NOTE(review): ``first=int``, ``last=int``, ``ci=bool`` and ``tp=str`` default to the *type objects*
    themselves, not to sensible values — these were presumably meant as annotations (``first: int``).
    Always pass these four arguments explicitly; consider making them required parameters.
    """
    n_regions = len(regions)
    model = results.model
    # One-step-ahead predictions over the plotting window [first, last].
    data = results.get_prediction(start=first, end=last)
    # Calculate MASE using one-step ahead forecasts
    # NOTE(review): the hard-coded sizes below (38 test errors, 152 naive errors from 153
    # observations) assume a fixed train/test split — confirm against the fitted sample.
    mases = np.zeros(len(regions))
    maes = np.zeros((38, len(regions)))
    maes_naive = np.zeros((152, len(regions)))
    mdases = np.zeros(len(regions))
    for region in range(len(regions)):
        # Absolute one-step-ahead forecast errors on the test window.
        maes[:, region] = np.abs(model.endog[first:, region] - data.predicted_mean[:, region])
        # Absolute errors of the naive (random-walk) forecast on the first 153 observations.
        maes_naive[:, region] = np.abs(
            [x - model.endog[0:153, region][i - 1] for i, x in enumerate(model.endog[0:153, region])][1:])
        # MASE / MdASE: mean / median absolute error scaled by the naive benchmark.
        mases[region] = np.mean(maes[:, region]) / np.mean(maes_naive[:, region])
        mdases[region] = np.median(maes[:, region]) / np.median(maes_naive[:, region])
    # Aggregate error statistics over all regions.
    mean_mase = np.mean(mases)
    med_mase = np.median(mases)
    mean_mdase = np.mean(mdases)
    med_mdase = np.median(mdases)
    # Share of regions that beat the naive benchmark (scaled error below 1).
    l1_mase = sum(x < 1 for x in mases) / mases.shape[0]
    l1_mdase = sum(x < 1 for x in mdases) / mdases.shape[0]
    best_mase, worst_mase = np.argmin(mases), np.argmax(mases)
    best_mdase, worst_mdase = np.argmin(mdases), np.argmax(mdases)
    # Assemble one summary sheet: per-region MASE/MdASE plus best/worst/mean/median columns.
    mase_df = pd.DataFrame(np.transpose(mases.reshape(1, n_regions)), index=regions, columns=['MASE'])
    mdase_df = pd.DataFrame(np.transpose(mdases.reshape(1, n_regions)), index=regions, columns=['MdASE'])
    error_df = mase_df.merge(mdase_df, left_index=True, right_index=True, how='left')
    error_df[''] = ''
    error_df['Best MASE'] = [regions[best_mase], mases[best_mase], '', 'Best MdASE', regions[best_mdase],
                             mdases[best_mdase]] + [''] * (len(error_df) - 6)
    error_df['Worst MASE'] = [regions[worst_mase], mases[worst_mase], '', 'Worst MdASE', regions[worst_mdase],
                              mdases[worst_mdase]] + [''] * (len(error_df) - 6)
    error_df['Mean MASE'] = [mean_mase] + [''] * 2 + ['Mean MdASE', mean_mdase] + [''] * (len(error_df) - 5)
    error_df['Median MASE'] = [med_mase] + [''] * 2 + ['Median MdASE', med_mdase] + [''] * (len(error_df) - 5)
    error_df['Proportion of regions MASE<1'] = [l1_mase] + [''] * 2 + ['Proportion of regions MdASE<1', l1_mdase] + [
        ''] * (len(error_df) - 5)
    error_df.to_excel(os.path.join(save_path, 'errors_' + tp + '.xlsx'))
    # Plot forecasts (df_pred), actual sales (df_full) and MAE/MAE_naive (df_mae)
    df_pred = pd.DataFrame(np.concatenate((model.endog[first:, :], data.predicted_mean, data.conf_int()), axis=1))
    start_date = datetime.datetime(2018, 1, 7) + datetime.timedelta(weeks=first)
    df_pred['Date'] = pd.date_range(start=start_date, periods=len(df_pred), freq='W')
    df_full = pd.DataFrame(model.endog)
    df_full['Date'] = pd.date_range(start=datetime.datetime(2018, 1, 7), periods=len(df_full), freq='W')
    df_mae = pd.DataFrame(np.concatenate((maes_naive, maes), axis=0))
    # MAE starts in 2018 week 2 (sunday) because the naive forecast (denominator of mase) starts at t=2
    df_mae['Date'] = pd.date_range(start=datetime.datetime(2018, 1, 14), periods=len(df_mae), freq='W')
    # Column indices of the n_plots best and worst regions, by MASE then by MdASE.
    plot_regions = np.concatenate((mases.argsort()[:n_plots], mases.argsort()[-n_plots:][::-1],
                                   mdases.argsort()[:n_plots], mdases.argsort()[-n_plots:][::-1]), axis=0)
    # Important events are the second lockdown and relaxation of (almost all) rules
    events_test = [datetime.datetime.strptime('2020-51-7', '%G-%V-%u'),
                   datetime.datetime.strptime('2021-25-7', '%G-%V-%u')]
    events_full = [
        *[datetime.datetime.strptime('2020-11-7', '%G-%V-%u'), datetime.datetime.strptime('2020-27-7', '%G-%V-%u')],
        *events_test]
    for i in range(plot_regions.shape[0]):
        if ci:
            # p: forecast vs. actual with 95% CI ribbon; q: full actual-sales history;
            # m: absolute errors of the naive benchmark (AE_naive) and the model (AE).
            p = ggplot(df_pred, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_pred, 8)[0],
                                                                 labels=get_ticks(df_pred, 8)[1]) + geom_ribbon(
                aes(ymin=df_pred.iloc[:, n_regions * 2 + plot_regions[i]],
                    ymax=df_pred.iloc[:, n_regions * 3 + plot_regions[i]], color='"95% CI"'), alpha=0.1) + geom_line(
                aes(y=df_pred.iloc[:, plot_regions[i]], color='"Actual"')) + geom_line(
                aes(y=df_pred.iloc[:, n_regions + plot_regions[i]], color='"Forecast"')) + geom_vline(
                xintercept=events_test, linetype="dotted") + scale_color_manual(
                values=['#dedede', '#4472c4', '#ed7d31']) + labs(x='Date', y='Sales', color='Legend')
            q = ggplot(df_full, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_full, 8)[0],
                                                                 labels=get_ticks(df_full, 8)[1]) + geom_line(
                aes(y=df_full.iloc[:, plot_regions[i]], color='"Actual"')) + geom_vline(xintercept=events_full,
                                                                                           linetype="dotted") + geom_vline(
                xintercept=[datetime.datetime.strptime('2020-50-7', '%G-%V-%u')],
                linetype="solid") + scale_color_manual(values=['#4472c4']) + labs(x='Date', y='Sales', color='Legend')
            m = ggplot(df_mae, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_mae, 8)[0],
                                                                labels=get_ticks(df_mae, 8)[1]) + geom_line(
                aes(y=df_mae.iloc[0:153, plot_regions[i]], color='"AE_naive"'),
                data=df_mae['Date'][0:153].to_frame()) + geom_line(
                aes(y=df_mae.iloc[152:190, plot_regions[i]], color='"AE"'),
                data=df_mae['Date'][152:190].to_frame()) + geom_vline(xintercept=events_full,
                                                                           linetype="dotted") + geom_vline(
                xintercept=[datetime.datetime.strptime('2020-50-7', '%G-%V-%u')],
                linetype="solid") + scale_color_manual(values=['#4472c4', '#ed7d31']) + labs(x='Date', y='Error',
                                                                                           color='Legend')
        else:
            # Without CI: only forecast vs. actual, no ribbon (q and m are reused from a
            # previous iteration if referenced below).
            p = ggplot(df_pred, aes(x='Date')) + scale_x_datetime(breaks=get_ticks(df_pred, 8)[0],
                                                                 labels=get_ticks(df_pred, 8)[1]) + geom_line(
                aes(y=df_pred.iloc[:, plot_regions[i]], color='"Actual"')) + geom_line(
                aes(y=df_pred.iloc[:, n_regions + plot_regions[i]], color='"Forecast"')) + geom_vline(
                xintercept=events_test, linetype="dotted") + labs(x='Date', y='Sales')
        # print(m)
        # Only the MASE-ranked plots (first 2 * n_plots entries) are saved; the MdASE
        # branches are kept below in a disabled block.
        if i < n_plots:
            ggsave(plot=p, filename=tp + '_mase_best_' + str(i + 1) + '_' + regions[plot_regions[i]], path=save_path,
                   verbose=False, dpi=600)
            ggsave(plot=q, filename='actual_sales_mase_best_' + str(i + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mase_best_' + str(i + 1) + '_' + regions[plot_regions[i]], path=save_path,
                   verbose=False, dpi=600)
        elif i < n_plots * 2:
            ggsave(plot=p, filename=tp + '_mase_worst_' + str(i - n_plots + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=q, filename='actual_sales_mase_worst_' + str(i - n_plots + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mase_worst_' + str(i - n_plots + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
        """
        elif i < n_plots * 3:
            ggsave(plot=p, filename=tp + '_mdase_best_' + str(i - n_plots * 2 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=q,
                   filename='actual_sales_mdase_best_' + str(i - n_plots * 2 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mdase_best_' + str(i - n_plots * 2 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
        else:
            ggsave(plot=p, filename=tp + '_mdase_worst_' + str(i - n_plots * 3 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=q,
                   filename='actual_sales_mdase_worst_' + str(i - n_plots * 3 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
            ggsave(plot=m, filename='mdase_worst_' + str(i - n_plots * 3 + 1) + '_' + regions[plot_regions[i]],
                   path=save_path, verbose=False, dpi=600)
        """
def get_ticks(data: pd.DataFrame, n_ticks: int):
    """
    Returns x_axis ticks as dates.

    :param data: dataframe whose last column contains pandas.Timestamp objects;
        the first row's timestamp is taken as the starting date
    :param n_ticks: number of ticks
    :return: tuple (breaks, labels) with the tick dates and their ISO
        year-week labels ('%G-%V')
    """
    # Hoist the loop invariants: the original recomputed the start date twice
    # per iteration via the deprecated positional Series lookup
    # data.iloc[0, -1:][0]; iloc with two scalar indexers yields the same value.
    start = data.iloc[0, -1]
    # n_ticks ticks span the whole range, i.e. n_ticks - 1 equal intervals
    # measured in weeks.
    interval = data.shape[0] / (n_ticks - 1)
    x_breaks = []
    x_labels = []
    for i in range(n_ticks):
        tick = start + datetime.timedelta(weeks=interval * i)
        x_breaks.append(tick)
        x_labels.append(tick.strftime('%G-%V'))
    return x_breaks, x_labels
def print_results(results: MLEResults, save_path: str, name: str):
    """
    Pretty-prints the results for an SSMS model with k variables of interest (in beta equations). Assumes n > k.
    Writes two CSV files: '<name>_stats.csv' with fit statistics and '<name>_params.csv' with the
    estimated variances per region (and the beta-state variances for the first k rows).
    :param results: results object for an SSMS model
    :param save_path: path to save location
    :param name: model name
    :return:
    """
    model = results.model
    # Guard: the parameter layout below is specific to SSMS.
    if not isinstance(model, SSMS):
        print("Can't print parameters for a non-SSMS model.")
        return

    # Print AIC, BIC, MSE, and MAE.
    with open(os.path.join(save_path, name + '_stats.csv'), 'w') as out:
        header = ','.join(['AIC', 'BIC', 'MSE', 'MAE'])
        stats = ','.join([str(results.aic), str(results.bic), str(results.mse), str(results.mae)])
        out.write('\n'.join([header, stats]))

    # Print fitted parameters, standard errors, and p-values.
    regions = model.group_names
    params = results.params
    n = len(regions)  # number of regions
    k = model.k  # number of beta states (variables of interest)
    n_cov = model.n_cov  # number of extra covariance parameters (used under the 'GC' restriction)
    param_names = model.z_names
    # CSV header: per-region y/mu/nu variances, then (for the first k rows) a beta-state variance.
    y = ','.join(['region', 'var (y)'])
    mu = 'var (mu)'
    nu = 'var (nu)'
    lm = ','.join(['param', 'var'])
    header = ',,'.join([y, mu, nu, lm])

    # The params vector is laid out as: n y-variances (+ n_cov covariances under 'GC'),
    # then n mu-variances, then n nu-variances, then k beta-state variances.
    param_from = 0
    if model.cov_rest == 'GC':
        # Under the 'GC' covariance restriction the first segment also contains
        # n_cov covariance parameters, which are skipped (not printed).
        param_to = n + n_cov
        y_var = params[param_from:param_from + n]
    else:
        param_to = n
        y_var = params[param_from:param_to]
    param_from = param_to
    param_to += n
    mu_var = params[param_from:param_to]
    param_from = param_to
    param_to += n
    nu_var = params[param_from:param_to]
    param_from = param_to
    param_to += k  # NOTE(review): param_to is not used after this point
    param_var = params[param_from:]
    with open(os.path.join(save_path, name + '_params.csv'), 'w') as out:
        out.write(header + '\n')
        for i in range(n):
            y = ','.join([regions[i], str(y_var[i])])
            mu = str(mu_var[i])
            nu = str(nu_var[i])
            line = ',,'.join([y, mu, nu])
            # Only the first k rows get a beta-state variance column (assumes n > k).
            if i < k:
                lm = ','.join([param_names[i], str(param_var[i])])
                line = ',,'.join([line, lm])
            out.write(line + '\n')
def plot_variables(data: list, info: list, all_regions: bool = False):
    """
    Plots variables together with their outlier bounds.

    :param data: list of per-variable entries of form [y, mu, threshold, obs_sd]
    :param info: list of form [index, name] for each variable to plot; empty when there are no outliers
    :param all_regions: boolean to plot regions 1-by-1 (True) or all at the same time (False);
        the original annotated this as ``all_regions: False`` (a value, not a type) — fixed to
        ``bool`` with a backward-compatible default of False
    :return:
    """
    # The original duplicated the whole plotting loop in both branches of an
    # if all_regions / else; the only behavioural difference is *when* plt.show()
    # is called, so the loop is written once here.
    if info:
        # Time axis 1..T, taken from the length of the first variable's series.
        t = np.arange(1, len(data[0][0]) + 1)
        for index, name in info:
            plt.figure()
            plt.suptitle(name)
            # Observed series in blue; upper/lower bounds mu +/- threshold * obs_sd in red.
            plt.plot(t, data[index][0], 'b')
            plt.plot(t, data[index][1] + data[index][2] * data[index][3], 'r')
            plt.plot(t, data[index][1] - data[index][2] * data[index][3], 'r')
            if all_regions:
                # Show each region's figure before drawing the next one.
                plt.show()
    else:
        print('No outliers')
    if not all_regions:
        # Display all figures at once; also called when there were no outliers,
        # matching the original behaviour.
        plt.show()
def prepare_forecast(results: MLEResults, data: pd.DataFrame):
    """
    Builds a fresh results object so that the regular methods can be used to compute forecasts.
    Out-of-sample forecasts are obtained as 'in-sample' forecasts of a model defined over the
    extended data, filtered with the parameters held fixed at the training-fit values.
    :param results: the MLEResults object of the training fit
    :param data: the extended data (train + test)
    :return: a new NLEResults object, fitted with fixed parameters obtained from the initial training fit
    """
    trained_model = results.model
    # Only SSMS models carry the attributes needed to rebuild the specification.
    if not isinstance(trained_model, SSMS):
        print("Can't prepare forecasts for a non-SSMS model.")
        return
    # Re-create the same specification over the extended sample, then apply the
    # fixed training parameters via a single filtering pass (no re-estimation).
    extended_model = SSMS(data, group_name=trained_model.group_name, y_name=trained_model.y_name,
                         z_names=trained_model.z_names, cov_rest=trained_model.cov_rest)
    extended_result = extended_model.filter(results.params)
    return extended_model, extended_result
| [
"numpy.abs",
"numpy.argmax",
"matplotlib.pyplot.suptitle",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.mean",
"os.path.join",
"pandas.DataFrame",
"numpy.multiply",
"numpy.transpose",
"datetime.timedelta",
"matplotlib.pyplot.show",
"numpy.median",
"datetime.datetime",
"datetime.dat... | [((7666, 7680), 'numpy.mean', 'np.mean', (['mases'], {}), '(mases)\n', (7673, 7680), True, 'import numpy as np\n'), ((7696, 7712), 'numpy.median', 'np.median', (['mases'], {}), '(mases)\n', (7705, 7712), True, 'import numpy as np\n'), ((7730, 7745), 'numpy.mean', 'np.mean', (['mdases'], {}), '(mdases)\n', (7737, 7745), True, 'import numpy as np\n'), ((7762, 7779), 'numpy.median', 'np.median', (['mdases'], {}), '(mdases)\n', (7771, 7779), True, 'import numpy as np\n'), ((9545, 9570), 'pandas.DataFrame', 'pd.DataFrame', (['model.endog'], {}), '(model.endog)\n', (9557, 9570), True, 'import pandas as pd\n'), ((20451, 20564), 'state_space.SSMS', 'SSMS', (['data'], {'group_name': 'model.group_name', 'y_name': 'model.y_name', 'z_names': 'model.z_names', 'cov_rest': 'model.cov_rest'}), '(data, group_name=model.group_name, y_name=model.y_name, z_names=model.\n z_names, cov_rest=model.cov_rest)\n', (20455, 20564), False, 'from state_space import SSMS\n'), ((1347, 1392), 'numpy.transpose', 'np.transpose', (['filtered_results.filtered_state'], {}), '(filtered_results.filtered_state)\n', (1359, 1392), True, 'import numpy as np\n'), ((1407, 1447), 'numpy.zeros', 'np.zeros', (['(states.shape[0], n_betas * 3)'], {}), '((states.shape[0], n_betas * 3))\n', (1415, 1447), True, 'import numpy as np\n'), ((1640, 1685), 'numpy.transpose', 'np.transpose', (['smoothed_results.smoothed_state'], {}), '(smoothed_results.smoothed_state)\n', (1652, 1685), True, 'import numpy as np\n'), ((1700, 1740), 'numpy.zeros', 'np.zeros', (['(states.shape[0], n_betas * 3)'], {}), '((states.shape[0], n_betas * 3))\n', (1708, 1740), True, 'import numpy as np\n'), ((2099, 2142), 'numpy.multiply', 'np.multiply', (['cis[:, i]', 'cis[:, i + n_betas]'], {}), '(cis[:, i], cis[:, i + n_betas])\n', (2110, 2142), True, 'import numpy as np\n'), ((3109, 3146), 'numpy.concatenate', 'np.concatenate', (['(states, cis)'], {'axis': '(1)'}), '((states, cis), axis=1)\n', (3123, 3146), True, 'import numpy as 
np\n'), ((3795, 3846), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-11-7"""', '"""%G-%V-%u"""'], {}), "('2020-11-7', '%G-%V-%u')\n", (3821, 3846), False, 'import datetime\n'), ((3848, 3899), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-27-7"""', '"""%G-%V-%u"""'], {}), "('2020-27-7', '%G-%V-%u')\n", (3874, 3899), False, 'import datetime\n'), ((7265, 7333), 'numpy.abs', 'np.abs', (['(model.endog[first:, region] - data.predicted_mean[:, region])'], {}), '(model.endog[first:, region] - data.predicted_mean[:, region])\n', (7271, 7333), True, 'import numpy as np\n'), ((7926, 7942), 'numpy.argmin', 'np.argmin', (['mases'], {}), '(mases)\n', (7935, 7942), True, 'import numpy as np\n'), ((7944, 7960), 'numpy.argmax', 'np.argmax', (['mases'], {}), '(mases)\n', (7953, 7960), True, 'import numpy as np\n'), ((7991, 8008), 'numpy.argmin', 'np.argmin', (['mdases'], {}), '(mdases)\n', (8000, 8008), True, 'import numpy as np\n'), ((8010, 8027), 'numpy.argmax', 'np.argmax', (['mdases'], {}), '(mdases)\n', (8019, 8027), True, 'import numpy as np\n'), ((9114, 9163), 'os.path.join', 'os.path.join', (['save_path', "('errors_' + tp + '.xlsx')"], {}), "(save_path, 'errors_' + tp + '.xlsx')\n", (9126, 9163), False, 'import os\n'), ((9380, 9409), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(1)', '(7)'], {}), '(2018, 1, 7)\n', (9397, 9409), False, 'import datetime\n'), ((9412, 9443), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': 'first'}), '(weeks=first)\n', (9430, 9443), False, 'import datetime\n'), ((9703, 9745), 'numpy.concatenate', 'np.concatenate', (['(maes_naive, maes)'], {'axis': '(0)'}), '((maes_naive, maes), axis=0)\n', (9717, 9745), True, 'import numpy as np\n'), ((10262, 10313), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-51-7"""', '"""%G-%V-%u"""'], {}), "('2020-51-7', '%G-%V-%u')\n", (10288, 10313), False, 'import datetime\n'), ((10334, 10385), 'datetime.datetime.strptime', 
'datetime.datetime.strptime', (['"""2021-25-7"""', '"""%G-%V-%u"""'], {}), "('2021-25-7', '%G-%V-%u')\n", (10360, 10385), False, 'import datetime\n'), ((19746, 19756), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19754, 19756), True, 'from matplotlib import pyplot as plt\n'), ((1564, 1612), 'numpy.sqrt', 'np.sqrt', (['filtered_results.params[n_regions * 3:]'], {}), '(filtered_results.params[n_regions * 3:])\n', (1571, 1612), True, 'import numpy as np\n'), ((1857, 1905), 'numpy.sqrt', 'np.sqrt', (['filtered_results.params[n_regions * 3:]'], {}), '(filtered_results.params[n_regions * 3:])\n', (1864, 1905), True, 'import numpy as np\n'), ((3464, 3511), 'os.path.join', 'os.path.join', (['save_path', '"""states_filtered.xlsx"""'], {}), "(save_path, 'states_filtered.xlsx')\n", (3476, 3511), False, 'import os\n'), ((3553, 3600), 'os.path.join', 'os.path.join', (['save_path', '"""states_smoothed.xlsx"""'], {}), "(save_path, 'states_smoothed.xlsx')\n", (3565, 3600), False, 'import os\n'), ((7505, 7529), 'numpy.mean', 'np.mean', (['maes[:, region]'], {}), '(maes[:, region])\n', (7512, 7529), True, 'import numpy as np\n'), ((7532, 7562), 'numpy.mean', 'np.mean', (['maes_naive[:, region]'], {}), '(maes_naive[:, region])\n', (7539, 7562), True, 'import numpy as np\n'), ((7588, 7614), 'numpy.median', 'np.median', (['maes[:, region]'], {}), '(maes[:, region])\n', (7597, 7614), True, 'import numpy as np\n'), ((7617, 7649), 'numpy.median', 'np.median', (['maes_naive[:, region]'], {}), '(maes_naive[:, region])\n', (7626, 7649), True, 'import numpy as np\n'), ((9613, 9642), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(1)', '(7)'], {}), '(2018, 1, 7)\n', (9630, 9642), False, 'import datetime\n'), ((9892, 9922), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(1)', '(14)'], {}), '(2018, 1, 14)\n', (9909, 9922), False, 'import datetime\n'), ((16864, 16908), 'os.path.join', 'os.path.join', (['save_path', "(name + '_stats.csv')"], {}), "(save_path, name + 
'_stats.csv')\n", (16876, 16908), False, 'import os\n'), ((17972, 18017), 'os.path.join', 'os.path.join', (['save_path', "(name + '_params.csv')"], {}), "(save_path, name + '_params.csv')\n", (17984, 18017), False, 'import os\n'), ((3931, 3982), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-51-7"""', '"""%G-%V-%u"""'], {}), "('2020-51-7', '%G-%V-%u')\n", (3957, 3982), False, 'import datetime\n'), ((4014, 4065), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2021-25-7"""', '"""%G-%V-%u"""'], {}), "('2021-25-7', '%G-%V-%u')\n", (4040, 4065), False, 'import datetime\n'), ((10417, 10468), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-11-7"""', '"""%G-%V-%u"""'], {}), "('2020-11-7', '%G-%V-%u')\n", (10443, 10468), False, 'import datetime\n'), ((10470, 10521), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-27-7"""', '"""%G-%V-%u"""'], {}), "('2020-27-7', '%G-%V-%u')\n", (10496, 10521), False, 'import datetime\n'), ((16160, 16198), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': '(interval * i)'}), '(weeks=interval * i)\n', (16178, 16198), False, 'import datetime\n'), ((18912, 18924), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18922, 18924), True, 'from matplotlib import pyplot as plt\n'), ((18941, 18965), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['info[i][1]'], {}), '(info[i][1])\n', (18953, 18965), True, 'from matplotlib import pyplot as plt\n'), ((18982, 19014), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data[index][0]', '"""b"""'], {}), "(t, data[index][0], 'b')\n", (18990, 19014), True, 'from matplotlib import pyplot as plt\n'), ((19031, 19097), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(data[index][1] + data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] + data[index][2] * data[index][3], 'r')\n", (19039, 19097), True, 'from matplotlib import pyplot as plt\n'), ((19114, 19180), 'matplotlib.pyplot.plot', 
'plt.plot', (['t', '(data[index][1] - data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] - data[index][2] * data[index][3], 'r')\n", (19122, 19180), True, 'from matplotlib import pyplot as plt\n'), ((19197, 19207), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19205, 19207), True, 'from matplotlib import pyplot as plt\n'), ((19422, 19434), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19432, 19434), True, 'from matplotlib import pyplot as plt\n'), ((19451, 19475), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['info[i][1]'], {}), '(info[i][1])\n', (19463, 19475), True, 'from matplotlib import pyplot as plt\n'), ((19492, 19524), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data[index][0]', '"""b"""'], {}), "(t, data[index][0], 'b')\n", (19500, 19524), True, 'from matplotlib import pyplot as plt\n'), ((19541, 19607), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(data[index][1] + data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] + data[index][2] * data[index][3], 'r')\n", (19549, 19607), True, 'from matplotlib import pyplot as plt\n'), ((19624, 19690), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(data[index][1] - data[index][2] * data[index][3])', '"""r"""'], {}), "(t, data[index][1] - data[index][2] * data[index][3], 'r')\n", (19632, 19690), True, 'from matplotlib import pyplot as plt\n'), ((16248, 16286), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': '(interval * i)'}), '(weeks=interval * i)\n', (16266, 16286), False, 'import datetime\n'), ((4992, 5043), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-50-7"""', '"""%G-%V-%u"""'], {}), "('2020-50-7', '%G-%V-%u')\n", (5018, 5043), False, 'import datetime\n'), ((11856, 11907), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2020-50-7"""', '"""%G-%V-%u"""'], {}), "('2020-50-7', '%G-%V-%u')\n", (11882, 11907), False, 'import datetime\n'), ((12685, 12736), 'datetime.datetime.strptime', 
'datetime.datetime.strptime', (['"""2020-50-7"""', '"""%G-%V-%u"""'], {}), "('2020-50-7', '%G-%V-%u')\n", (12711, 12736), False, 'import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 22:43:38 2019
@author: anhtu
"""
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import cv2
from util import *
class EmptyLayer(nn.Module):
    """Placeholder module for route/shortcut layers; the actual wiring is done in Net.forward()."""
    def __init__(self):
        super().__init__()
class YoloLayer(nn.Module):
    """Detection head marker that stores the anchor boxes used at this scale."""
    def __init__(self, anchors):
        super().__init__()
        # list of (width, height) anchor pairs selected by the cfg 'mask'
        self.anchors = anchors
def parse_cfg(filename):
    """
    Parse a darknet .cfg file into a list of layer blocks.

    Inputs:
    - cfg's file name, e.g. 'yolov3.cfg'
    Returns:
    - a list of NN blocks, each block is represented as a dictionary:
      the 'type' key holds the [section] name and every 'key=value'
      option is stored as a string-to-string entry.
    """
    # Bug fix: read through a context manager -- the original handle from
    # open() was never closed.
    with open(filename, 'r') as cfg_file:
        lines = cfg_file.read().split('\n')
    # drop empty lines and comment lines, trim surrounding whitespace
    lines = [x.strip() for x in lines if len(x) > 0 and x[0] != '#']
    blocks = []
    block = {}
    for line in lines:
        if line[0] == '[':
            # a new [section] header: flush the previous block, if any
            if len(block) != 0:
                blocks.append(block)
                block = {}
            block['type'] = line[1:-1].rstrip()
        else:
            s = line.split('=')
            block[s[0].strip()] = s[1].strip()
    blocks.append(block)
    return blocks
def create_modules(blocks):
    """Translate parsed cfg blocks into a torch nn.ModuleList.

    :param blocks: list of dicts from parse_cfg(); blocks[0] must be the
        [net] section, the rest describe layers in order.
    :return: tuple (net_info, module_list) where net_info is the [net] dict
        and module_list holds one nn.Sequential per layer block.
    """
    net_info = blocks[0]  # [net] contains the info of the entire network
    module_list = nn.ModuleList()
    prev_filters = 3  # channels feeding the first layer (3 - R, G, B)
    output_filters = []  # channel count produced by each layer, for route lookups
    for idx, layer in enumerate(blocks[1:]):
        module = nn.Sequential()
        # CONV layer
        if layer['type'] == 'convolutional':
            activation = layer['activation']
            # Conv layers followed by batch-norm carry no bias of their own.
            try:
                batchnorm = int(layer['batch_normalize'])
                bias = False
            except (KeyError, ValueError):  # bug fix: was a bare `except:`
                batchnorm = 0
                bias = True
            filters = int(layer['filters'])
            kernel_size = int(layer['size'])
            stride = int(layer['stride'])
            pad = int(layer['pad'])
            # cfg 'pad' is only a flag; real padding is half the kernel ("same")
            padding = kernel_size // 2 if pad else 0
            conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, padding, bias=bias)
            module.add_module('conv_{}'.format(idx), conv)
            if batchnorm:
                module.add_module('batch_norm_{}'.format(idx), nn.BatchNorm2d(filters))
            if activation == 'leaky':
                # 0.1 negative slope according to the YOLO papers
                module.add_module('leaky_{}'.format(idx), nn.LeakyReLU(0.1))
        # Upsample layer
        elif layer['type'] == 'upsample':
            # NOTE(review): the parsed stride is unused; scale factor is fixed at 2
            stride = int(layer['stride'])
            upsample = nn.Upsample(scale_factor=2, mode='nearest')
            module.add_module('upsample_{}'.format(idx), upsample)
        # Concatenation layer
        elif layer['type'] == 'route':
            # First index is relative to this layer; the optional second
            # index is absolute (darknet cfg convention).
            layer['layers'] = layer['layers'].split(',')
            start_layer = int(layer['layers'][0])
            try:
                end_layer = int(layer['layers'][1])
            except (IndexError, ValueError):  # bug fix: was a bare `except:`
                end_layer = 0
            module.add_module('route_{}'.format(idx), EmptyLayer())
            if end_layer == 0:
                filters = output_filters[start_layer + idx]
            else:
                filters = output_filters[start_layer + idx] + output_filters[end_layer]
        # Shortcut layer (skip connection); the add itself happens in Net.forward()
        elif layer['type'] == 'shortcut':
            module.add_module('shortcut_{}'.format(idx), EmptyLayer())
        # YOLO layer
        elif layer["type"] == "yolo":
            mask = [int(x) for x in layer["mask"].split(",")]
            anchors = [int(a) for a in layer["anchors"].split(",")]
            anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in mask]  # keep only this scale's anchors
            module.add_module("Detection_{}".format(idx), YoloLayer(anchors))
        module_list.append(module)
        prev_filters = filters
        output_filters.append(filters)
    return (net_info, module_list)
## create network
class Net(nn.Module):
    """YOLOv3 network driven by a darknet cfg file.

    Layers are instantiated by create_modules(); forward() walks the block
    list again to resolve route/shortcut wiring and to concatenate the
    detections produced by the YOLO heads.
    """
    def __init__(self, filename):
        super(Net, self).__init__()
        # keep the parsed blocks: forward() needs them to interpret each layer
        self.blocks = parse_cfg(filename)
        self.net_info, self.module_list = create_modules(self.blocks)
    def __str__(self):
        return ('** Information about the network: ' + str(self.net_info) + '\n\n' +
                '** All layers of the network: \n' + str(self.module_list))
    def forward(self, x):
        """Forward pass; returns the detections from all YOLO heads concatenated on dim 1."""
        layers = self.blocks[1:] # except the 'net' module
        outputs = {}   # per-layer outputs, needed by route/shortcut layers
        yolo_calc = 0  # set to 1 once the first YOLO head fills `detections`
        for idx, layer in enumerate(layers):
            if layer['type'] == 'convolutional' or layer['type'] == 'upsample':
                x = self.module_list[idx](x)
            elif layer['type'] == 'route':
                l = [int(x) for x in layer['layers']]
                if len(l) == 1:
                    # single source: index is an offset relative to this layer
                    x = outputs[idx + l[0]]
                else:
                    # two sources: first is a relative offset, second an absolute index
                    out1 = outputs[idx + l[0]]
                    out2 = outputs[l[1]]
                    x = torch.cat([out1, out2], dim=1)
            elif layer['type'] == 'shortcut':
                # residual connection: add the 'from' layer to the previous output
                x = outputs[int(layer['from'])+idx] + outputs[idx-1]
            elif layer['type'] == 'yolo':
                anchors = self.module_list[idx][0].anchors
                inp_dims = (int(self.net_info['height']), int(self.net_info['width']))
                num_classes = int(layer['classes'])
                # x has shape (batch_size, (4+1+80)*3, N, N)
                # in which, 4: bbox offsets, 1: objectness score, 80: classes, 3: num of boxes, N: box's dimension
                x = x.data # just need the data, seperate from autograd
                # process_prediction comes from util (star import at file top)
                x = process_prediction(x, inp_dims, anchors, num_classes)
                if not yolo_calc: #if no collector has been intialised.
                    detections = x
                    yolo_calc = 1
                else:
                    detections = torch.cat((detections, x), 1)
            # NOTE(review): for YOLO layers this caches the processed detections,
            # not the raw feature map; harmless for yolov3.cfg since nothing
            # routes from a YOLO layer, but worth confirming for other cfgs.
            outputs[idx] = x
        return detections
    def load_weights(self, weightfile):
        """Load darknet binary weights into the conv / batch-norm tensors in layer order.

        :param weightfile: path to a darknet .weights file
        """
        fp = open(weightfile, "rb")
        track = 0 # track is the total number of params which have been already used
        #The first 5 values are header information
        # 1. Major version number
        # 2. Minor Version Number
        # 3. Subversion number
        # 4,5. Images seen by the network (during training)
        header = np.fromfile(fp, dtype = np.int32, count = 5)
        params = np.fromfile(fp, dtype = np.float32)
        fp.close()
        for i in range(len(self.module_list)):
            block = self.blocks[i+1] # ignore the first net info block
            if block['type'] == 'convolutional':
                try:
                    # NOTE(review): bare except also swallows ValueError etc.
                    batchnorm = int(block['batch_normalize'])
                except:
                    batchnorm = 0
                model = self.module_list[i]
                # CNN module contains: CNN, batchnorm, leaky ReLU (no weights -> ignore)
                conv = model[0]
                if batchnorm:
                    # darknet stores, in order: bn bias, bn weights,
                    # running mean, running variance
                    bn = model[1]
                    num_bn_params = bn.weight.numel()
                    # get parameters, then reshape to the same shape as parameter tensors
                    bn_bias = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.bias)
                    track += num_bn_params
                    bn_weights = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.weight)
                    track += num_bn_params
                    bn_running_mean = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.running_mean)
                    track += num_bn_params
                    bn_running_var = torch.from_numpy(params[track:track+num_bn_params]).view_as(bn.running_var)
                    track += num_bn_params
                    # copy values into parameter tensors
                    bn.bias.data.copy_(bn_bias)
                    bn.weight.data.copy_(bn_weights)
                    # NOTE(review): .detach() here vs .data elsewhere -- both
                    # bypass autograd; inconsistent but equivalent in effect
                    bn.running_mean.detach().copy_(bn_running_mean)
                    bn.running_var.data.copy_(bn_running_var)
                else:
                    # no batch norm: the conv layer carries its own bias
                    num_conv_bias = conv.bias.numel()
                    conv_bias = torch.from_numpy(params[track:track+num_conv_bias]).view_as(conv.bias)
                    track += num_conv_bias
                    conv.bias.data.copy_(conv_bias)
                # conv weights always follow the (bn params | conv bias)
                num_conv_weights = conv.weight.numel()
                conv_weights = torch.from_numpy(params[track:track+num_conv_weights]).view_as(conv.weight)
                track += num_conv_weights
                conv.weight.data.copy_(conv_weights)
        print('* Weights have been successfully loaded!\
        \n- Number of model\'s params: %d\
        \n- Number of cfg\'s params: %d' %(track, len(params)))
def get_test_input():
    """Load 'dog-cycle-car.png' and return it as a normalized NCHW float tensor (1,3,416,416)."""
    image = cv2.imread("dog-cycle-car.png")
    image = cv2.resize(image, (416, 416))  # match the network input dimension
    # BGR -> RGB, then HWC -> CHW
    chw = image[:, :, ::-1].transpose((2, 0, 1))
    # prepend a batch axis and scale pixel values into [0, 1]
    batched = chw[np.newaxis, :, :, :] / 255.0
    return torch.from_numpy(batched).float()
#model = None
#model = Net("cfg/yolov3.cfg")
##model.load_weights('yolov3.weights')
#inp = get_test_input()
#pred = model(inp) | [
"torch.nn.Sequential",
"numpy.fromfile",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.cat",
"cv2.imread",
"torch.nn.BatchNorm2d",
"torch.nn.Upsample",
"torch.nn.LeakyReLU",
"cv2.resize",
"torch.from_numpy"
] | [((1369, 1384), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1382, 1384), True, 'import torch.nn as nn\n'), ((9441, 9472), 'cv2.imread', 'cv2.imread', (['"""dog-cycle-car.png"""'], {}), "('dog-cycle-car.png')\n", (9451, 9472), False, 'import cv2\n'), ((9483, 9510), 'cv2.resize', 'cv2.resize', (['img', '(416, 416)'], {}), '(img, (416, 416))\n', (9493, 9510), False, 'import cv2\n'), ((1556, 1571), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1569, 1571), True, 'import torch.nn as nn\n'), ((6887, 6927), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'np.int32', 'count': '(5)'}), '(fp, dtype=np.int32, count=5)\n', (6898, 6927), True, 'import numpy as np\n'), ((6949, 6982), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'np.float32'}), '(fp, dtype=np.float32)\n', (6960, 6982), True, 'import numpy as np\n'), ((2260, 2333), 'torch.nn.Conv2d', 'nn.Conv2d', (['prev_filters', 'filters', 'kernel_size', 'stride', 'padding'], {'bias': 'bias'}), '(prev_filters, filters, kernel_size, stride, padding, bias=bias)\n', (2269, 2333), True, 'import torch.nn as nn\n'), ((9736, 9758), 'torch.from_numpy', 'torch.from_numpy', (['img_'], {}), '(img_)\n', (9752, 9758), False, 'import torch\n'), ((2440, 2463), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['filters'], {}), '(filters)\n', (2454, 2463), True, 'import torch.nn as nn\n'), ((2594, 2611), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2606, 2611), True, 'import torch.nn as nn\n'), ((2858, 2901), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (2869, 2901), True, 'import torch.nn as nn\n'), ((5464, 5494), 'torch.cat', 'torch.cat', (['[out1, out2]'], {'dim': '(1)'}), '([out1, out2], dim=1)\n', (5473, 5494), False, 'import torch\n'), ((9034, 9090), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_conv_weights]'], {}), '(params[track:track + num_conv_weights])\n', (9050, 9090), 
False, 'import torch\n'), ((7754, 7807), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (7770, 7807), False, 'import torch\n'), ((7899, 7952), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (7915, 7952), False, 'import torch\n'), ((8051, 8104), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (8067, 8104), False, 'import torch\n'), ((8208, 8261), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_bn_params]'], {}), '(params[track:track + num_bn_params])\n', (8224, 8261), False, 'import torch\n'), ((8744, 8797), 'torch.from_numpy', 'torch.from_numpy', (['params[track:track + num_conv_bias]'], {}), '(params[track:track + num_conv_bias])\n', (8760, 8797), False, 'import torch\n'), ((6407, 6436), 'torch.cat', 'torch.cat', (['(detections, x)', '(1)'], {}), '((detections, x), 1)\n', (6416, 6436), False, 'import torch\n')] |
from __future__ import division, print_function
import pickle
import pdb
import os
import time
from sklearn.cross_validation import StratifiedKFold
from sklearn import svm
from sklearn import metrics
import gensim
import random
from learners import SK_SVM,SK_KNN,SK_MLP
from tuner import DE_Tune_ML
from model import PaperData
from utility import study
from results import results_process
import numpy as np
#import wget
import zipfile
from sklearn import neighbors
from sklearn import metrics
import threading
from threading import Barrier
import timeit
import multiprocessing
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.lda import LDA
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.cluster import AffinityPropagation
import collections
from multiprocessing import Queue
import pandas as pd
import warnings
from sklearn.neural_network import MLPClassifier
def tune_learner(learner, train_X, train_Y, tune_X, tune_Y, goal,
                 target_class=None):
    """Instantiate *learner* and run differential-evolution tuning on it.

    :param learner: learner class (e.g. SK_SVM) to instantiate
    :param train_X: training features
    :param train_Y: training labels
    :param tune_X: validation features used by the tuner
    :param tune_Y: validation labels used by the tuner
    :param goal: name of the metric being optimized
    :param target_class: tuning target; falls back to *goal* when falsy
    :return: whatever DE_Tune_ML.Tune() returns
    """
    target = target_class if target_class else goal
    model = learner(train_X, train_Y, tune_X, tune_Y, goal)
    tuner = DE_Tune_ML(model, model.get_param(), goal, target)
    return tuner.Tune()
def load_vec(d, data, use_pkl=False, file_name=None):
    """Return document vectors, from a pickle cache or computed fresh.

    :param d: data object exposing get_document_vec(data, file_name)
    :param data: records to vectorize
    :param use_pkl: when True, try to read vectors from *file_name*;
        returns None if that file does not exist (original behavior kept)
    :param file_name: pickle path (also forwarded to get_document_vec)
    """
    if use_pkl:
        if os.path.isfile(file_name):
            with open(file_name, "rb") as cached:
                return pickle.load(cached)
        return None  # cache requested but missing: caller gets None
    return d.get_document_vec(data, file_name)
def print_results(clfs, stop=None, start=None):
    """Dump every classifier's confusion report to a file and post-process it.

    :param clfs: iterable of trained learners exposing a ``confusion`` string
    :param stop: optional end-timer reading
    :param start: optional start-timer reading; training time is printed only
        when both readings are supplied (one caller passes just ``clfs``,
        which previously raised TypeError)
    """
    # Bug fix: the original computed a timestamped name via time.strftime and
    # then immediately overwrote it with this fixed name; the dead code is gone.
    file_name = os.path.sep.join(["20171103.txt"])
    content = "".join(each.confusion for each in clfs)
    print(content)
    if stop is not None and start is not None:
        print("Model training time: ", stop - start)
    with open(file_name, "w") as f:
        f.write(content)
    results_process.reports(file_name)
def get_acc(cm):
    """Per-class accuracy (diagonal / row total) from a confusion matrix.

    Generalized: the original hard-coded 4 classes with exactly 400 test
    samples each (``cm[i][i] / 400``); this computes the same values for
    that case but works for any square matrix and class counts.

    :param cm: square confusion matrix (list of lists or ndarray);
        row i holds the counts for true class i
    :return: list of per-class accuracies; 0.0 for an empty class row
    """
    out = []
    for i, row in enumerate(cm):
        total = sum(row)
        # guard empty classes instead of dividing by zero
        out.append(cm[i][i] / total if total else 0.0)
    return out
@study
def run_tuning_SVM(word2vec_src, repeats=3,
                   fold=10,
                   tuning=True):
    """
    Tune and evaluate an SVM (SK_SVM) on word-embedding document vectors.

    :param word2vec_src:str, path of word2vec model
    :param repeats:int, number of repeats
    :param fold: int,number of folds
    :param tuning: boolean, tuning or not.
    :return: None
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    # file_name=False: vectors are always computed fresh, never unpickled
    train_pd = load_vec(data, data.train_data, file_name=False)
    print(train_pd)  # NOTE(review): leftover debug print
    test_pd = load_vec(data, data.test_data, file_name=False)
    learner = [SK_SVM][0]
    # optimize Macro_F (index 6) during tuning
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    print(goal)
    F = {}       # per-goal scores accumulated across folds by clf.learn()
    clfs = []    # one trained learner per fold x repeat
    start = timeit.default_timer()
    for i in range(repeats):  # repeat n times here
        kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                             shuffle=True)
        for train_index, tune_index in kf:
            print(train_pd)     # NOTE(review): leftover debug prints
            print(train_index)
            # NOTE(review): DataFrame.ix is removed in modern pandas
            train_data = train_pd.ix[train_index]
            print(train_data)
            tune_data = train_pd.ix[tune_index]
            train_X = train_data.loc[:, "Output"].values
            train_Y = train_data.loc[:, "LinkTypeId"].values
            tune_X = tune_data.loc[:, "Output"].values
            tune_Y = tune_data.loc[:, "LinkTypeId"].values
            test_X = test_pd.loc[:, "Output"].values
            test_Y = test_pd.loc[:, "LinkTypeId"].values
            # DE-tune hyper-parameters on the held-out tuning fold
            params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                              tune_Y, goal) if tuning else ({}, 0)
            clf = learner(train_X, train_Y, test_X, test_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    stop = timeit.default_timer()
    print("Model training time: ", stop - start)
    print_results(clfs,stop,start)
@study
def run_tuning_MLP(word2vec_src, repeats=1,
                   fold=2,
                   tuning=True):
    """
    Tune and evaluate an MLP (SK_MLP) on word-embedding document vectors.

    :param word2vec_src:str, path of word2vec model
    :param repeats:int, number of repeats
    :param fold: int,number of folds
    :param tuning: boolean, tuning or not.
    :return: None
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    # file_name=False: vectors are always computed fresh, never unpickled
    train_pd = load_vec(data, data.train_data, file_name=False)
    print(train_pd)  # NOTE(review): leftover debug print
    test_pd = load_vec(data, data.test_data, file_name=False)
    learner = [SK_MLP][0]
    # optimize Macro_F (index 6) during tuning
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    print(goal)
    F = {}       # per-goal scores accumulated across folds by clf.learn()
    clfs = []    # one trained learner per fold x repeat
    start = timeit.default_timer()
    for i in range(repeats):  # repeat n times here
        kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                             shuffle=True)
        for train_index, tune_index in kf:
            print(train_pd)     # NOTE(review): leftover debug prints
            print(train_index)
            # NOTE(review): DataFrame.ix is removed in modern pandas
            train_data = train_pd.ix[train_index]
            print(train_data)
            tune_data = train_pd.ix[tune_index]
            train_X = train_data.loc[:, "Output"].values
            train_Y = train_data.loc[:, "LinkTypeId"].values
            tune_X = tune_data.loc[:, "Output"].values
            tune_Y = tune_data.loc[:, "LinkTypeId"].values
            test_X = test_pd.loc[:, "Output"].values
            test_Y = test_pd.loc[:, "LinkTypeId"].values
            # DE-tune hyper-parameters on the held-out tuning fold
            params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                              tune_Y, goal) if tuning else ({}, 0)
            clf = learner(train_X, train_Y, test_X, test_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    stop = timeit.default_timer()
    print("Model training time: ", stop - start)
    print_results(clfs,stop,start)
@study
def run_tuning_KNN(word2vec_src, repeats=6,
                   fold=10,
                   tuning=True):
    """
    Tune and evaluate a KNN learner (SK_KNN) on word-embedding vectors.

    :param word2vec_src:str, path of word2vec model
    :param repeats:int, number of repeats
    :param fold: int,number of folds
    :param tuning: boolean, tuning or not.
    :return: None
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, file_name=False)
    test_pd = load_vec(data, data.test_data, file_name=False)
    learner = [SK_KNN][0]
    # optimize Macro_F (index 6) during tuning
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    F = {}       # per-goal scores accumulated across folds by clf.learn()
    clfs = []    # one trained learner per fold x repeat
    start = timeit.default_timer()
    for i in range(repeats):  # repeat n times here
        kf = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                             shuffle=True)
        for train_index, tune_index in kf:
            train_data = train_pd.ix[train_index]
            tune_data = train_pd.ix[tune_index]
            train_X = train_data.loc[:, "Output"].values
            train_Y = train_data.loc[:, "LinkTypeId"].values
            tune_X = tune_data.loc[:, "Output"].values
            tune_Y = tune_data.loc[:, "LinkTypeId"].values
            test_X = test_pd.loc[:, "Output"].values
            test_Y = test_pd.loc[:, "LinkTypeId"].values
            params, evaluation = tune_learner(learner, train_X, train_Y, tune_X,
                                              tune_Y, goal) if tuning else ({}, 0)
            clf = learner(train_X, train_Y, test_X, test_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    stop = timeit.default_timer()
    print("Model training time: ", stop - start)
    # Bug fix: print_results requires (clfs, stop, start); the original
    # call passed only clfs and raised TypeError at runtime.
    print_results(clfs, stop, start)
@study
def run_SVM_baseline(word2vec_src):
    """Baseline experiment: RBF-kernel SVM on word-embedding document vectors.

    :param word2vec_src: path of the word2vec model file
    :return: None (prints report, per-class accuracy, and training time)
    """
    print("# word2vec:", word2vec_src)
    classifier = svm.SVC(kernel="rbf", gamma=0.005)
    embeddings = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=embeddings)
    train_frame = load_vec(data, data.train_data, use_pkl=False)
    test_frame = load_vec(data, data.test_data, use_pkl=False)
    X_train = train_frame.loc[:, "Output"].tolist()
    y_train = train_frame.loc[:, "LinkTypeId"].tolist()
    X_test = test_frame.loc[:, "Output"].tolist()
    y_test = test_frame.loc[:, "LinkTypeId"].tolist()
    t0 = timeit.default_timer()
    classifier.fit(X_train, y_train)
    t1 = timeit.default_timer()
    y_pred = classifier.predict(X_test)
    print(metrics.classification_report(y_test, y_pred,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(y_test, y_pred, labels=["1", "2", "3", "4"])
    print("accuracy ", get_acc(cm))
    print("Model training time: ", t1 - t0)
@study
def run_KNN_baseline(word2vec_src):
    """Baseline experiment: 5-nearest-neighbors on word-embedding document vectors.

    :param word2vec_src: path of the word2vec model file
    :return: None (prints report, per-class accuracy, and training time)
    """
    print("# word2vec:", word2vec_src)
    classifier = neighbors.KNeighborsClassifier(n_neighbors=5)
    embeddings = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=embeddings)
    train_frame = load_vec(data, data.train_data, use_pkl=False)
    test_frame = load_vec(data, data.test_data, use_pkl=False)
    X_train = train_frame.loc[:, "Output"].tolist()
    y_train = train_frame.loc[:, "LinkTypeId"].tolist()
    X_test = test_frame.loc[:, "Output"].tolist()
    y_test = test_frame.loc[:, "LinkTypeId"].tolist()
    t0 = timeit.default_timer()
    classifier.fit(X_train, y_train)
    t1 = timeit.default_timer()
    y_pred = classifier.predict(X_test)
    print(metrics.classification_report(y_test, y_pred,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(y_test, y_pred, labels=["1", "2", "3", "4"])
    print("accuracy ", get_acc(cm))
    print("Model training time: ", t1 - t0)
@study
def run_MLP(word2vec_src):
    """Baseline experiment: scikit-learn MLP on word-embedding document vectors.

    :param word2vec_src: path of the word2vec model file
    :return: None (prints report, per-class accuracy, and training time)
    """
    print("# word2vec:", word2vec_src)
    classifier = MLPClassifier()
    embeddings = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=embeddings)
    train_frame = load_vec(data, data.train_data, use_pkl=False)
    test_frame = load_vec(data, data.test_data, use_pkl=False)
    X_train = train_frame.loc[:, "Output"].tolist()
    y_train = train_frame.loc[:, "LinkTypeId"].tolist()
    X_test = test_frame.loc[:, "Output"].tolist()
    y_test = test_frame.loc[:, "LinkTypeId"].tolist()
    t0 = timeit.default_timer()
    classifier.fit(X_train, y_train)
    t1 = timeit.default_timer()
    y_pred = classifier.predict(X_test)
    print(metrics.classification_report(y_test, y_pred,
                                        labels=["1", "2", "3", "4"],
                                        digits=3))
    cm = metrics.confusion_matrix(y_test, y_pred, labels=["1", "2", "3", "4"])
    print("accuracy ", get_acc(cm))
    print("Model training time: ", t1 - t0)
#################Katie's Code +++++++++++++++++++++++++++++++
# worker: trains and returns the SVM for one cluster
def run_SVM_C(word2vec_src, train_pd, queue, l, test_pd_n):
    """Fit an RBF-kernel SVM on one cluster's frame; put [model, cluster_id] on *queue*.

    :param word2vec_src: unused here; kept for a uniform worker signature
    :param train_pd: training DataFrame for this cluster
    :param queue: queue collecting [model, l] per worker
    :param l: cluster label
    :param test_pd_n: unused here; evaluation happens in the caller
    :return: the fitted classifier
    """
    model = svm.SVC(kernel="rbf", gamma=0.005)
    features = train_pd.loc[:, "Output"].tolist()
    labels = train_pd.loc[:, "LinkTypeId"].tolist()
    t0 = timeit.default_timer()
    model.fit(features, labels)
    t1 = timeit.default_timer()
    print("SVM Model Train Time", (t1 - t0))
    queue.put([model, l])
    return model
def run_KNN_C(word2vec_src, train_pd, queue, l, test_pd_n):
    """Fit a 5-NN classifier on one cluster's frame; put [model, cluster_id] on *queue*.

    :param word2vec_src: unused here; kept for a uniform worker signature
    :param train_pd: training DataFrame for this cluster
    :param queue: queue collecting [model, l] per worker
    :param l: cluster label
    :param test_pd_n: unused here; evaluation happens in the caller
    :return: the fitted classifier
    """
    model = neighbors.KNeighborsClassifier(n_neighbors=5)
    features = train_pd.loc[:, "Output"].tolist()
    labels = train_pd.loc[:, "LinkTypeId"].tolist()
    t0 = timeit.default_timer()
    model.fit(features, labels)
    t1 = timeit.default_timer()
    print("Th", l)
    print("KNN Model Train Time", (t1 - t0))
    queue.put([model, l])
    return model
@study
def run_tuning_SVM_C(word2vec_src,train_pd_c,queue,l,test_pd_c,repeats=1,
                     fold=2,
                     tuning=True):
    """Tune an SVM on one cluster's data and push [learners..., cluster_id] onto *queue*.

    :param word2vec_src: str, path of word2vec model (data arrive pre-vectorized)
    :param train_pd_c: training frame for this cluster
    :param queue: queue receiving the result list
    :param l: cluster label, appended at the end of the result list
    :param test_pd_c: held-out test frame
    :param repeats: int, number of repeats
    :param fold: int, number of folds
    :param tuning: boolean, tuning or not
    :return: the list that was put on the queue
    """
    train_pd = train_pd_c.reset_index()
    test_pd = test_pd_c
    learner = [SK_SVM][0]
    # optimize Macro_F (index 6) during tuning
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    F = {}
    clfs = []
    for _ in range(repeats):  # repeat n times here
        folds = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                                shuffle=True)
        for fit_idx, val_idx in folds:
            fit_frame = train_pd.ix[fit_idx]
            val_frame = train_pd.ix[val_idx]
            fit_X = fit_frame.loc[:, "Output"].values
            fit_Y = fit_frame.loc[:, "LinkTypeId"].values
            val_X = val_frame.loc[:, "Output"].values
            val_Y = val_frame.loc[:, "LinkTypeId"].values
            hold_X = test_pd.loc[:, "Output"].values
            hold_Y = test_pd.loc[:, "LinkTypeId"].values
            if tuning:
                params, evaluation = tune_learner(learner, fit_X, fit_Y,
                                                  val_X, val_Y, goal)
            else:
                params, evaluation = {}, 0
            clf = learner(fit_X, fit_Y, hold_X, hold_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    clfs.append(l)
    queue.put(clfs)
    return clfs
@study
def run_tuning_KNN_C(word2vec_src,train_pd_c,queue,l,test_pd_c, repeats=10,
                     fold=10,
                     tuning=True):
    """Tune a KNN learner on one cluster's data and push [learners..., cluster_id] onto *queue*.

    :param word2vec_src: str, path of word2vec model (data arrive pre-vectorized)
    :param train_pd_c: training frame for this cluster
    :param queue: queue receiving the result list
    :param l: cluster label, appended at the end of the result list
    :param test_pd_c: held-out test frame
    :param repeats: int, number of repeats
    :param fold: int, number of folds
    :param tuning: boolean, tuning or not
    :return: the list that was put on the queue
    """
    train_pd = train_pd_c.reset_index()
    test_pd = test_pd_c
    learner = [SK_KNN][0]
    # optimize Macro_F (index 6) during tuning
    goal = {0: "PD", 1: "PF", 2: "PREC", 3: "ACC", 4: "F", 5: "G", 6: "Macro_F",
            7: "Micro_F"}[6]
    F = {}
    clfs = []
    for _ in range(repeats):  # repeat n times here
        folds = StratifiedKFold(train_pd.loc[:, "LinkTypeId"].values, fold,
                                shuffle=True)
        for fit_idx, val_idx in folds:
            fit_frame = train_pd.ix[fit_idx]
            val_frame = train_pd.ix[val_idx]
            fit_X = fit_frame.loc[:, "Output"].values
            fit_Y = fit_frame.loc[:, "LinkTypeId"].values
            val_X = val_frame.loc[:, "Output"].values
            val_Y = val_frame.loc[:, "LinkTypeId"].values
            hold_X = test_pd.loc[:, "Output"].values
            hold_Y = test_pd.loc[:, "LinkTypeId"].values
            if tuning:
                params, evaluation = tune_learner(learner, fit_X, fit_Y,
                                                  val_X, val_Y, goal)
            else:
                params, evaluation = {}, 0
            clf = learner(fit_X, fit_Y, hold_X, hold_Y, goal)
            F = clf.learn(F, **params)
            clfs.append(clf)
    clfs.append(l)
    queue.put(clfs)
    return clfs
# parses a classifier's predictions into the report-dictionary format:
# [class](precision, recall, f1score, support)
def results_SVM(clf, test_X, test_Y):
    """Predict with *clf* on *test_X* and return the parsed classification report."""
    y_hat = clf.predict(test_X)
    # class labels: 1=Duplicates, 2=DirectLink, 3=IndirectLink, 4=Isolated
    report = metrics.classification_report(
        test_Y, y_hat, labels=["1", "2", "3", "4"], digits=3)
    return parse_classification_report(report)
#cm=metrics.confusion_matrix(test_Y, predicted, labels=["1", "2", "3", "4"])
#print("accuracy ", get_acc(cm)
def results_SVM_C(predicted, test_Y):
    """Print, append-to-CSV, and return the parsed report for precomputed predictions.

    :param predicted: predicted labels
    :param test_Y: true labels
    :return: parsed report dictionary
    """
    # class labels: 1=Duplicates, 2=DirectLink, 3=IndirectLink, 4=Isolated
    report = metrics.classification_report(
        test_Y, predicted, labels=["1", "2", "3", "4"], digits=3)
    print(report)
    classifaction_report_csv(report)
    return parse_classification_report(report)
def classifaction_report_csv(report):
    """Append per-class rows parsed from an sklearn classification_report
    string to 'classification_report.csv'.

    NOTE(review): the parsing is positional and whitespace-sensitive; it
    assumes the exact column layout emitted by this sklearn version and
    will break if the report format changes. (The name's 'classifaction'
    typo is kept for caller compatibility.)

    :param report: str as returned by metrics.classification_report
    """
    report_data = []
    lines = report.split('\n')
    # lines[0:2] are the header; the last three are blank/average rows
    for line in lines[2:-3]:
        row = {}
        row_data = line.split('  ')
        # presumably indices 2..6 line up with class/precision/recall/
        # f1/support for this layout -- verify if sklearn is upgraded
        row['class'] = row_data[2]
        row['precision'] = float(row_data[3].strip())
        row['recall'] = float(row_data[4])
        row['f1_score'] = float(row_data[5])
        row['support'] = float(row_data[6].strip())
        report_data.append(row)
    dataframe = pd.DataFrame.from_dict(report_data)
    # mode='a': rows accumulate across calls (headers repeat per call)
    dataframe.to_csv('classification_report.csv',mode = 'a' ,index = False)
def total_summary(result_set, num_rows, start0, start1, stop0, stop1, start, stop):
    """Aggregate per-cluster report dicts into support-weighted averages and print timings.

    :param result_set: list of parsed report dicts, each with an 'avg' entry
        shaped [precision, recall, f1, support]
    :param num_rows: total number of test rows (weighting denominator)
    :param start0/stop0: timer readings around the first-layer model fit
    :param start1/stop1: timer readings around the per-cluster model training
    :param start/stop: timer readings around the gap-statistic search
    :return: dict with weighted 'precision', 'recall', 'f1' (previously
        computed but discarded; returning it is backward compatible)
    """
    weightedAvgs = [0, 0, 0]
    for entry in result_set:
        avg_list = entry['avg']
        support_count = avg_list[3]
        for i in range(3):
            # weight each metric by the cluster's share of the test rows
            weightedAvgs[i] += (avg_list[i] * support_count) / num_rows
    result = {'precision': weightedAvgs[0],
              'recall': weightedAvgs[1],
              'f1': weightedAvgs[2]}
    print("GAP statistics Time:", (stop - start))
    print("1st Model training time: ", (stop0 - start0))
    print("layer 2 Models training time: ", (stop1 - start1))
    # Bug fix: this line previously re-printed (stop1 - start1), duplicating
    # the layer-2 figure; total model time spans first-model start through
    # last layer-2 stop.
    print("Total Model training time: ", (stop1 - start0))
    print("Total training time: ", (stop1 - start))
    return result
def run_kmeans(word2vec_src, ):
    """Two-layer pipeline: KMeans-cluster the training vectors, train one
    tuned model per cluster in a thread, then evaluate cluster-routed
    predictions on the test set.

    NOTE(review): this function has several runtime defects flagged inline
    and appears not to be runnable as-is.

    :param word2vec_src: path of the word2vec model file
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    queue = Queue()
    start = timeit.default_timer()
    # gap-statistic search for k; optimalK presumably comes from
    # sc_variants / school_closures (star or module import) -- not visible here
    numClusters = optimalK(pd.DataFrame(train_X))
    stop = timeit.default_timer()
    #numClusters = 5
    print("Found optimal k: " + str(numClusters))
    clf = KMeans(n_clusters=numClusters,
                init='k-means++', max_iter=200, n_init=1)
    start0 = timeit.default_timer()
    clf.fit(train_X)
    stop0 = timeit.default_timer()
    svm_models = [] # maintain a list of svms
    s1 = timeit.default_timer()
    # tag every training row with its cluster label
    data.train_data['clabel'] = clf.labels_
    s2 = timeit.default_timer()
    print("Inter - ", (s2-s1))
    start1 = timeit.default_timer()
    #b = Barrier(numClusters)
    #Change the target here as this will be used result validation purpose
    target_model = run_tuning_KNN_C
    for l in range(numClusters):
        cluster = data.train_data.loc[data.train_data['clabel'] == l]
        print("Thread No", l)
        # NOTE(review): run_tuning_KNN_C takes test_pd_c as a required 5th
        # positional argument; only four are passed here -> TypeError in the
        # worker thread.
        t = threading.Thread(target = run_tuning_KNN_C, args = [word2vec_src,cluster,queue,l])
        # NOTE(review): 'threads' is never initialized in this function;
        # this line raises NameError ('threads = []' is missing above).
        threads.append(t)
        t.start()
        # blocking get inside the spawn loop serializes the workers: each
        # thread effectively finishes before the next one starts
        response = queue.get()
        svm_models.append(response)
        #b.wait()
    for thread in threads:
        thread.join()
    stop1 = timeit.default_timer()
    print("Done all models - ", (stop1 - start0))
    svm_results = [] # maintain a list of svm results
    test_X = test_pd.loc[:, "Output"].tolist()
    # route each test row to its nearest cluster's model
    predicted = clf.predict(test_X)
    data.test_data['clabel'] = predicted
    total_predicted = []
    total_cluster_Y = []
    avg_predicted = []
    avg_cluster_Y = []
    # NOTE(review): relies on 'l' keeping its last loop value
    # (numClusters - 1) from the training loop above; the final list
    # entry is the cluster label, hence the '-1'.
    for i in range(len(svm_models[l])-1):
        total_predicted = []
        total_cluster_Y = []
        for l in range(numClusters):
            cluster = data.test_data.loc[data.test_data['clabel'] == l]
            svm_model = svm_models[l][i]
            cluster_X = cluster.loc[:, "Output"].tolist()
            cluster_Y = cluster.loc[:, "LinkTypeId"].tolist()
            total_cluster_Y = np.append(total_cluster_Y,cluster_Y)
            avg_cluster_Y = np.append(avg_cluster_Y,cluster_Y)
            # tuned learners wrap the sklearn estimator in .learner;
            # the plain workers return the estimator directly
            if target_model == run_tuning_SVM_C or target_model == run_tuning_KNN_C:
                predicted_C = svm_model.learner.predict(cluster_X)
            else:
                predicted_C = svm_model.predict(cluster_X)
            total_predicted = np.append(total_predicted,predicted_C)
            avg_predicted = np.append(avg_predicted,predicted_C)
        svm_results.append(results_SVM_C(total_predicted, total_cluster_Y))# store all the SVM result report in a dictionary
        svm_results.append(results_SVM_C(avg_predicted, avg_cluster_Y))
    # call the helper method to summarize the svm results
    total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1,start,stop)
def run_kmeans_m(word2vec_src):
    """Variant of run_kmeans that starts all worker threads before consuming
    any results, then sorts the collected models by their trailing tag.

    word2vec_src: path to a pretrained gensim Word2Vec model file.
    Relies on module-level names: ``threads`` (shared list), ``load_vec``,
    ``optimalK``, the ``run_tuning_*`` trainers, and ``results_SVM_C``.
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    # Vectorize the train/test splits (no pickle cache).
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    queue = Queue()
    # Time the gap-statistic search for the cluster count.
    start = timeit.default_timer()
    numClusters = optimalK(pd.DataFrame(train_X))
    stop = timeit.default_timer()
    #numClusters = 5
    print("Found optimal k: " + str(numClusters))
    clf = KMeans(n_clusters=numClusters,
                 init='k-means++', max_iter=200, n_init=1)
    start0 = timeit.default_timer()
    clf.fit(train_X)
    stop0 = timeit.default_timer()
    svm_models = [] # maintain a list of svms
    s1 = timeit.default_timer()
    # Tag each training row with its cluster label.
    data.train_data['clabel'] = clf.labels_
    s2 = timeit.default_timer()
    print("Inter - ", (s2-s1))
    start1 = timeit.default_timer()
    #Change the target here as this will be used result validation purpose
    target_model = run_tuning_SVM_C
    for l in range(numClusters):
        cluster = data.train_data.loc[data.train_data['clabel'] == l]
        print("Thread No", l)
        #result.append(pool.apply_async(run_KNN_C, args = (word2vec_src,cluster,queue,)))
        t = threading.Thread(target = run_tuning_SVM_C, args = [word2vec_src,cluster,queue,l,test_pd])
        threads.append(t)
    # Launch every worker first, then drain one result per worker.
    for th in threads:
        th.start()
    for th in threads:
        response = queue.get()
        svm_models.append(response)
    # Results arrive in completion order; sort by the trailing element
    # (presumably the cluster index appended by the worker — verify).
    svm_models = sorted(svm_models, key = lambda th: th[-1] )
    stop1 = timeit.default_timer()
    svm_results = [] # maintain a list of svm results
    test_X = test_pd.loc[:, "Output"].tolist()
    predicted = clf.predict(test_X)
    data.test_data['clabel'] = predicted
    total_predicted = []
    total_cluster_Y = []
    avg_predicted = []
    avg_cluster_Y = []
    # NOTE(review): `l` below is the leftover loop index (numClusters-1);
    # assumes every cluster produced the same number of models — TODO confirm.
    print(len(svm_models[l])-1)
    for i in range(len(svm_models[l])-1):
        total_predicted = []
        total_cluster_Y = []
        for l in range(numClusters):
            cluster = data.test_data.loc[data.test_data['clabel'] == l]
            svm_model = svm_models[l][i]
            cluster_X = cluster.loc[:, "Output"].tolist()
            cluster_Y = cluster.loc[:, "LinkTypeId"].tolist()
            total_cluster_Y = np.append(total_cluster_Y,cluster_Y)
            avg_cluster_Y = np.append(avg_cluster_Y,cluster_Y)
            # Tuned learners expose the fitted estimator via `.learner`.
            if target_model == run_tuning_SVM_C or target_model == run_tuning_KNN_C:
                predicted_C = svm_model.learner.predict(cluster_X)
            else:
                predicted_C = svm_model.predict(cluster_X)
            total_predicted = np.append(total_predicted,predicted_C)
            avg_predicted = np.append(avg_predicted,predicted_C)
        svm_results.append(results_SVM_C(total_predicted, total_cluster_Y))# store all the SVM result report in a dictionary
    svm_results.append(results_SVM_C(avg_predicted, avg_cluster_Y))
    # call the helper method to summarize the svm results
    total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1,start,stop)
def run_kmeans_mp(word2vec_src):
    """Multiprocessing variant of run_kmeans: trains one tuned model per
    cluster via a process pool, then evaluates on the test split.

    word2vec_src: path to a pretrained gensim Word2Vec model file.
    Relies on module-level names: ``load_vec``, ``optimalK``, the
    ``run_tuning_*`` trainers, and ``results_SVM_C``.
    """
    print("# word2vec:", word2vec_src)
    word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
    data = PaperData(word2vec=word2vec_model)
    # Vectorize the train/test splits (no pickle cache).
    train_pd = load_vec(data, data.train_data, use_pkl=False)
    test_pd = load_vec(data, data.test_data, use_pkl=False)
    train_X = train_pd.loc[:, "Output"].tolist()
    # NOTE(review): passing a plain Queue() to Pool workers commonly fails;
    # a multiprocessing.Manager().Queue() is usually required — verify.
    queue = Queue()
    pool = multiprocessing.Pool()
    processes = []
    # Time the gap-statistic search for the cluster count.
    start = timeit.default_timer()
    numClusters = optimalK(pd.DataFrame(train_X))
    stop = timeit.default_timer()
    #numClusters = 5
    print("Found optimal k: " + str(numClusters))
    clf = KMeans(n_clusters=numClusters,
                 init='k-means++', max_iter=200, n_init=1)
    start0 = timeit.default_timer()
    clf.fit(train_X)
    stop0 = timeit.default_timer()
    svm_models = [] # maintain a list of svms
    s1 = timeit.default_timer()
    # Tag each training row with its cluster label.
    data.train_data['clabel'] = clf.labels_
    s2 = timeit.default_timer()
    print("Inter - ", (s2-s1))
    start1 = timeit.default_timer()
    #Change the target here as this will be used result validation purpose
    target_model = run_tuning_KNN_C
    for l in range(numClusters):
        cluster = data.train_data.loc[data.train_data['clabel'] == l]
        print("Thread No", l)
        pool.apply_async(run_tuning_KNN_C, (word2vec_src,cluster,queue,l,test_pd,))
        #t = threading.Thread(target = run_tuning_SVM_C, args = [word2vec_src,cluster,queue,l,test_pd])
    # for pr in processes:
    #    pr.start()
    # Drain exactly one result per submitted task.
    for pr in range(numClusters):
        response = queue.get()
        svm_models.append(response)
    print(svm_models)
    # Results arrive in completion order; sort by the trailing element
    # (presumably the cluster index appended by the worker — verify).
    svm_models = sorted(svm_models, key = lambda th: th[-1] )
    stop1 = timeit.default_timer()
    print(svm_models)
    svm_results = [] # maintain a list of svm results
    test_X = test_pd.loc[:, "Output"].tolist()
    predicted = clf.predict(test_X)
    data.test_data['clabel'] = predicted
    total_predicted = []
    total_cluster_Y = []
    avg_predicted = []
    avg_cluster_Y = []
    # NOTE(review): `l` below is the leftover loop index (numClusters-1);
    # assumes every cluster produced the same number of models — TODO confirm.
    for i in range(len(svm_models[l])-1):
        total_predicted = []
        total_cluster_Y = []
        for l in range(numClusters):
            cluster = data.test_data.loc[data.test_data['clabel'] == l]
            svm_model = svm_models[l][i]
            cluster_X = cluster.loc[:, "Output"].tolist()
            cluster_Y = cluster.loc[:, "LinkTypeId"].tolist()
            total_cluster_Y = np.append(total_cluster_Y,cluster_Y)
            avg_cluster_Y = np.append(avg_cluster_Y,cluster_Y)
            # Tuned learners expose the fitted estimator via `.learner`.
            if target_model == run_tuning_SVM_C or target_model == run_tuning_KNN_C:
                predicted_C = svm_model.learner.predict(cluster_X)
            else:
                predicted_C = svm_model.predict(cluster_X)
            total_predicted = np.append(total_predicted,predicted_C)
            avg_predicted = np.append(avg_predicted,predicted_C)
        svm_results.append(results_SVM_C(total_predicted, total_cluster_Y))# store all the SVM result report in a dictionary
    svm_results.append(results_SVM_C(avg_predicted, avg_cluster_Y))
    # call the helper method to summarize the svm results
    total_summary(svm_results, test_pd.shape[0],start0,start1,stop0,stop1,start,stop)
# Source: https://anaconda.org/milesgranger/gap-statistic/notebook
def optimalK(data, nrefs=3, maxClusters=15):
    """
    Calculates KMeans optimal K using Gap Statistic from Tibshirani, Walther, Hastie
    Params:
        data: ndarry of shape (n_samples, n_features)
        nrefs: number of sample reference datasets to create
        maxClusters: Maximum number of clusters to test for
    Returns: the optimal number of clusters (an int >= 1)
    """
    gaps = np.zeros((len(range(1, maxClusters)),))
    resultsdf = pd.DataFrame({'clusterCount': [], 'gap': []})
    for gap_index, k in enumerate(range(1, maxClusters)):
        # Holder for reference dispersion results
        refDisps = np.zeros(nrefs)
        # For n references, generate random sample and perform kmeans getting resulting dispersion of each loop
        for i in range(nrefs):
            # Create new random reference set
            randomReference = np.random.random_sample(size=data.shape)
            # Fit to it
            km = KMeans(n_clusters=k, init='k-means++', max_iter=200, n_init=1)
            km.fit(randomReference)
            refDisps[i] = km.inertia_
        # Fit cluster to original data and create dispersion
        km = KMeans(k)
        km.fit(data)
        origDisp = km.inertia_
        # Calculate gap statistic
        gap = np.log(np.mean(refDisps)) - np.log(origDisp)
        # Assign this loop's gap statistic to gaps
        gaps[gap_index] = gap
        # DataFrame.append was removed in pandas 2.0; build via pd.concat.
        resultsdf = pd.concat(
            [resultsdf, pd.DataFrame([{'clusterCount': k, 'gap': gap}])],
            ignore_index=True)
    # Bug fix: gap index 0 corresponds to k=1, so shift by one. Returning the
    # raw argmax could yield 0, which crashes KMeans(n_clusters=0) in callers.
    return gaps.argmax() + 1
# Not used, but wanted to put this code somewhere
def results_kmeans(clf, train_X, train_Y, test_X, test_Y):
    """Print standard clustering-quality metrics for a fitted KMeans model.

    Compares the model's training labels against ``train_Y`` and reports
    homogeneity, completeness, V-measure, adjusted Rand index, and a
    sampled silhouette coefficient on ``train_X``.
    """
    predicted = clf.predict(test_X)
    labels = clf.labels_
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(train_Y, labels))
    print("Completeness: %0.3f" %
          metrics.completeness_score(train_Y, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(train_Y, labels))
    print("Adjusted Rand-Index: %.3f"
          % metrics.adjusted_rand_score(train_Y, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(train_X, labels, sample_size=1000))
"""
Parse a sklearn classification report into a dict keyed by class name
and containing a tuple (precision, recall, fscore, support) for each class
Reference: https://gist.github.com/julienr/6b9b9a03bd8224db7b4f
"""
def parse_classification_report(clfreport):
    """Parse sklearn's classification_report text into an OrderedDict.

    Maps each class name to a (precision, recall, fscore, support) tuple;
    the trailing average row is stored under the key 'avg'.
    Reference: https://gist.github.com/julienr/6b9b9a03bd8224db7b4f
    """
    rows = [line for line in clfreport.split('\n') if line.strip()]
    # Header first, class rows in the middle, the average row last.
    header, body, avg_row = rows[0], rows[1:-1], rows[-1]
    assert header.split() == ['precision', 'recall', 'f1-score', 'support']
    assert avg_row.split()[0] == 'avg'
    # Class names may contain spaces; their column width equals the
    # indentation preceding the 'precision' header.
    name_width = len(header) - len(header.lstrip())

    def split_row(row):
        """Return (name, precision, recall, fscore, support) for one row."""
        name = row[:name_width].strip()
        prec, rec, f1, sup = row[name_width:].split()
        return (name, float(prec), float(rec), float(f1), int(sup))

    parsed = collections.OrderedDict()
    for row in body:
        fields = split_row(row)
        parsed[fields[0]] = fields[1:]
    parsed['avg'] = split_row(avg_row)[1:]  # average
    return parsed
#################Katie's Code +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def prepare_word2vec():
    """Download the pretrained word2vec archive and unpack it in place."""
    print("Downloading pretrained word2vec models")
    url = "https://zenodo.org/record/807727/files/word2vecs_models.zip"
    archive = wget.download(url)
    with zipfile.ZipFile(archive, "r") as zf:
        zf.extractall()
if __name__ == "__main__":
    # Entry point: ensure the pretrained word2vec models exist locally,
    # then run the tuned MLP pipeline with a fixed seed.
    word_src = "word2vecs_models"
    threads = []  # shared worker list used by the run_kmeans* functions
    warnings.filterwarnings("ignore")
    if not os.path.exists(word_src):
        prepare_word2vec()
    elif len(os.listdir(word_src)) == 0:
        # Directory exists but is empty: remove and re-download.
        os.rmdir(word_src)
        prepare_word2vec()
    for x in range(1):
        random.seed(x)
        np.random.seed(x)
        # Skip auxiliary "syn" files shipped alongside the gensim models.
        myword2vecs = [os.path.join(word_src, i) for i in os.listdir(word_src)
                   if "syn" not in i]
        #run_MLP(myword2vecs[x])
        run_tuning_MLP(myword2vecs[x])
        #run_KNN_baseline(myword2vecs[x])
        #run_SVM_baseline(myword2vecs[x])
        #print("Run completed for baseline model--------------------------------------------------")
        #run_tuning_SVM(myword2vecs[x])
        #run_tuning_KNN(myword2vecs[x])
#print("Run completed for DE model--------------------------------------------------") | [
"numpy.random.seed",
"numpy.random.random_sample",
"sklearn.metrics.v_measure_score",
"sklearn.metrics.classification_report",
"os.path.isfile",
"pickle.load",
"numpy.mean",
"sklearn.neural_network.MLPClassifier",
"sklearn.svm.SVC",
"multiprocessing.Queue",
"sklearn.metrics.adjusted_rand_score",... | [((1916, 1950), 'os.path.sep.join', 'os.path.sep.join', (["['20171103.txt']"], {}), "(['20171103.txt'])\n", (1932, 1950), False, 'import os\n'), ((2141, 2175), 'results.results_process.reports', 'results_process.reports', (['file_name'], {}), '(file_name)\n', (2164, 2175), False, 'from results import results_process\n'), ((2635, 2676), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (2662, 2676), False, 'import gensim\n'), ((2686, 2720), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (2695, 2720), False, 'from model import PaperData\n'), ((3036, 3058), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3056, 3058), False, 'import timeit\n'), ((3998, 4020), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4018, 4020), False, 'import timeit\n'), ((4473, 4514), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (4500, 4514), False, 'import gensim\n'), ((4524, 4558), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (4533, 4558), False, 'from model import PaperData\n'), ((4874, 4896), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4894, 4896), False, 'import timeit\n'), ((5836, 5858), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5856, 5858), False, 'import timeit\n'), ((6312, 6353), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (6339, 6353), False, 'import gensim\n'), ((6363, 6397), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (6372, 6397), False, 'from model import PaperData\n'), ((6681, 6703), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6701, 6703), False, 'import timeit\n'), ((7570, 7592), 
'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7590, 7592), False, 'import timeit\n'), ((7900, 7934), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(0.005)'}), "(kernel='rbf', gamma=0.005)\n", (7907, 7934), False, 'from sklearn import svm\n'), ((7954, 7995), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (7981, 7995), False, 'import gensim\n'), ((8005, 8039), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (8014, 8039), False, 'from model import PaperData\n'), ((8360, 8382), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8380, 8382), False, 'import timeit\n'), ((8420, 8442), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8440, 8442), False, 'import timeit\n'), ((8655, 8727), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']"}), "(test_Y, predicted, labels=['1', '2', '3', '4'])\n", (8679, 8727), False, 'from sklearn import metrics\n'), ((9041, 9086), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (9071, 9086), False, 'from sklearn import neighbors\n'), ((9108, 9149), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (9135, 9149), False, 'import gensim\n'), ((9159, 9193), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (9168, 9193), False, 'from model import PaperData\n'), ((9514, 9536), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9534, 9536), False, 'import timeit\n'), ((9574, 9596), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9594, 9596), False, 'import timeit\n'), ((9809, 9881), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_Y', 'predicted'], 
{'labels': "['1', '2', '3', '4']"}), "(test_Y, predicted, labels=['1', '2', '3', '4'])\n", (9833, 9881), False, 'from sklearn import metrics\n'), ((10188, 10203), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (10201, 10203), False, 'from sklearn.neural_network import MLPClassifier\n'), ((10223, 10264), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (10250, 10264), False, 'import gensim\n'), ((10274, 10308), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (10283, 10308), False, 'from model import PaperData\n'), ((10629, 10651), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (10649, 10651), False, 'import timeit\n'), ((10689, 10711), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (10709, 10711), False, 'import timeit\n'), ((10924, 10996), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']"}), "(test_Y, predicted, labels=['1', '2', '3', '4'])\n", (10948, 10996), False, 'from sklearn import metrics\n'), ((11234, 11268), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(0.005)'}), "(kernel='rbf', gamma=0.005)\n", (11241, 11268), False, 'from sklearn import svm\n'), ((11633, 11655), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11653, 11655), False, 'import timeit\n'), ((11693, 11715), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11713, 11715), False, 'import timeit\n'), ((11898, 11943), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (11928, 11943), False, 'from sklearn import neighbors\n'), ((12310, 12332), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (12330, 12332), False, 'import timeit\n'), ((12370, 12392), 'timeit.default_timer', 'timeit.default_timer', ([], 
{}), '()\n', (12390, 12392), False, 'import timeit\n'), ((16050, 16142), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (16079, 16142), False, 'from sklearn import metrics\n'), ((16497, 16589), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (16526, 16589), False, 'from sklearn import metrics\n'), ((17161, 17196), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['report_data'], {}), '(report_data)\n', (17183, 17196), True, 'import pandas as pd\n'), ((18049, 18090), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (18076, 18090), False, 'import gensim\n'), ((18100, 18134), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (18109, 18134), False, 'from model import PaperData\n'), ((18310, 18317), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (18315, 18317), False, 'from multiprocessing import Queue\n'), ((18329, 18351), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18349, 18351), False, 'import timeit\n'), ((18409, 18431), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18429, 18431), False, 'import timeit\n'), ((18507, 18579), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numClusters', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1)\n", (18513, 18579), False, 'from sklearn.cluster import KMeans\n'), ((18609, 18631), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18629, 18631), False, 'import timeit\n'), ((18661, 18683), 'timeit.default_timer', 
'timeit.default_timer', ([], {}), '()\n', (18681, 18683), False, 'import timeit\n'), ((18737, 18759), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18757, 18759), False, 'import timeit\n'), ((18809, 18831), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18829, 18831), False, 'import timeit\n'), ((18872, 18894), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18892, 18894), False, 'import timeit\n'), ((19407, 19429), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (19427, 19429), False, 'import timeit\n'), ((20938, 20979), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (20965, 20979), False, 'import gensim\n'), ((20989, 21023), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (20998, 21023), False, 'from model import PaperData\n'), ((21199, 21206), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (21204, 21206), False, 'from multiprocessing import Queue\n'), ((21218, 21240), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21238, 21240), False, 'import timeit\n'), ((21298, 21320), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21318, 21320), False, 'import timeit\n'), ((21396, 21468), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numClusters', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1)\n", (21402, 21468), False, 'from sklearn.cluster import KMeans\n'), ((21498, 21520), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21518, 21520), False, 'import timeit\n'), ((21550, 21572), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21570, 21572), False, 'import timeit\n'), ((21626, 21648), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21646, 21648), False, 'import timeit\n'), ((21698, 21720), 
'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21718, 21720), False, 'import timeit\n'), ((21761, 21783), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21781, 21783), False, 'import timeit\n'), ((22422, 22444), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (22442, 22444), False, 'import timeit\n'), ((23939, 23980), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['word2vec_src'], {}), '(word2vec_src)\n', (23966, 23980), False, 'import gensim\n'), ((23990, 24024), 'model.PaperData', 'PaperData', ([], {'word2vec': 'word2vec_model'}), '(word2vec=word2vec_model)\n', (23999, 24024), False, 'from model import PaperData\n'), ((24200, 24207), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (24205, 24207), False, 'from multiprocessing import Queue\n'), ((24217, 24239), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (24237, 24239), False, 'import multiprocessing\n'), ((24268, 24290), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24288, 24290), False, 'import timeit\n'), ((24348, 24370), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24368, 24370), False, 'import timeit\n'), ((24446, 24518), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numClusters', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=numClusters, init='k-means++', max_iter=200, n_init=1)\n", (24452, 24518), False, 'from sklearn.cluster import KMeans\n'), ((24548, 24570), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24568, 24570), False, 'import timeit\n'), ((24600, 24622), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24620, 24622), False, 'import timeit\n'), ((24676, 24698), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24696, 24698), False, 'import timeit\n'), ((24748, 24770), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24768, 24770), False, 
'import timeit\n'), ((24811, 24833), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24831, 24833), False, 'import timeit\n'), ((25482, 25504), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (25502, 25504), False, 'import timeit\n'), ((27373, 27418), 'pandas.DataFrame', 'pd.DataFrame', (["{'clusterCount': [], 'gap': []}"], {}), "({'clusterCount': [], 'gap': []})\n", (27385, 27418), True, 'import pandas as pd\n'), ((30446, 30471), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (30469, 30471), False, 'import collections\n'), ((31128, 31161), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (31151, 31161), False, 'import warnings\n'), ((1527, 1552), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (1541, 1552), False, 'import os\n'), ((1797, 1854), 'os.path.sep.join', 'os.path.sep.join', (["['.', 'results', '%Y%m%d_%H:%M:%S.txt']"], {}), "(['.', 'results', '%Y%m%d_%H:%M:%S.txt'])\n", (1813, 1854), False, 'import os\n'), ((3118, 3191), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (3133, 3191), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((4956, 5029), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (4971, 5029), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((6763, 6836), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (6778, 6836), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((8485, 8577), 
'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (8514, 8577), False, 'from sklearn import metrics\n'), ((9639, 9731), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (9668, 9731), False, 'from sklearn import metrics\n'), ((10754, 10846), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_Y', 'predicted'], {'labels': "['1', '2', '3', '4']", 'digits': '(3)'}), "(test_Y, predicted, labels=['1', '2', '3', '4'\n ], digits=3)\n", (10783, 10846), False, 'from sklearn import metrics\n'), ((13300, 13373), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (13315, 13373), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((14931, 15004), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["train_pd.loc[:, 'LinkTypeId'].values", 'fold'], {'shuffle': '(True)'}), "(train_pd.loc[:, 'LinkTypeId'].values, fold, shuffle=True)\n", (14946, 15004), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((18377, 18398), 'pandas.DataFrame', 'pd.DataFrame', (['train_X'], {}), '(train_X)\n', (18389, 18398), True, 'import pandas as pd\n'), ((19162, 19247), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_tuning_KNN_C', 'args': '[word2vec_src, cluster, queue, l]'}), '(target=run_tuning_KNN_C, args=[word2vec_src, cluster,\n queue, l])\n', (19178, 19247), False, 'import threading\n'), ((21266, 21287), 'pandas.DataFrame', 'pd.DataFrame', (['train_X'], {}), '(train_X)\n', (21278, 21287), True, 'import pandas as pd\n'), 
((22109, 22203), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_tuning_SVM_C', 'args': '[word2vec_src, cluster, queue, l, test_pd]'}), '(target=run_tuning_SVM_C, args=[word2vec_src, cluster,\n queue, l, test_pd])\n', (22125, 22203), False, 'import threading\n'), ((24316, 24337), 'pandas.DataFrame', 'pd.DataFrame', (['train_X'], {}), '(train_X)\n', (24328, 24337), True, 'import pandas as pd\n'), ((27537, 27552), 'numpy.zeros', 'np.zeros', (['nrefs'], {}), '(nrefs)\n', (27545, 27552), True, 'import numpy as np\n'), ((28044, 28053), 'sklearn.cluster.KMeans', 'KMeans', (['k'], {}), '(k)\n', (28050, 28053), False, 'from sklearn.cluster import KMeans\n'), ((30981, 31012), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (30996, 31012), False, 'import zipfile\n'), ((31171, 31195), 'os.path.exists', 'os.path.exists', (['word_src'], {}), '(word_src)\n', (31185, 31195), False, 'import os\n'), ((31330, 31344), 'random.seed', 'random.seed', (['x'], {}), '(x)\n', (31341, 31344), False, 'import random\n'), ((31349, 31366), 'numpy.random.seed', 'np.random.seed', (['x'], {}), '(x)\n', (31363, 31366), True, 'import numpy as np\n'), ((20093, 20130), 'numpy.append', 'np.append', (['total_cluster_Y', 'cluster_Y'], {}), '(total_cluster_Y, cluster_Y)\n', (20102, 20130), True, 'import numpy as np\n'), ((20152, 20187), 'numpy.append', 'np.append', (['avg_cluster_Y', 'cluster_Y'], {}), '(avg_cluster_Y, cluster_Y)\n', (20161, 20187), True, 'import numpy as np\n'), ((20416, 20455), 'numpy.append', 'np.append', (['total_predicted', 'predicted_C'], {}), '(total_predicted, predicted_C)\n', (20425, 20455), True, 'import numpy as np\n'), ((20477, 20514), 'numpy.append', 'np.append', (['avg_predicted', 'predicted_C'], {}), '(avg_predicted, predicted_C)\n', (20486, 20514), True, 'import numpy as np\n'), ((23091, 23128), 'numpy.append', 'np.append', (['total_cluster_Y', 'cluster_Y'], {}), '(total_cluster_Y, cluster_Y)\n', (23100, 23128), True, 
'import numpy as np\n'), ((23150, 23185), 'numpy.append', 'np.append', (['avg_cluster_Y', 'cluster_Y'], {}), '(avg_cluster_Y, cluster_Y)\n', (23159, 23185), True, 'import numpy as np\n'), ((23414, 23453), 'numpy.append', 'np.append', (['total_predicted', 'predicted_C'], {}), '(total_predicted, predicted_C)\n', (23423, 23453), True, 'import numpy as np\n'), ((23475, 23512), 'numpy.append', 'np.append', (['avg_predicted', 'predicted_C'], {}), '(avg_predicted, predicted_C)\n', (23484, 23512), True, 'import numpy as np\n'), ((26144, 26181), 'numpy.append', 'np.append', (['total_cluster_Y', 'cluster_Y'], {}), '(total_cluster_Y, cluster_Y)\n', (26153, 26181), True, 'import numpy as np\n'), ((26203, 26238), 'numpy.append', 'np.append', (['avg_cluster_Y', 'cluster_Y'], {}), '(avg_cluster_Y, cluster_Y)\n', (26212, 26238), True, 'import numpy as np\n'), ((26467, 26506), 'numpy.append', 'np.append', (['total_predicted', 'predicted_C'], {}), '(total_predicted, predicted_C)\n', (26476, 26506), True, 'import numpy as np\n'), ((26528, 26565), 'numpy.append', 'np.append', (['avg_predicted', 'predicted_C'], {}), '(avg_predicted, predicted_C)\n', (26537, 26565), True, 'import numpy as np\n'), ((27756, 27796), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'data.shape'}), '(size=data.shape)\n', (27779, 27796), True, 'import numpy as np\n'), ((27827, 27889), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'init': '"""k-means++"""', 'max_iter': '(200)', 'n_init': '(1)'}), "(n_clusters=k, init='k-means++', max_iter=200, n_init=1)\n", (27833, 27889), False, 'from sklearn.cluster import KMeans\n'), ((28209, 28225), 'numpy.log', 'np.log', (['origDisp'], {}), '(origDisp)\n', (28215, 28225), True, 'import numpy as np\n'), ((28722, 28769), 'sklearn.metrics.homogeneity_score', 'metrics.homogeneity_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (28747, 28769), False, 'from sklearn import metrics\n'), ((28811, 28859), 
'sklearn.metrics.completeness_score', 'metrics.completeness_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (28837, 28859), False, 'from sklearn import metrics\n'), ((28890, 28935), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (28913, 28935), False, 'from sklearn import metrics\n'), ((28983, 29032), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['train_Y', 'clf.labels_'], {}), '(train_Y, clf.labels_)\n', (29010, 29032), False, 'from sklearn import metrics\n'), ((29084, 29148), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['train_X', 'clf.labels_'], {'sample_size': '(1000)'}), '(train_X, clf.labels_, sample_size=1000)\n', (29108, 29148), False, 'from sklearn import metrics\n'), ((31263, 31281), 'os.rmdir', 'os.rmdir', (['word_src'], {}), '(word_src)\n', (31271, 31281), False, 'import os\n'), ((31386, 31411), 'os.path.join', 'os.path.join', (['word_src', 'i'], {}), '(word_src, i)\n', (31398, 31411), False, 'import os\n'), ((1616, 1638), 'pickle.load', 'pickle.load', (['my_pickle'], {}), '(my_pickle)\n', (1627, 1638), False, 'import pickle\n'), ((28188, 28205), 'numpy.mean', 'np.mean', (['refDisps'], {}), '(refDisps)\n', (28195, 28205), True, 'import numpy as np\n'), ((31231, 31251), 'os.listdir', 'os.listdir', (['word_src'], {}), '(word_src)\n', (31241, 31251), False, 'import os\n'), ((31421, 31441), 'os.listdir', 'os.listdir', (['word_src'], {}), '(word_src)\n', (31431, 31441), False, 'import os\n')] |
def transform_scalars(dataset):
    """
    Normalize tilt series so that each tilt image has the same total intensity.
    """
    from tomviz import utils
    import numpy as np
    data = utils.get_array(dataset)  # Fetch the dataset as a numpy array.
    if data is None:  # Bail out early when the dataset carries no array.
        raise RuntimeError("No data array found!")
    # Work in floating point so the per-image rescaling is not truncated.
    data = data.astype(np.float32)
    # Target intensity: total intensity of the average tilt image.
    intensity = np.sum(np.average(data, 2))
    # Rescale every tilt image (slices along axis 2) to the target intensity.
    for idx in range(data.shape[2]):
        data[:, :, idx] = data[:, :, idx] / np.sum(data[:, :, idx]) * intensity
    utils.set_array(dataset, data)
| [
"numpy.average",
"tomviz.utils.get_array",
"numpy.sum",
"tomviz.utils.set_array"
] | [((193, 217), 'tomviz.utils.get_array', 'utils.get_array', (['dataset'], {}), '(dataset)\n', (208, 217), False, 'from tomviz import utils\n'), ((662, 692), 'tomviz.utils.set_array', 'utils.set_array', (['dataset', 'data'], {}), '(dataset, data)\n', (677, 692), False, 'from tomviz import utils\n'), ((486, 505), 'numpy.average', 'np.average', (['data', '(2)'], {}), '(data, 2)\n', (496, 505), True, 'import numpy as np\n'), ((623, 644), 'numpy.sum', 'np.sum', (['data[:, :, i]'], {}), '(data[:, :, i])\n', (629, 644), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from matplotlib import pyplot as plt
from torch import nn as nn
from torch.nn import functional as F
from habitat_baselines.slambased.utils import generate_2dgrid
def safe_roi_2d(array2d, ymin, ymax, xmin, xmax):
    """Clamp a 2D region of interest to the bounds of *array2d*.

    Returns ``(ymin, ymax, xmin, xmax)`` with the lower bounds raised to 0
    and the upper bounds capped at the array's height/width.
    """
    height, width = array2d.shape
    clamped_ymin = max(0, ymin)
    clamped_ymax = min(ymax, height)
    clamped_xmin = max(0, xmin)
    clamped_xmax = min(xmax, width)
    return clamped_ymin, clamped_ymax, clamped_xmin, clamped_xmax
def f2ind(ten, i):
    """Round the float coordinate ``ten[i]`` and return it as a long tensor index."""
    value = ten[i]
    return value.round().long()
def init_neights_to_channels(ks=3):
    r"""Build a ``(ks*ks, 1, ks, ks)`` convolution kernel that copies each
    cell of a ``ks x ks`` neighborhood into its own output channel.
    """
    kernel = np.zeros((ks * ks, 1, ks, ks), dtype=np.float32)
    for col in range(ks):
        for row in range(ks):
            # Channel index is column-major: channel = col * ks + row.
            kernel[col * ks + row, 0, row, col] = 1.0
    return kernel
class SoftArgMin(nn.Module):
    """Differentiable arg-min: softmax-weighted average coordinate of a cost map."""

    def __init__(self, beta=5):
        super(SoftArgMin, self).__init__()
        # Sharpness of the softmax; larger beta approaches a hard arg-min.
        self.beta = beta
        return

    def forward(self, x, coords2d=None):
        """Return the (2,) softmax-weighted coordinate of the minimum of *x*."""
        weights = F.softmax((-x).view(1, -1) * self.beta, dim=1)
        if coords2d is None:
            coords2d = generate_2dgrid(x.size(2), x.size(3), False)
        flat_coords = coords2d.view(2, -1)
        weighted_sum = (weights.expand_as(flat_coords) * flat_coords).sum(dim=1)
        return weighted_sum / weights.sum(dim=1)
class HardArgMin(nn.Module):
    """Non-differentiable arg-min: the exact coordinate of the minimum element."""

    def __init__(self):
        super(HardArgMin, self).__init__()
        return

    def forward(self, x, coords2d=None):
        """Return the (2,) coordinate of the minimum element of *x*."""
        flat = x.view(-1)
        min_value, min_index = flat.min(dim=0)
        if coords2d is None:
            coords2d = generate_2dgrid(x.size(2), x.size(3), False)
        flat_coords = coords2d.view(2, -1)
        return flat_coords[:, min_index].view(2)
class DifferentiableStarPlanner(nn.Module):
    """Grid path planner implemented with torch ops.

    Computes a cost-to-go map ``g_map`` over a 2D obstacle grid by
    repeatedly relaxing each cell against its 8-neighborhood (min over
    ``neights2channels`` output plus local step costs), tracking open and
    closed cells with max-pooled masks, then greedily descends ``g_map``
    from the goal back to the start in ``reconstruct_path``.
    """
    def __init__(
        self,
        max_steps=500,
        visualize=False,
        preprocess=False,
        beta=100,
        connectivity="eight",
        device=torch.device("cpu"),  # noqa: B008
        **kwargs
    ):
        # max_steps: relaxation-iteration budget (recomputed in forward()).
        # visualize: when True, forward() opens a matplotlib figure of g_map.
        # preprocess: when True, obstacle maps are smoothed by preprocessNet.
        # beta: softness parameter (only used by the commented-out SoftArgMin).
        # connectivity: only "eight" is implemented; anything else raises.
        super(DifferentiableStarPlanner, self).__init__()
        self.eps = 1e-12
        self.max_steps = max_steps
        self.visualize = visualize
        self.inf = 1e7
        self.ob_cost = 10000.0
        self.device = device
        self.beta = beta
        self.preprocess = preprocess
        # self.argmin = SoftArgMin(beta)
        self.argmin = HardArgMin()
        # Fixed conv that spreads a 3x3 neighborhood into 9 channels.
        self.neights2channels = nn.Conv2d(1, 9, kernel_size=(3, 3), bias=False)
        self.neights2channels.weight.data = torch.from_numpy(
            init_neights_to_channels(3)
        )
        self.neights2channels.to(device)
        # Near-identity smoothing kernel used when preprocess=True.
        self.preprocessNet = nn.Conv2d(
            1, 1, kernel_size=(3, 3), padding=1, bias=False
        )
        self.preprocessNet.weight.data = torch.from_numpy(
            np.array(
                [
                    [
                        [
                            [0.00001, 0.0001, 0.00001],
                            [0.0001, 1, 0.0001],
                            [0.00001, 0.0001, 0.00001],
                        ]
                    ]
                ],
                dtype=np.float32,
            )
        )
        self.preprocessNet.to(device)
        if connectivity == "eight":
            # Fixed finite-difference kernels for coordinate deltas to each
            # of the four axis-aligned neighbors.
            self.gx_to_right = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False)
            self.gx_to_right.weight.data = torch.from_numpy(
                np.array([[[[0, 1, -1]]]], dtype=np.float32)
            )
            self.gx_to_right.to(device)
            self.gx_to_left = nn.Conv2d(1, 1, kernel_size=(1, 3), bias=False)
            self.gx_to_left.weight.data = torch.from_numpy(
                np.array([[[[-1, 1, 0]]]], dtype=np.float32)
            )
            self.gx_to_left.to(device)
            self.gy_to_up = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False)
            self.gy_to_up.weight.data = torch.from_numpy(
                np.array([[[[0], [1], [-1]]]], dtype=np.float32)
            )
            self.gy_to_up.to(device)
            self.gy_to_down = nn.Conv2d(1, 1, kernel_size=(3, 1), bias=False)
            self.gy_to_down.weight.data = torch.from_numpy(
                np.array([[[[-1], [1], [0]]]], dtype=np.float32)
            )
            self.gy_to_down.to(device)
        else:
            raise ValueError('Only "eight" connectivity now supported')
        return
    def preprocess_obstacle_map(self, obstacle_map):
        """Optionally smooth the obstacle map with preprocessNet."""
        if self.preprocess:
            return self.preprocessNet(obstacle_map)
        return obstacle_map
    def coords2grid(self, node_coords, h, w):
        """Map pixel coordinates to [-1, 1] grid coordinates (x, y order)."""
        grid = node_coords.squeeze() - torch.FloatTensor(
            (h / 2.0, w / 2.0)
        ).to(self.device)
        grid = grid / torch.FloatTensor((h / 2.0, w / 2.0)).to(self.device)
        return grid.view(1, 1, 1, 2).flip(3)
    def init_closelistmap(self):
        """Closed-set mask: initially empty (all zeros)."""
        return torch.zeros_like(self.start_map).float()
    def init_openlistmap(self):
        """Open-set mask: initially just the start cell."""
        return self.start_map.clone()
    def init_g_map(self):
        """Cost-to-go map: 0 at the start cell, self.inf everywhere else."""
        return torch.clamp(
            self.inf
            * (torch.ones_like(self.start_map) - self.start_map.clone()),
            min=0,
            max=self.inf,
        )
    def safe_roi_2d(self, ymin, ymax, xmin, xmax):
        """Round a float ROI to ints and clamp it to the map bounds."""
        return (
            int(max(0, torch.round(ymin).item())),
            int(min(torch.round(ymax).item(), self.height)),
            int(max(0, torch.round(xmin).item())),
            int(min(torch.round(xmax).item(), self.width)),
        )
    def forward(
        self,
        obstacles,
        coords,
        start_map,
        goal_map,
        non_obstacle_cost_map=None,
        additional_steps=50,
        return_path=True,
    ):
        """Plan a path from start to goal on the given obstacle grid.

        Relaxes ``g_map`` over a growing ROI around the start until the
        goal cell is closed (or ``max_steps`` iterations elapse), then
        optionally runs ``additional_steps`` full-map relaxations and
        reconstructs the path.  Returns ``(out_path, cost)`` when
        *return_path* is True, otherwise ``None``.

        NOTE(review): all inputs appear to be (1, 1, H, W) tensors except
        *coords* which is indexed as (1, 2, H, W) — confirm against callers.
        """
        self.trav_init_time = 0
        self.trav_mask_time = 0
        self.trav_soft_time = 0
        self.conv_time = 0
        self.close_time = 0
        self.obstacles = self.preprocess_obstacle_map(
            obstacles.to(self.device)
        )
        self.start_map = start_map.to(self.device)
        self.been_there = torch.zeros_like(self.start_map).to(
            torch.device("cpu")
        )
        self.coords = coords.to(self.device)
        self.goal_map = goal_map.to(self.device)
        self.been_there = torch.zeros_like(self.goal_map).to(self.device)
        self.height = obstacles.size(2)
        self.width = obstacles.size(3)
        m, goal_idx = torch.max(self.goal_map.view(-1), 0)
        c_map = self.calculate_local_path_costs(non_obstacle_cost_map)
        # c_map might be non persistent in map update
        self.g_map = self.init_g_map()
        self.close_list_map = self.init_closelistmap()
        self.open_list_map = self.init_openlistmap()
        not_done = False
        step = 0
        stopped_by_max_iter = False
        if self.visualize:
            self.fig, self.ax = plt.subplots(1, 1)
            self.image = self.ax.imshow(
                self.g_map.squeeze().cpu().detach().numpy().astype(np.float32),
                animated=True,
            )
            self.fig.canvas.draw()
        # Keep iterating while the goal is not yet closed or its cost is
        # still effectively "obstacle-level".
        not_done = (self.close_list_map.view(-1)[goal_idx].item() < 1.0) or (
            self.g_map.view(-1)[goal_idx].item() >= 0.9 * self.ob_cost
        )
        rad = 1
        self.start_coords = (
            (self.coords * self.start_map.expand_as(self.coords))
            .sum(dim=2)
            .sum(dim=2)
            .squeeze()
        )
        node_coords = self.start_coords
        self.goal_coords = (
            (self.coords * self.goal_map.expand_as(self.coords))
            .sum(dim=2)
            .sum(dim=2)
            .squeeze()
        )
        # Iteration budget proportional to the start-goal distance;
        # overrides the constructor's max_steps.
        self.max_steps = 4 * int(
            torch.sqrt(
                ((self.start_coords - self.goal_coords) ** 2).sum() + 1e-6
            ).item()
        )
        while not_done:
            # ROI of radius `rad` around the start; grows by 1 each pass.
            ymin, ymax, xmin, xmax = self.safe_roi_2d(
                node_coords[0] - rad,
                node_coords[0] + rad + 1,
                node_coords[1] - rad,
                node_coords[1] + rad + 1,
            )
            if (
                (ymin - 1 > 0)
                and (xmin - 1 > 0)
                and (ymax + 1 < self.height)
                and (xmax + 1 < self.width)
            ):
                # ROI fully interior: relax only inside the window.
                n2c = self.neights2channels(
                    self.g_map[:, :, ymin - 1 : ymax + 1, xmin - 1 : xmax + 1]
                )
                self.g_map[:, :, ymin:ymax, xmin:xmax] = torch.min(
                    self.g_map[:, :, ymin:ymax, xmin:xmax].clone(),
                    (n2c + c_map[:, :, ymin:ymax, xmin:xmax]).min(
                        dim=1, keepdim=True
                    )[0],
                )
                self.close_list_map[:, :, ymin:ymax, xmin:xmax] = torch.max(
                    self.close_list_map[:, :, ymin:ymax, xmin:xmax],
                    self.open_list_map[:, :, ymin:ymax, xmin:xmax],
                )
                # New open cells: neighbors of open cells that are neither
                # closed nor obstacles.
                self.open_list_map[:, :, ymin:ymax, xmin:xmax] = F.relu(
                    F.max_pool2d(
                        self.open_list_map[
                            :, :, ymin - 1 : ymax + 1, xmin - 1 : xmax + 1
                        ],
                        3,
                        stride=1,
                        padding=0,
                    )
                    - self.close_list_map[:, :, ymin:ymax, xmin:xmax]
                    - self.obstacles[:, :, ymin:ymax, xmin:xmax]
                )
            else:
                # ROI touches the border: fall back to full-map relaxation.
                self.g_map = torch.min(
                    self.g_map,
                    (
                        self.neights2channels(
                            F.pad(self.g_map, (1, 1, 1, 1), "replicate")
                        )
                        + c_map
                    ).min(dim=1, keepdim=True)[0],
                )
                self.close_list_map = torch.max(
                    self.close_list_map, self.open_list_map
                )
                self.open_list_map = F.relu(
                    F.max_pool2d(self.open_list_map, 3, stride=1, padding=1)
                    - self.close_list_map
                    - self.obstacles
                )
            step += 1
            if step >= self.max_steps:
                stopped_by_max_iter = True
                break
            not_done = (
                self.close_list_map.view(-1)[goal_idx].item() < 1.0
            ) or (self.g_map.view(-1)[goal_idx].item() >= 0.1 * self.inf)
            rad += 1
        if not stopped_by_max_iter:
            for _ in range(additional_steps):
                # now propagating beyong start point
                self.g_map = torch.min(
                    self.g_map,
                    (
                        self.neights2channels(
                            F.pad(self.g_map, (1, 1, 1, 1), "replicate")
                        )
                        + c_map
                    ).min(dim=1, keepdim=True)[0],
                )
                self.close_list_map = torch.max(
                    self.close_list_map, self.open_list_map
                )
                self.open_list_map = F.relu(
                    F.max_pool2d(self.open_list_map, 3, stride=1, padding=1)
                    - self.close_list_map
                    - self.obstacles
                )
        if return_path:
            out_path, cost = self.reconstruct_path()
            return out_path, cost
        return None
    def calculate_local_path_costs(self, non_obstacle_cost_map=None):
        """Per-cell step costs to each of the 9 neighbors (incl. center).

        Each channel is the Euclidean coordinate distance to that neighbor
        plus ``ob_cost`` whenever either endpoint is an obstacle; a clamped
        bias map (*non_obstacle_cost_map* or ones) is added on top.
        """
        coords = self.coords
        h = coords.size(2)
        w = coords.size(3)
        obstacles_pd = F.pad(self.obstacles, (1, 1, 1, 1), "replicate")
        if non_obstacle_cost_map is None:
            learned_bias = torch.ones_like(self.obstacles).to(
                obstacles_pd.device
            )
        else:
            learned_bias = non_obstacle_cost_map.to(obstacles_pd.device)
        # Squared coordinate differences toward each axis-aligned neighbor.
        left_diff_sq = (
            self.gx_to_left(
                F.pad(coords[:, 1:2, :, :], (1, 1, 0, 0), "replicate")
            )
            ** 2
        )
        right_diff_sq = (
            self.gx_to_right(
                F.pad(coords[:, 1:2, :, :], (1, 1, 0, 0), "replicate")
            )
            ** 2
        )
        up_diff_sq = (
            self.gy_to_up(
                F.pad(coords[:, 0:1, :, :], (0, 0, 1, 1), "replicate")
            )
            ** 2
        )
        down_diff_sq = (
            self.gy_to_down(
                F.pad(coords[:, 0:1, :, :], (0, 0, 1, 1), "replicate")
            )
            ** 2
        )
        out = torch.cat(
            [
                # Order in from up to down, from left to right
                # hopefully same as in PyTorch
                torch.sqrt(left_diff_sq + up_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 0:w],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(left_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 1 : w + 1],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(left_diff_sq + down_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 2 : h + 2, 0:w],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(up_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 1 : w + 1],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                0 * right_diff_sq
                + self.ob_cost
                * obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],  # current center
                torch.sqrt(down_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 2 : h + 2, 1 : w + 1],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(right_diff_sq + up_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 0:h, 2 : w + 2],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(right_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 1 : h + 1, 2 : w + 2],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
                torch.sqrt(right_diff_sq + down_diff_sq + self.eps)
                + self.ob_cost
                * torch.max(
                    obstacles_pd[:, :, 2 : h + 2, 2 : w + 2],
                    obstacles_pd[:, :, 1 : h + 1, 1 : w + 1],
                ),
            ],
            dim=1,
        )
        return out + torch.clamp(
            learned_bias.expand_as(out), min=0, max=self.ob_cost
        )
    def propagate_traversal(self, node_coords, close, g, coords):
        """Pick the next path node: the lowest-g closed neighbor of *node_coords*.

        Falls back to not-yet-visited neighbors when no closed neighbor
        exists; random jitter breaks ties among multiple candidates.
        """
        ymin, ymax, xmin, xmax = self.safe_roi_2d(
            node_coords[0] - 1,
            node_coords[0] + 2,
            node_coords[1] - 1,
            node_coords[1] + 2,
        )
        mask = close[:, :, ymin:ymax, xmin:xmax] > 0
        # Exclude the current node itself from the candidate set.
        mask[
            :, :, f2ind(node_coords, 0) - ymin, f2ind(node_coords, 1) - xmin
        ] = 0
        mask = mask > 0
        current_g_cost = g[:, :, ymin:ymax, xmin:xmax][mask].clone()
        if len(current_g_cost.view(-1)) == 0:
            # we are kind surrounded by obstacles,
            # but still need to output something
            mask = torch.relu(
                1.0 - self.been_there[:, :, ymin:ymax, xmin:xmax]
            )
            mask[
                :,
                :,
                f2ind(node_coords, 0) - ymin,
                f2ind(node_coords, 1) - xmin,
            ] = 0
            mask = mask > 0
            current_g_cost = g[:, :, ymin:ymax, xmin:xmax][mask].clone()
        if len(current_g_cost.view(-1)) > 1:
            # Shift to zero minimum, then add tiny random jitter (< 0.41)
            # so argmin does not repeatedly pick the same tied cell.
            current_g_cost = current_g_cost - torch.min(current_g_cost).item()
            current_g_cost = (
                current_g_cost
                + 0.41
                * torch.randperm(
                    len(current_g_cost),
                    dtype=torch.float32,
                    device=torch.device("cpu"),
                )
                / (len(current_g_cost))
            )
        #
        coords_roi = coords[:, :, ymin:ymax, xmin:xmax]
        out = self.argmin(
            current_g_cost, coords_roi[mask.expand_as(coords_roi)]
        )
        return out
    def get_clean_costmap_and_goodmask(self):
        """Return (clamped cost-to-go map, mask of cells away from obstacles)."""
        good_mask = 1 - F.max_pool2d(self.obstacles, 3, stride=1, padding=1)
        costmap = self.g_map
        obstacle_cost_corrected = 10000.0
        sampling_map = torch.clamp(costmap, min=0, max=obstacle_cost_corrected)
        return sampling_map, good_mask
    def reconstruct_path(self):
        """Greedily descend g_map from the goal toward the start.

        Returns ``(out_path, cost)`` where out_path is a goal-to-start list
        of coordinate tensors and cost is g_map at the goal.  Raises
        ValueError if traversal revisits the previous node; gives up after
        250 steps.
        """
        out_path = []
        goal_coords = self.goal_coords.cpu()
        start_coords = self.start_coords.cpu()
        cost = self.g_map[:, :, f2ind(goal_coords, 0), f2ind(goal_coords, 1)]
        # Traversing
        done = False
        node_coords = goal_coords.cpu()
        out_path.append(node_coords)
        self.been_there = 0 * self.been_there.cpu()
        self.been_there[
            :, :, f2ind(node_coords, 0), f2ind(node_coords, 1)
        ] = 1.0
        self.close_list_map = self.close_list_map.cpu()
        self.g_map = self.g_map.cpu()
        self.coords = self.coords.cpu()
        count1 = 0
        while not done:
            node_coords = self.propagate_traversal(
                node_coords, self.close_list_map, self.g_map, self.coords
            )
            self.been_there[
                :, :, f2ind(node_coords, 0), f2ind(node_coords, 1)
            ] = 1.0
            # A step shorter than 0.3 cells means we did not move: a loop.
            if torch.norm(node_coords - out_path[-1], 2).item() < 0.3:
                y = node_coords.flatten()[0].long()
                x = node_coords.flatten()[1].long()
                print(self.g_map[0, 0, y - 2 : y + 3, x - 2 : x + 3])
                print("loop in out_path", node_coords)
                raise ValueError("loop in out_path")
            out_path.append(node_coords)
            done = torch.norm(node_coords - start_coords.cpu(), 2).item() < 0.3
            count1 += 1
            if count1 > 250:
                break
        return out_path, cost
| [
"torch.ones_like",
"torch.relu",
"torch.zeros_like",
"torch.sqrt",
"torch.norm",
"torch.nn.Conv2d",
"numpy.zeros",
"torch.FloatTensor",
"torch.clamp",
"numpy.array",
"torch.nn.functional.max_pool2d",
"torch.max",
"torch.device",
"torch.min",
"matplotlib.pyplot.subplots",
"torch.round",... | [((752, 800), 'numpy.zeros', 'np.zeros', (['(ks * ks, 1, ks, ks)'], {'dtype': 'np.float32'}), '((ks * ks, 1, ks, ks), dtype=np.float32)\n', (760, 800), True, 'import numpy as np\n'), ((2026, 2045), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2038, 2045), False, 'import torch\n'), ((2491, 2538), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(9)'], {'kernel_size': '(3, 3)', 'bias': '(False)'}), '(1, 9, kernel_size=(3, 3), bias=False)\n', (2500, 2538), True, 'from torch import nn as nn\n'), ((2721, 2779), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(3, 3)', 'padding': '(1)', 'bias': '(False)'}), '(1, 1, kernel_size=(3, 3), padding=1, bias=False)\n', (2730, 2779), True, 'from torch import nn as nn\n'), ((11596, 11644), 'torch.nn.functional.pad', 'F.pad', (['self.obstacles', '(1, 1, 1, 1)', '"""replicate"""'], {}), "(self.obstacles, (1, 1, 1, 1), 'replicate')\n", (11601, 11644), True, 'from torch.nn import functional as F\n'), ((16938, 16994), 'torch.clamp', 'torch.clamp', (['costmap'], {'min': '(0)', 'max': 'obstacle_cost_corrected'}), '(costmap, min=0, max=obstacle_cost_corrected)\n', (16949, 16994), False, 'import torch\n'), ((594, 613), 'torch.round', 'torch.round', (['ten[i]'], {}), '(ten[i])\n', (605, 613), False, 'import torch\n'), ((2873, 2979), 'numpy.array', 'np.array', (['[[[[1e-05, 0.0001, 1e-05], [0.0001, 1, 0.0001], [1e-05, 0.0001, 1e-05]]]]'], {'dtype': 'np.float32'}), '([[[[1e-05, 0.0001, 1e-05], [0.0001, 1, 0.0001], [1e-05, 0.0001, \n 1e-05]]]], dtype=np.float32)\n', (2881, 2979), True, 'import numpy as np\n'), ((3340, 3387), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(1, 3)', 'bias': '(False)'}), '(1, 1, kernel_size=(1, 3), bias=False)\n', (3349, 3387), True, 'from torch import nn as nn\n'), ((3595, 3642), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(1, 3)', 'bias': '(False)'}), '(1, 1, kernel_size=(1, 3), bias=False)\n', (3604, 3642), True, 'from 
torch import nn as nn\n'), ((3846, 3893), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(3, 1)', 'bias': '(False)'}), '(1, 1, kernel_size=(3, 1), bias=False)\n', (3855, 3893), True, 'from torch import nn as nn\n'), ((4099, 4146), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(1)'], {'kernel_size': '(3, 1)', 'bias': '(False)'}), '(1, 1, kernel_size=(3, 1), bias=False)\n', (4108, 4146), True, 'from torch import nn as nn\n'), ((6122, 6141), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6134, 6141), False, 'import torch\n'), ((6867, 6885), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (6879, 6885), True, 'from matplotlib import pyplot as plt\n'), ((15727, 15788), 'torch.relu', 'torch.relu', (['(1.0 - self.been_there[:, :, ymin:ymax, xmin:xmax])'], {}), '(1.0 - self.been_there[:, :, ymin:ymax, xmin:xmax])\n', (15737, 15788), False, 'import torch\n'), ((16791, 16843), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.obstacles', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(self.obstacles, 3, stride=1, padding=1)\n', (16803, 16843), True, 'from torch.nn import functional as F\n'), ((3465, 3509), 'numpy.array', 'np.array', (['[[[[0, 1, -1]]]]'], {'dtype': 'np.float32'}), '([[[[0, 1, -1]]]], dtype=np.float32)\n', (3473, 3509), True, 'import numpy as np\n'), ((3719, 3763), 'numpy.array', 'np.array', (['[[[[-1, 1, 0]]]]'], {'dtype': 'np.float32'}), '([[[[-1, 1, 0]]]], dtype=np.float32)\n', (3727, 3763), True, 'import numpy as np\n'), ((3968, 4016), 'numpy.array', 'np.array', (['[[[[0], [1], [-1]]]]'], {'dtype': 'np.float32'}), '([[[[0], [1], [-1]]]], dtype=np.float32)\n', (3976, 4016), True, 'import numpy as np\n'), ((4223, 4271), 'numpy.array', 'np.array', (['[[[[-1], [1], [0]]]]'], {'dtype': 'np.float32'}), '([[[[-1], [1], [0]]]], dtype=np.float32)\n', (4231, 4271), True, 'import numpy as np\n'), ((4920, 4952), 'torch.zeros_like', 'torch.zeros_like', (['self.start_map'], {}), 
'(self.start_map)\n', (4936, 4952), False, 'import torch\n'), ((6073, 6105), 'torch.zeros_like', 'torch.zeros_like', (['self.start_map'], {}), '(self.start_map)\n', (6089, 6105), False, 'import torch\n'), ((6272, 6303), 'torch.zeros_like', 'torch.zeros_like', (['self.goal_map'], {}), '(self.goal_map)\n', (6288, 6303), False, 'import torch\n'), ((8757, 8868), 'torch.max', 'torch.max', (['self.close_list_map[:, :, ymin:ymax, xmin:xmax]', 'self.open_list_map[:, :, ymin:ymax, xmin:xmax]'], {}), '(self.close_list_map[:, :, ymin:ymax, xmin:xmax], self.\n open_list_map[:, :, ymin:ymax, xmin:xmax])\n', (8766, 8868), False, 'import torch\n'), ((9844, 9894), 'torch.max', 'torch.max', (['self.close_list_map', 'self.open_list_map'], {}), '(self.close_list_map, self.open_list_map)\n', (9853, 9894), False, 'import torch\n'), ((10980, 11030), 'torch.max', 'torch.max', (['self.close_list_map', 'self.open_list_map'], {}), '(self.close_list_map, self.open_list_map)\n', (10989, 11030), False, 'import torch\n'), ((11957, 12011), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 1:2, :, :]', '(1, 1, 0, 0)', '"""replicate"""'], {}), "(coords[:, 1:2, :, :], (1, 1, 0, 0), 'replicate')\n", (11962, 12011), True, 'from torch.nn import functional as F\n'), ((12125, 12179), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 1:2, :, :]', '(1, 1, 0, 0)', '"""replicate"""'], {}), "(coords[:, 1:2, :, :], (1, 1, 0, 0), 'replicate')\n", (12130, 12179), True, 'from torch.nn import functional as F\n'), ((12287, 12341), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 0:1, :, :]', '(0, 0, 1, 1)', '"""replicate"""'], {}), "(coords[:, 0:1, :, :], (0, 0, 1, 1), 'replicate')\n", (12292, 12341), True, 'from torch.nn import functional as F\n'), ((12453, 12507), 'torch.nn.functional.pad', 'F.pad', (['coords[:, 0:1, :, :]', '(0, 0, 1, 1)', '"""replicate"""'], {}), "(coords[:, 0:1, :, :], (0, 0, 1, 1), 'replicate')\n", (12458, 12507), True, 'from torch.nn import functional as F\n'), ((4674, 4711), 
'torch.FloatTensor', 'torch.FloatTensor', (['(h / 2.0, w / 2.0)'], {}), '((h / 2.0, w / 2.0))\n', (4691, 4711), False, 'import torch\n'), ((4772, 4809), 'torch.FloatTensor', 'torch.FloatTensor', (['(h / 2.0, w / 2.0)'], {}), '((h / 2.0, w / 2.0))\n', (4789, 4809), False, 'import torch\n'), ((5123, 5154), 'torch.ones_like', 'torch.ones_like', (['self.start_map'], {}), '(self.start_map)\n', (5138, 5154), False, 'import torch\n'), ((11714, 11745), 'torch.ones_like', 'torch.ones_like', (['self.obstacles'], {}), '(self.obstacles)\n', (11729, 11745), False, 'import torch\n'), ((12714, 12762), 'torch.sqrt', 'torch.sqrt', (['(left_diff_sq + up_diff_sq + self.eps)'], {}), '(left_diff_sq + up_diff_sq + self.eps)\n', (12724, 12762), False, 'import torch\n'), ((12970, 13005), 'torch.sqrt', 'torch.sqrt', (['(left_diff_sq + self.eps)'], {}), '(left_diff_sq + self.eps)\n', (12980, 13005), False, 'import torch\n'), ((13219, 13269), 'torch.sqrt', 'torch.sqrt', (['(left_diff_sq + down_diff_sq + self.eps)'], {}), '(left_diff_sq + down_diff_sq + self.eps)\n', (13229, 13269), False, 'import torch\n'), ((13483, 13516), 'torch.sqrt', 'torch.sqrt', (['(up_diff_sq + self.eps)'], {}), '(up_diff_sq + self.eps)\n', (13493, 13516), False, 'import torch\n'), ((13873, 13908), 'torch.sqrt', 'torch.sqrt', (['(down_diff_sq + self.eps)'], {}), '(down_diff_sq + self.eps)\n', (13883, 13908), False, 'import torch\n'), ((14128, 14177), 'torch.sqrt', 'torch.sqrt', (['(right_diff_sq + up_diff_sq + self.eps)'], {}), '(right_diff_sq + up_diff_sq + self.eps)\n', (14138, 14177), False, 'import torch\n'), ((14391, 14427), 'torch.sqrt', 'torch.sqrt', (['(right_diff_sq + self.eps)'], {}), '(right_diff_sq + self.eps)\n', (14401, 14427), False, 'import torch\n'), ((14647, 14698), 'torch.sqrt', 'torch.sqrt', (['(right_diff_sq + down_diff_sq + self.eps)'], {}), '(right_diff_sq + down_diff_sq + self.eps)\n', (14657, 14698), False, 'import torch\n'), ((12812, 12889), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 
0:h, 0:w]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 0:w], obstacles_pd[:, :, 1:h + 1, 1:w + 1])\n', (12821, 12889), False, 'import torch\n'), ((13055, 13140), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 0:h, 1:w + 1]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 1:w + 1], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (13064, 13140), False, 'import torch\n'), ((13319, 13404), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 2:h + 2, 0:w]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 2:h + 2, 0:w], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (13328, 13404), False, 'import torch\n'), ((13566, 13651), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 0:h, 1:w + 1]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 1:w + 1], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (13575, 13651), False, 'import torch\n'), ((13958, 14047), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 2:h + 2, 1:w + 1]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 2:h + 2, 1:w + 1], obstacles_pd[:, :, 1:h + 1,\n 1:w + 1])\n', (13967, 14047), False, 'import torch\n'), ((14227, 14312), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 0:h, 2:w + 2]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 0:h, 2:w + 2], obstacles_pd[:, :, 1:h + 1, 1:w +\n 1])\n', (14236, 14312), False, 'import torch\n'), ((14477, 14566), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 1:h + 1, 2:w + 2]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 1:h + 1, 2:w + 2], obstacles_pd[:, :, 1:h + 1,\n 1:w + 1])\n', (14486, 14566), False, 'import torch\n'), ((14748, 14837), 'torch.max', 'torch.max', (['obstacles_pd[:, :, 2:h + 2, 2:w + 2]', 'obstacles_pd[:, :, 1:h + 1, 1:w + 1]'], {}), '(obstacles_pd[:, :, 2:h + 2, 2:w + 2], obstacles_pd[:, :, 1:h + 1,\n 1:w + 1])\n', (14757, 14837), False, 'import torch\n'), ((16177, 16202), 'torch.min', 'torch.min', 
(['current_g_cost'], {}), '(current_g_cost)\n', (16186, 16202), False, 'import torch\n'), ((17983, 18024), 'torch.norm', 'torch.norm', (['(node_coords - out_path[-1])', '(2)'], {}), '(node_coords - out_path[-1], 2)\n', (17993, 18024), False, 'import torch\n'), ((5329, 5346), 'torch.round', 'torch.round', (['ymin'], {}), '(ymin)\n', (5340, 5346), False, 'import torch\n'), ((5377, 5394), 'torch.round', 'torch.round', (['ymax'], {}), '(ymax)\n', (5388, 5394), False, 'import torch\n'), ((5441, 5458), 'torch.round', 'torch.round', (['xmin'], {}), '(xmin)\n', (5452, 5458), False, 'import torch\n'), ((5489, 5506), 'torch.round', 'torch.round', (['xmax'], {}), '(xmax)\n', (5500, 5506), False, 'import torch\n'), ((9016, 9120), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.open_list_map[:, :, ymin - 1:ymax + 1, xmin - 1:xmax + 1]', '(3)'], {'stride': '(1)', 'padding': '(0)'}), '(self.open_list_map[:, :, ymin - 1:ymax + 1, xmin - 1:xmax + 1],\n 3, stride=1, padding=0)\n', (9028, 9120), True, 'from torch.nn import functional as F\n'), ((9998, 10054), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.open_list_map', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(self.open_list_map, 3, stride=1, padding=1)\n', (10010, 10054), True, 'from torch.nn import functional as F\n'), ((11134, 11190), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['self.open_list_map', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(self.open_list_map, 3, stride=1, padding=1)\n', (11146, 11190), True, 'from torch.nn import functional as F\n'), ((16438, 16457), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (16450, 16457), False, 'import torch\n'), ((9634, 9678), 'torch.nn.functional.pad', 'F.pad', (['self.g_map', '(1, 1, 1, 1)', '"""replicate"""'], {}), "(self.g_map, (1, 1, 1, 1), 'replicate')\n", (9639, 9678), True, 'from torch.nn import functional as F\n'), ((10770, 10814), 'torch.nn.functional.pad', 'F.pad', (['self.g_map', '(1, 1, 1, 1)', '"""replicate"""'], 
{}), "(self.g_map, (1, 1, 1, 1), 'replicate')\n", (10775, 10814), True, 'from torch.nn import functional as F\n')] |
import math
import numpy as np
import re
class bitstream:
def __init__(self,array = None):
if(str(locals()['array']) == 'None'):
self.array_unit_size = 8
self.array_type = 'uint'
self.valid = True
self.read_index = 0
self.r_bit_index = 0
self.write_index = 0
self.w_bit_index = 0
self.array = np.zeros((8),dtype = 'uint8')
self.capacity = 8*self.array_unit_size
self.size = 0
else:
self.array_unit_size = int(re.search(r'\d+',str(array.dtype)).group())
self.array_type = re.findall('[a-zA-Z]+',str(array.dtype))[0]
if(not(math.floor(math.log(self.array_unit_size,2)) == math.log(self.array_unit_size,2)) or not(self.array_type == 'uint')):
print('Error : Array must be of valid dtype (uint) ',self.array_type,' ',math.log(self.array_unit_size,2))
self.valid = False
return
if(len(array.shape)>1):
print(array.shape)
print('Error : Array must be one dimensional')
self.valid = False
return
self.valid = True
self.read_index = 0
self.r_bit_index = 0
array_size = 2**math.ceil(math.log(len(array)+1,2))
self.array = np.zeros((array_size),dtype = array.dtype)
self.array[0:len(array)] = array
self.capacity = array_size * self.array_unit_size
self.write_index = len(array)
self.w_bit_index = 0
self.size = len(array)*self.array_unit_size
"""
def size(self):
ri = self.read_index
wi = self.write_index
if(self.read_index> self.write_index):
wi = wi + len(self.array)
count = (wi - ri -1)*self.array_unit_size
count = count + self.w_bit_index + (self.array_unit_size - self.r_bit_index + 1)
return count
"""
    def get_next(self, number_of_bits):
        """Peek at the next *number_of_bits* bits without consuming them.

        Same extraction logic as ``read`` (least-significant bits first),
        but the read pointers and size are saved up front and restored
        before returning.  Prints an error and returns None if the stream
        is invalid or empty; requests larger than the stream are truncated
        to the remaining size.
        """
        if ((not self.valid) or self.is_empty()):
            print('Error : Either stream doesnt have enough bits or stream does not contain valid data')
            return
        if ((number_of_bits > self.size)):
            number_of_bits = self.size
        # Snapshot read state so this acts as a peek, not a consume.
        rbi = self.r_bit_index
        ri = self.read_index
        s = self.size
        if (self.r_bit_index + number_of_bits - 1 < self.array_unit_size):
            # Request fits inside the current cell: mask the bits out.
            #print('before : ',self.read_index, ' ', self.r_bit_index)
            mask = int(2 ** number_of_bits) - 1
            mask = mask << (self.r_bit_index)
            ans = (mask & self.array[self.read_index]) >> self.r_bit_index
            self.r_bit_index = (self.r_bit_index + number_of_bits) % self.array_unit_size
            if (self.r_bit_index == 0):
                self.read_index = (self.read_index + 1) % len(self.array)
            #print('after : ',self.read_index,' ',self.r_bit_index)
            #print(bin(ans))
        else:
            # Request spans a cell boundary: read the tail of this cell,
            # then the remainder from the following cell(s), and splice.
            num_bits_frm_cur = self.array_unit_size - self.r_bit_index
            num_bits_frm_nxt = number_of_bits - num_bits_frm_cur
            ans1 = self.read(num_bits_frm_cur)
            if (not (self.r_bit_index == 0)):
                self.read_index = (self.read_index + 1) % len(self.array)
            ans2 = self.read(num_bits_frm_nxt)
            # ans = (ans2<<math.ceil(math.log(ans1+1,2))) + ans1
            ans = (ans2 << (num_bits_frm_cur)) + ans1
            #print(ans2 << (num_bits_frm_cur) ,'+', ans1)
        # Restore the snapshot: the stream is left exactly as it was.
        self.r_bit_index = rbi
        self.read_index = ri
        self.size = s
        return ans
    def read(self,number_of_bits):
        """Consume and return the next *number_of_bits* bits as an integer.

        Bits are taken least-significant-first from the current read
        position.  Prints an error and returns None if the stream is
        invalid or empty; requests larger than the stream are truncated to
        the remaining size.
        """
        s = self.size
        if((not self.valid )or self.is_empty()):
            print('Error : Either stream doesnt have enough bits or stream does not contain valid data')
            return
        if((number_of_bits > self.size)):
            number_of_bits = self.size
        if(self.r_bit_index + number_of_bits-1 < self.array_unit_size):
            # Request fits inside the current cell: mask the bits out and
            # advance the bit pointer (wrapping to the next cell if exact).
            mask = int(2**number_of_bits) - 1
            mask = mask<<(self.r_bit_index)
            ans = (mask & self.array[self.read_index])>>self.r_bit_index
            self.r_bit_index = (self.r_bit_index + number_of_bits)%self.array_unit_size
            if(self.r_bit_index == 0):
                self.read_index = (self.read_index +1)%len(self.array)
            self.size = self.size - number_of_bits
            #print(self.read_index,' ',self.r_bit_index)
            #print(bin(ans))
        else:
            # Request spans a cell boundary: read the tail of this cell,
            # then recurse for the remainder and splice the two parts.
            num_bits_frm_cur = self.array_unit_size - self.r_bit_index
            num_bits_frm_nxt = number_of_bits - num_bits_frm_cur
            ans1 = self.read(num_bits_frm_cur)
            # NOTE(review): this extra increment only fires when the first
            # recursive read left r_bit_index non-zero — confirm intent.
            if(not(self.r_bit_index == 0)):
                self.read_index = (self.read_index +1)%len(self.array)
            ans2 = self.read(num_bits_frm_nxt)
            #ans = (ans2<<math.ceil(math.log(ans1+1,2))) + ans1
            ans = (ans2<<(num_bits_frm_cur)) + ans1
        s2 = self.size
        #print(s-s2,'removed : ',ans)
        return ans
def write(self,number_of_bits,val):
s = self.array_unit_size
w = self.w_bit_index
wi = self.write_index
r = self.r_bit_index
ri = self.read_index
rb = ri*s + r
wb = wi *s + w
if(self.size+number_of_bits > self.capacity):
a = self.array
self.array = np.zeros((2*len(a)),dtype= a.dtype)
self.capacity = 2*len(a)*s
if(rb<wb):
self.array[0:(wi-ri+1)] = a[r:(wi+1)]
self.read_index = 0
self.write_index = wi-ri
else:
self.array[0:wi]=a[0:wi]
self.array[-(len(a) - ri):] = a[ri:]
self.read_index = len(self.array) -(len(a) - ri)
if(number_of_bits + self.w_bit_index -1 < self.array_unit_size):
x = self.array[self.write_index]
val = val << self.w_bit_index
mask = 2**number_of_bits - 1
mask = mask<<(self.w_bit_index)
self.array[self.write_index]= (val & mask) + (x &(~mask))
self.w_bit_index = (self.w_bit_index + number_of_bits)%self.array_unit_size
if(self.w_bit_index == 0):
self.write_index = (self.write_index +1)%len(self.array)
self.size = self.size + number_of_bits
else:
num_bits_in_cur = self.array_unit_size - self.w_bit_index
num_bits_in_nxt = number_of_bits - num_bits_in_cur
self.write(num_bits_in_cur, val)
if(not(self.w_bit_index == 0)):
self.write_index = (self.write_index +1)%len(self.array)
val = val>>(num_bits_in_cur)
self.write(num_bits_in_nxt,val )
def read_from_end(self, number_of_bits):
s = self.size
if ((not self.valid) or self.is_empty()):
print('Error : Either stream doesnt have enough bits or stream does not contain valid data')
return
if ((number_of_bits > self.size)):
number_of_bits = self.size
if (self.r_bit_index + number_of_bits - 1 < self.array_unit_size):
mask = int(2 ** number_of_bits) - 1
mask = mask << (self.r_bit_index)
ans = (mask & self.array[self.read_index]) >> self.r_bit_index
self.r_bit_index = (self.r_bit_index + number_of_bits) % self.array_unit_size
if (self.r_bit_index == 0):
self.read_index = (self.read_index + 1) % len(self.array)
self.size = self.size - number_of_bits
# print(self.read_index,' ',self.r_bit_index)
# print(bin(ans))
else:
num_bits_frm_cur = self.array_unit_size - self.r_bit_index
num_bits_frm_nxt = number_of_bits - num_bits_frm_cur
ans1 = self.read(num_bits_frm_cur)
if (not (self.r_bit_index == 0)):
self.read_index = (self.read_index + 1) % len(self.array)
ans2 = self.read(num_bits_frm_nxt)
# ans = (ans2<<math.ceil(math.log(ans1+1,2))) + ans1
ans = (ans2 << (num_bits_frm_cur)) + ans1
s2 = self.size
# print(s-s2,'removed : ',ans)
return ans
def write_in_front(self, number_of_bits, val):
s = self.array_unit_size
w = self.w_bit_index
wi = self.write_index
r = self.r_bit_index
ri = self.read_index
rb = ri * s + r
wb = wi * s + w
if (self.size + number_of_bits > self.capacity):
a = self.array
self.array = np.zeros((2 * len(a)), dtype=a.dtype)
self.capacity = 2 * len(a) * s
if (rb < wb):
self.array[0:(wi - ri + 1)] = a[r:(wi + 1)]
self.read_index = 0
self.write_index = wi - ri
else:
self.array[0:wi] = a[0:wi]
self.array[-(len(a) - ri):] = a[ri:]
self.read_index = len(self.array) - (len(a) - ri)
if (number_of_bits + self.w_bit_index - 1 < self.array_unit_size):
x = self.array[self.write_index]
val = val << self.w_bit_index
mask = 2 ** number_of_bits - 1
mask = mask << (self.w_bit_index)
self.array[self.write_index] = (val & mask) + (x & (~mask))
self.w_bit_index = (self.w_bit_index + number_of_bits) % self.array_unit_size
if (self.w_bit_index == 0):
self.write_index = (self.write_index + 1) % len(self.array)
self.size = self.size + number_of_bits
else:
num_bits_in_cur = self.array_unit_size - self.w_bit_index
num_bits_in_nxt = number_of_bits - num_bits_in_cur
self.write(num_bits_in_cur, val)
if (not (self.w_bit_index == 0)):
self.write_index = (self.write_index + 1) % len(self.array)
val = val >> (num_bits_in_cur)
self.write(num_bits_in_nxt, val)
def show(self):
i = self.read_index
while(True):
x = self.array[i]
if(i == self.read_index):
x = x >> self.r_bit_index
print(bin(x))
i=(i+1)%len(self.array)
if((i == self.write_index)):
x = self.array[self.write_index]
if(not(self.w_bit_index == 0)):
x = (int(2**(self.w_bit_index)) - 1) & x
print(bin(x))
break
def get_array(self):
i = 0
size = math.ceil(self.size/self.array_unit_size)
ans = np.zeros((size),dtype = (str(self.array_type)+str(self.array_unit_size)))
ri = self.read_index = 0
rbi = self.r_bit_index = 0
wi = self.write_index = len(ans)
wbi = self.w_bit_index
for i in range(size-1):
#print('unit size',self.array_unit_size)
ans[i] = self.read(self.array_unit_size)
#print(ans[i])
ans[size - 1] = self.read(self.size)
#self.array[0:len(ans)] = ans
self.read_index = ri
self.r_bit_index = rbi
self.write_index = wi
self.w_bit_index = wbi
self.size = size * self.array_unit_size
return ans
def is_empty(self):
return (self.size == 0)
"""
a = np.array([1,2,4,5],dtype='uint8')
bs = bitstream(a);
bs.show()
print(bs.read(3))
print(bs.read(3))
print(bs.read(3))
print("now")
bs.show()
ar = bs.get_array()
print(ar)
print("now")
bs.show()
""" | [
"math.log",
"numpy.zeros",
"math.ceil"
] | [((11166, 11209), 'math.ceil', 'math.ceil', (['(self.size / self.array_unit_size)'], {}), '(self.size / self.array_unit_size)\n', (11175, 11209), False, 'import math\n'), ((438, 464), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': '"""uint8"""'}), "(8, dtype='uint8')\n", (446, 464), True, 'import numpy as np\n'), ((1447, 1486), 'numpy.zeros', 'np.zeros', (['array_size'], {'dtype': 'array.dtype'}), '(array_size, dtype=array.dtype)\n', (1455, 1486), True, 'import numpy as np\n'), ((951, 984), 'math.log', 'math.log', (['self.array_unit_size', '(2)'], {}), '(self.array_unit_size, 2)\n', (959, 984), False, 'import math\n'), ((791, 824), 'math.log', 'math.log', (['self.array_unit_size', '(2)'], {}), '(self.array_unit_size, 2)\n', (799, 824), False, 'import math\n'), ((754, 787), 'math.log', 'math.log', (['self.array_unit_size', '(2)'], {}), '(self.array_unit_size, 2)\n', (762, 787), False, 'import math\n')] |
import sys
import csv
import time
import cvxopt
import numpy as np
import pandas as pd
from svmutil import *
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
# reading data from csv files
def get_data(data_path,issubset,digit1,digit2):
train_data = np.array(pd.read_csv(data_path,header=None,dtype=float).values)
train_output = np.array(train_data[:,784:785])
# True means we have to do binary classification between two digits only
if issubset==True:
train_data = train_data[np.ix_((train_data[:,784]==digit1) | (train_data[:,784]==digit2))]
train_output = train_data[:,784:785]
for i in range(len(train_data)):
if train_output[i,0] == digit1:
train_output[i,0] = 1
else:
train_output[i,0] = -1
train_data = train_data/255
return (np.asmatrix(train_data[:,0:784]),np.asmatrix(train_output))
# plotting the confusion matrix
def draw_confusion(confatrix):
plt.imshow(confatrix)
plt.title("Confusion Matrix")
plt.colorbar()
plt.set_cmap("Greens")
plt.ylabel("True labels")
plt.xlabel("Predicted label")
plt.show()
# Linear kernel using cvxopt for binary classification. Refer to doc attached and report for clarification
def linear_kernel_cvxopt(train_data,train_output,penalty):
m = len(train_data)
X_Y = np.multiply(train_data,train_output)
P = cvxopt.matrix(np.dot(X_Y,X_Y.transpose()))
q = cvxopt.matrix(-1*np.ones((m,1)))
A = cvxopt.matrix(train_output.transpose())
b = cvxopt.matrix(0.0)
tmp1 = -1*np.identity(m)
tmp2 = np.identity(m)
G = cvxopt.matrix(np.vstack((tmp1,tmp2)))
tmp1 = np.zeros((m,1))
tmp2 = penalty*np.ones((m,1))
h = cvxopt.matrix(np.vstack((tmp1,tmp2)))
solution = cvxopt.solvers.qp(P,q,G,h,A,b)
return solution
# Calculating weights for linear kernel and storing them into files
def calculate_linear_svm_params(kernel_soln,train_data,train_output,tolerance):
nSV = 0
(m,n) = (train_data.shape[0],train_data.shape[1])
raveled = np.ravel(kernel_soln['x'])
langrangian_params = np.arange(len(raveled)) [raveled>tolerance]
weight_matrix = np.asmatrix(np.zeros((1,n),dtype=float))
for i in langrangian_params:
for j in range(n):
weight_matrix[0,j]+=(raveled[i]*train_data[i,j]*train_output[i,0])
nSV+=1
# writing indices of support vectors into text file
print("Indices of support vectors have been stored in linear_support_vector_indices.txt")
np.savetxt("linear_support_vector_indices.txt", langrangian_params , delimiter=', ',fmt='%d')
# writing weight matrix into text file
print("Weight matrix has been stored in weight_matrix.txt")
with open('weight_matrix.txt','a') as f:
for line in weight_matrix:
np.savetxt(f, line, fmt='%.2f')
b = 0
if nSV==0:
print("No support vectors found for tolerance value of " + str(tolerance))
else:
for sv_idx in langrangian_params:
b+=(train_output[sv_idx,0] - np.dot(train_data[sv_idx,:],weight_matrix.transpose())[0,0])
b = b/(float(len(langrangian_params)))
print(str(b) + " is the value of b")
return (weight_matrix,b,nSV)
# Calculates prediction over test_data
def linear_kernel_svm_prediction(weight_matrix,b,test_data):
predicted = np.asmatrix(np.zeros((len(test_data),1),dtype=int))
val = np.dot(test_data,weight_matrix.transpose()) + b
predicted = 2*np.multiply((val>0),np.ones((len(test_data),1))) - 1
return predicted
# Gaussian kernel using cvxopt for binary classification
def gaussian_kernel_cvxopt(train_data,train_output,gamma,penalty):
m = len(train_data)
kernel = np.asmatrix(np.zeros((m,m),dtype=float))
X_XT = np.dot(train_data,train_data.transpose())
for i in range(m):
for j in range(m):
kernel[i,j] = float(X_XT[i,i] + X_XT[j,j] - 2*X_XT[i,j])
kernel = np.exp(-1*gamma*kernel)
P = cvxopt.matrix(np.multiply(kernel,np.dot(train_output,train_output.transpose())))
q = cvxopt.matrix(-1*np.ones((m,1)))
A = cvxopt.matrix(train_output.transpose())
b = cvxopt.matrix(0.0)
tmp1 = -1*np.identity(m)
tmp2 = np.identity(m)
G = cvxopt.matrix(np.vstack((tmp1,tmp2)))
tmp1 = np.zeros((m,1))
tmp2 = penalty*np.ones((m,1))
h = cvxopt.matrix(np.vstack((tmp1,tmp2)))
solution = cvxopt.solvers.qp(P,q,G,h,A,b)
return solution
# Prediction using gaussian kernel. b depend on the test sample used; hence is calculated separately for each point
def gaussian_prediction_cvxopt(kernel_soln,train_data,train_output,test_data,tolerance,gamma):
(m,n) = (train_data.shape[0],train_data.shape[1])
raveled = np.ravel(kernel_soln['x'])
nSV = 0
X_train = np.sum(np.multiply(train_data,train_data),axis=1)
X_test = np.sum(np.multiply(test_data,test_data),axis=1)
X_train_X_test = np.dot(train_data,test_data.transpose())
alpha_x_label = np.asmatrix(np.zeros((len(raveled),1),dtype=float))
for i in range(len(raveled)):
if raveled[i]>tolerance:
alpha_x_label[i,0] = train_output[i,0]*raveled[i]
nSV+=1
langrangian_params = np.arange(len(raveled)) [raveled>tolerance]
prediction = np.zeros((len(test_data),1),dtype=int)
# writing indices of support vectors into text file
print("Indices of support vectors have been saved in gaussian_support_vector_indices.txt")
np.savetxt("gaussian_support_vector_indices.txt", langrangian_params , delimiter=', ',fmt='%d')
if len(langrangian_params)<=0:
print("No support vectors found for tolerance value= " + str(tolerance))
else:
b = 0
for sv_idx in langrangian_params:
b+=(train_output[sv_idx,0] - np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*np.sum(np.multiply(train_data-train_data[sv_idx,:],train_data-train_data[sv_idx,:]),axis=1)))))
b = b/(float(len(langrangian_params)))
print(str(b) + " is the value of b")
for i in range(len(test_data)):
prediction[i] = np.sign(np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*(X_train - 2*X_train_X_test[:,i] + X_test[i,0])))) + b)
return (prediction,nSV)
# Gaussian and linear kernel both using libsvm
def libsvm_both(train_data,train_output,test_data,test_output,gamma,penalty):
train_labels = []
train_input = train_data.tolist()
for j in range(train_output.shape[0]):
train_labels.append(train_output[j,0])
test_labels = []
test_input = test_data.tolist()
for j in range(test_output.shape[0]):
test_labels.append(test_output[j,0])
problem = svm_problem(train_labels,train_input)
linear_param = svm_parameter("-s 0 -c 1 -t 0")
linear_model = svm_train(problem,linear_param)
linear_pred_lbl, linear_pred_acc, linear_pred_val = svm_predict(test_labels,test_input,linear_model)
gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma))
gaussian_model = svm_train(problem,gaussian_param)
gaussian_pred_lbl, gaussian_pred_acc, gaussian_pred_val = svm_predict(test_labels,test_input,gaussian_model)
# ENDING OF BINARY CLASSIFICATION FUNCTIONS. BELOW CODE IS FOR MULTICLASS CLASSIFICATION
# multiclass classification using cvxopt and 45 SVMs i.e. one vs all classification
def multiclass_svm_cvxopt(train_data_path,test_data_path,gamma,penalty,tolerance):
svm_dict = {}
num_max = 1
# learning parameters phase
for i in range(1+num_max):
for j in range(i):
idx = str(i)+str(j)
svm_dict[idx] = []
(train_data,train_output) = get_data(train_data_path,True,i,j)
kernel_soln = gaussian_kernel_cvxopt(train_data,train_output,gamma,penalty)
svm_dict[idx] = np.ravel(kernel_soln['x']).tolist()
print("langrangian parameters for svm with index value " + idx + " computed")
# prediction phase
(test_data,test_output) = get_data(test_data_path,False,0,0)
prediction_dict = {}
for i in range(len(test_data)):
prediction_dict[i] = [0,0,0,0,0,0,0,0,0,0]
prediction = np.asmatrix(np.zeros((len(test_data),1),dtype=int))
for i in range(1+num_max):
for j in range(i):
idx = str(i)+str(j)
kernel_soln_x = svm_dict[idx]
(train_data,train_output) = get_data(train_data_path,True,i,j)
svm_prediction = gaussian_prediction_with_alphas(kernel_soln_x,train_data,train_output,test_data,tolerance,gamma)
for k in range(len(svm_prediction)):
if svm_prediction[k,0] == 1:
prediction_dict[k][i]+=1
else:
prediction_dict[k][j]+=1
print("predictions for svm with index value " + idx + " done")
for i in range(len(test_data)):
prediction[i] = np.argmax(prediction_dict[i])
return (test_output,np.array(prediction))
# Helper function for multiclass classification using cvxopt
def gaussian_prediction_with_alphas(kernel_soln_x,train_data,train_output,test_data,tolerance,gamma):
prediction = np.asmatrix(np.ones((len(test_data),1),dtype=int))
raveled = np.asmatrix(kernel_soln_x)
X_train = np.sum(np.multiply(train_data,train_data),axis=1)
X_test = np.sum(np.multiply(test_data,test_data),axis=1)
X_train_X_test = np.dot(train_data,test_data.transpose())
alpha_x_label = np.multiply(train_output,np.multiply(raveled,raveled>tolerance))
langrangian_params = np.nonzero(raveled>tolerance)[0]
if len(langrangian_params)==0:
print("No support vectors found for tolerance value= " + str(tolerance))
else:
b = 0
for sv_idx in langrangian_params:
b+=(train_output[sv_idx,0] - np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*np.sum(np.multiply(train_data-train_data[sv_idx,:],train_data-train_data[sv_idx,:]),axis=1)))))
b = b/(float(len(langrangian_params)))
for i in range(len(test_data)):
prediction[i,0] = np.sign(np.sum(np.multiply(alpha_x_label,np.exp(-1*gamma*(X_train - 2*X_train_X_test[:,i] + X_test[i,0])))) + b)
return prediction
# multiclass classification using libsvm using 45 individual libsvms i.e. one vs all classification
def multiclass_svm_libsvm_45(train_data_path,test_data_path,gamma,penalty):
svm_dict = {}
prediction_dict = {}
num_max = 9
(test_data,test_output) = get_data(test_data_path,False,0,0)
for i in range(len(test_data)):
prediction_dict[i] = [0,0,0,0,0,0,0,0,0,0]
prediction = np.asmatrix(np.zeros((len(test_data),1),dtype=int))
# learning parameters phase (45 individual svms)
for i in range(1+num_max):
for j in range(i):
(train_data,train_output) = get_data(train_data_path,True,i,j)
idx = str(i)+str(j)
train_labels = []
train_input = train_data.tolist()
for i1 in range(train_output.shape[0]):
train_labels.append(train_output[i1,0])
test_labels = []
test_input = test_data.tolist()
for j1 in range(test_output.shape[0]):
test_labels.append(test_output[j1,0])
problem = svm_problem(train_labels,train_input)
gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma))
gaussian_model = svm_train(problem,gaussian_param)
svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(test_labels,test_input,gaussian_model)
for k in range(len(svm_prediction_lbl)):
if svm_prediction_lbl[k] == 1:
prediction_dict[k][i]+=1
else:
prediction_dict[k][j]+=1
print("prediction using gaussian kernel in libsvm completed for " + idx)
for i in range(len(test_data)):
prediction[i] = np.argmax(prediction_dict[i])
return(test_output,prediction)
# Multiclass classification for 10 classes 0-9
def multiclass_svm_libsvm(train_data_path,test_data_path,gamma,penalty):
(train_data,train_output) = get_data(train_data_path,False,0,0)
(test_data,test_output) = get_data(test_data_path,False,0,0)
train_labels = []
train_input = train_data.tolist()
for i1 in range(train_output.shape[0]):
train_labels.append(train_output[i1,0])
test_labels = []
test_input = test_data.tolist()
for j1 in range(test_output.shape[0]):
test_labels.append(test_output[j1,0])
problem = svm_problem(train_labels,train_input)
gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma))
gaussian_model = svm_train(problem,gaussian_param)
svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(test_labels,test_input,gaussian_model)
return (test_output,svm_prediction_lbl)
# MAIN FUNCTION
def main():
train_data_path = sys.argv[1]
test_data_path = sys.argv[2]
classification = sys.argv[3]
part = sys.argv[4]
issubset = (classification=='0')
if issubset==True:
digit1 = 5
digit2 = 6
(train_data,train_output) = get_data(train_data_path,issubset,digit1,digit2)
(test_data,test_output) = get_data(test_data_path,issubset,digit1,digit2)
if part == 'a':
tolerance = 1e-4
penalty = 1
print("tolerance,penalty for linear kernel for binary classification = " + str(tolerance) + "," + str(penalty))
linear_kernel_soln = linear_kernel_cvxopt(train_data,train_output,penalty)
(weight_matrix,b,nSV) = calculate_linear_svm_params(linear_kernel_soln,train_data,train_output,tolerance)
print(str(nSV) + " support vectors")
predicted = linear_kernel_svm_prediction(weight_matrix,b,test_data)
confatrix = confusion_matrix(test_output,predicted)
print("Confusion Matrix")
print(confatrix)
# draw_confusion(confatrix)
elif part =='b':
gamma = 0.05
penalty = 1
tolerance = 1e-4
print("tolerance,penalty,gamma for gaussian kernel for binary classification = " + str(tolerance) + "," + str(penalty) + "," + str(gamma))
gaussian_kernel_soln = gaussian_kernel_cvxopt(train_data,train_output,gamma,penalty)
(predicted,nSV) = gaussian_prediction_cvxopt(gaussian_kernel_soln,train_data,train_output,test_data,tolerance,gamma)
print(str(nSV) + " support vectors")
confatrix = confusion_matrix(test_output,predicted)
print("Confusion Matrix")
print(confatrix)
# draw_confusion(confatrix)
elif part == 'c':
gamma = 0.05
penalty = 1
libsvm_both(train_data,train_output,test_data,test_output,gamma,penalty)
else:
print("No such part for binary classification")
else:
if part == 'a':
gamma = 0.05
penalty = 1
tolerance = 1e-6
print("tolerance value for gaussian kernel for multiclass classification= " + str(tolerance))
(test_output,prediction) = multiclass_svm_cvxopt(train_data_path,test_data_path,gamma,penalty,tolerance)
confatrix = confusion_matrix(test_output,prediction)
print(confatrix)
elif part =='b':
gamma = 0.05
penalty = 1
(test_output,prediction) = multiclass_svm_libsvm(train_data_path,test_data_path,gamma,penalty)
confatrix = confusion_matrix(test_output,prediction)
print(confatrix)
# draw_confusion(confatrix)
elif part == 'd':
gamma = 0.05
penalty_array = [0.00001,0.01,1,5,10]
validation_set_accuracy = np.zeros((1,5),dtype=float)
test_accuracy = np.zeros((1,5),dtype=float)
(train_data,train_output) = get_data(train_data_path,False,0,0)
(test_data,test_output) = get_data(test_data_path,False,0,0)
validation_data_X = train_data[18000:20000,:]
validation_output_Y = train_output[18000:20000,:]
training_data_X = train_data[0:18000,:]
training_output_Y = train_output[0:18000,:]
for i in range(len(penalty_array)):
penalty = penalty_array[i]
train_labels = []
train_input = training_data_X.tolist()
for i1 in range(training_output_Y.shape[0]):
train_labels.append(training_output_Y[i1,0])
validation_labels = []
validation_input = validation_data_X.tolist()
for i1 in range(validation_output_Y.shape[0]):
validation_labels.append(validation_output_Y[i1,0])
test_labels = []
test_input = test_data.tolist()
for j1 in range(test_output.shape[0]):
test_labels.append(test_output[j1,0])
problem = svm_problem(train_labels,train_input)
gaussian_param = svm_parameter("-s 0 -c " + str(penalty) + " -t 2 -g " + str(gamma))
gaussian_model = svm_train(problem,gaussian_param)
svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(test_labels,test_input,gaussian_model)
test_accuracy[i] = svm_prediction_acc[0]
svm_prediction_lbl,svm_prediction_acc,svm_prediction_val = svm_predict(validation_labels,validation_input,gaussian_model)
validation_set_accuracy[i] = svm_prediction_acc[0]
print("Validation Set Accuracy")
print(validation_set_accuracy)
print("Test set Accuracy")
print(test_accuracy)
else:
print("No such part for multiclass classification")
if __name__ == "__main__":
main() | [
"matplotlib.pyplot.title",
"numpy.ravel",
"numpy.argmax",
"pandas.read_csv",
"numpy.ones",
"numpy.exp",
"numpy.multiply",
"matplotlib.pyplot.imshow",
"numpy.savetxt",
"numpy.identity",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.set_cmap",
"cvxopt.solvers.qp",
"matplotlib.pyplot.show"... | [((396, 428), 'numpy.array', 'np.array', (['train_data[:, 784:785]'], {}), '(train_data[:, 784:785])\n', (404, 428), True, 'import numpy as np\n'), ((951, 972), 'matplotlib.pyplot.imshow', 'plt.imshow', (['confatrix'], {}), '(confatrix)\n', (961, 972), True, 'import matplotlib.pyplot as plt\n'), ((974, 1003), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (983, 1003), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1019), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1017, 1019), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1043), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""Greens"""'], {}), "('Greens')\n", (1033, 1043), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True labels"""'], {}), "('True labels')\n", (1055, 1070), True, 'import matplotlib.pyplot as plt\n'), ((1072, 1101), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (1082, 1101), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1113), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1111, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1346), 'numpy.multiply', 'np.multiply', (['train_data', 'train_output'], {}), '(train_data, train_output)\n', (1320, 1346), True, 'import numpy as np\n'), ((1482, 1500), 'cvxopt.matrix', 'cvxopt.matrix', (['(0.0)'], {}), '(0.0)\n', (1495, 1500), False, 'import cvxopt\n'), ((1535, 1549), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (1546, 1549), True, 'import numpy as np\n'), ((1601, 1617), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (1609, 1617), True, 'import numpy as np\n'), ((1703, 1738), 'cvxopt.solvers.qp', 'cvxopt.solvers.qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (1720, 1738), False, 'import cvxopt\n'), ((1971, 1997), 'numpy.ravel', 'np.ravel', 
(["kernel_soln['x']"], {}), "(kernel_soln['x'])\n", (1979, 1997), True, 'import numpy as np\n'), ((2402, 2499), 'numpy.savetxt', 'np.savetxt', (['"""linear_support_vector_indices.txt"""', 'langrangian_params'], {'delimiter': '""", """', 'fmt': '"""%d"""'}), "('linear_support_vector_indices.txt', langrangian_params,\n delimiter=', ', fmt='%d')\n", (2412, 2499), True, 'import numpy as np\n'), ((3712, 3739), 'numpy.exp', 'np.exp', (['(-1 * gamma * kernel)'], {}), '(-1 * gamma * kernel)\n', (3718, 3739), True, 'import numpy as np\n'), ((3911, 3929), 'cvxopt.matrix', 'cvxopt.matrix', (['(0.0)'], {}), '(0.0)\n', (3924, 3929), False, 'import cvxopt\n'), ((3966, 3980), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (3977, 3980), True, 'import numpy as np\n'), ((4032, 4048), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (4040, 4048), True, 'import numpy as np\n'), ((4134, 4169), 'cvxopt.solvers.qp', 'cvxopt.solvers.qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (4151, 4169), False, 'import cvxopt\n'), ((4456, 4482), 'numpy.ravel', 'np.ravel', (["kernel_soln['x']"], {}), "(kernel_soln['x'])\n", (4464, 4482), True, 'import numpy as np\n'), ((5131, 5230), 'numpy.savetxt', 'np.savetxt', (['"""gaussian_support_vector_indices.txt"""', 'langrangian_params'], {'delimiter': '""", """', 'fmt': '"""%d"""'}), "('gaussian_support_vector_indices.txt', langrangian_params,\n delimiter=', ', fmt='%d')\n", (5141, 5230), True, 'import numpy as np\n'), ((8537, 8563), 'numpy.asmatrix', 'np.asmatrix', (['kernel_soln_x'], {}), '(kernel_soln_x)\n', (8548, 8563), True, 'import numpy as np\n'), ((826, 859), 'numpy.asmatrix', 'np.asmatrix', (['train_data[:, 0:784]'], {}), '(train_data[:, 0:784])\n', (837, 859), True, 'import numpy as np\n'), ((859, 884), 'numpy.asmatrix', 'np.asmatrix', (['train_output'], {}), '(train_output)\n', (870, 884), True, 'import numpy as np\n'), ((1512, 1526), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (1523, 1526), True, 
'import numpy as np\n'), ((1569, 1592), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (1578, 1592), True, 'import numpy as np\n'), ((1633, 1648), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (1640, 1648), True, 'import numpy as np\n'), ((1667, 1690), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (1676, 1690), True, 'import numpy as np\n'), ((2093, 2122), 'numpy.zeros', 'np.zeros', (['(1, n)'], {'dtype': 'float'}), '((1, n), dtype=float)\n', (2101, 2122), True, 'import numpy as np\n'), ((3522, 3551), 'numpy.zeros', 'np.zeros', (['(m, m)'], {'dtype': 'float'}), '((m, m), dtype=float)\n', (3530, 3551), True, 'import numpy as np\n'), ((3943, 3957), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (3954, 3957), True, 'import numpy as np\n'), ((4000, 4023), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (4009, 4023), True, 'import numpy as np\n'), ((4064, 4079), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (4071, 4079), True, 'import numpy as np\n'), ((4098, 4121), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (4107, 4121), True, 'import numpy as np\n'), ((4511, 4546), 'numpy.multiply', 'np.multiply', (['train_data', 'train_data'], {}), '(train_data, train_data)\n', (4522, 4546), True, 'import numpy as np\n'), ((4571, 4604), 'numpy.multiply', 'np.multiply', (['test_data', 'test_data'], {}), '(test_data, test_data)\n', (4582, 4604), True, 'import numpy as np\n'), ((8224, 8253), 'numpy.argmax', 'np.argmax', (['prediction_dict[i]'], {}), '(prediction_dict[i])\n', (8233, 8253), True, 'import numpy as np\n'), ((8275, 8295), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (8283, 8295), True, 'import numpy as np\n'), ((8584, 8619), 'numpy.multiply', 'np.multiply', (['train_data', 'train_data'], {}), '(train_data, train_data)\n', (8595, 8619), True, 'import numpy as np\n'), ((8644, 8677), 'numpy.multiply', 'np.multiply', 
(['test_data', 'test_data'], {}), '(test_data, test_data)\n', (8655, 8677), True, 'import numpy as np\n'), ((8787, 8828), 'numpy.multiply', 'np.multiply', (['raveled', '(raveled > tolerance)'], {}), '(raveled, raveled > tolerance)\n', (8798, 8828), True, 'import numpy as np\n'), ((8849, 8880), 'numpy.nonzero', 'np.nonzero', (['(raveled > tolerance)'], {}), '(raveled > tolerance)\n', (8859, 8880), True, 'import numpy as np\n'), ((10951, 10980), 'numpy.argmax', 'np.argmax', (['prediction_dict[i]'], {}), '(prediction_dict[i])\n', (10960, 10980), True, 'import numpy as np\n'), ((325, 373), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'header': 'None', 'dtype': 'float'}), '(data_path, header=None, dtype=float)\n', (336, 373), True, 'import pandas as pd\n'), ((549, 620), 'numpy.ix_', 'np.ix_', (['((train_data[:, 784] == digit1) | (train_data[:, 784] == digit2))'], {}), '((train_data[:, 784] == digit1) | (train_data[:, 784] == digit2))\n', (555, 620), True, 'import numpy as np\n'), ((1416, 1431), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (1423, 1431), True, 'import numpy as np\n'), ((2672, 2703), 'numpy.savetxt', 'np.savetxt', (['f', 'line'], {'fmt': '"""%.2f"""'}), "(f, line, fmt='%.2f')\n", (2682, 2703), True, 'import numpy as np\n'), ((3845, 3860), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (3852, 3860), True, 'import numpy as np\n'), ((12731, 12771), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'predicted'], {}), '(test_output, predicted)\n', (12747, 12771), False, 'from sklearn.metrics import confusion_matrix\n'), ((13929, 13970), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'prediction'], {}), '(test_output, prediction)\n', (13945, 13970), False, 'from sklearn.metrics import confusion_matrix\n'), ((13326, 13366), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'predicted'], {}), '(test_output, predicted)\n', (13342, 13366), False, 'from sklearn.metrics 
import confusion_matrix\n'), ((14153, 14194), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_output', 'prediction'], {}), '(test_output, prediction)\n', (14169, 14194), False, 'from sklearn.metrics import confusion_matrix\n'), ((7303, 7329), 'numpy.ravel', 'np.ravel', (["kernel_soln['x']"], {}), "(kernel_soln['x'])\n", (7311, 7329), True, 'import numpy as np\n'), ((14351, 14380), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {'dtype': 'float'}), '((1, 5), dtype=float)\n', (14359, 14380), True, 'import numpy as np\n'), ((14398, 14427), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {'dtype': 'float'}), '((1, 5), dtype=float)\n', (14406, 14427), True, 'import numpy as np\n'), ((5740, 5812), 'numpy.exp', 'np.exp', (['(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))'], {}), '(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))\n', (5746, 5812), True, 'import numpy as np\n'), ((9358, 9430), 'numpy.exp', 'np.exp', (['(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))'], {}), '(-1 * gamma * (X_train - 2 * X_train_X_test[:, i] + X_test[i, 0]))\n', (9364, 9430), True, 'import numpy as np\n'), ((5474, 5562), 'numpy.multiply', 'np.multiply', (['(train_data - train_data[sv_idx, :])', '(train_data - train_data[sv_idx, :])'], {}), '(train_data - train_data[sv_idx, :], train_data - train_data[\n sv_idx, :])\n', (5485, 5562), True, 'import numpy as np\n'), ((9129, 9217), 'numpy.multiply', 'np.multiply', (['(train_data - train_data[sv_idx, :])', '(train_data - train_data[sv_idx, :])'], {}), '(train_data - train_data[sv_idx, :], train_data - train_data[\n sv_idx, :])\n', (9140, 9217), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
'''Correction for gaseous absorption based on SMAC method (Rahman and Dedieu, 1994)
'''
from math import *
import numpy as np
# =============================================================================================
def PdeZ(Z):
"""
PdeZ : Atmospheric pressure (in hpa) as a function of altitude (in meters)
"""
p = 1013.25 * pow(1 - 0.0065 * Z / 288.15, 5.31)
return (p)
# =============================================================================================
class coeff:
'''
library for atmospheric correction using SMAC method (Rahman and Dedieu, 1994)
Contains :
smac_inv : inverse smac model for atmospheric correction
TOA==>Surface
smac dir : direct smac model
Surface==>TOA
coefs : reads smac coefficients
PdeZ : # PdeZ : Atmospheric pressure (in hpa) as a function of altitude (in meters)
Written by <NAME>, from the original SMAC C routine
=============================================================================================
'''
def __init__(self, smac_filename):
with file(smac_filename) as f:
lines = f.readlines()
# H20
temp = lines[0].strip().split()
self.ah2o = float(temp[0])
self.nh2o = float(temp[1])
# O3
temp = lines[1].strip().split()
self.ao3 = float(temp[0])
self.no3 = float(temp[1])
# O2
temp = lines[2].strip().split()
self.ao2 = float(temp[0])
self.no2 = float(temp[1])
self.po2 = float(temp[2])
# CO2
temp = lines[3].strip().split()
self.aco2 = float(temp[0])
self.nco2 = float(temp[1])
self.pco2 = float(temp[2])
# NH4
temp = lines[4].strip().split()
self.ach4 = float(temp[0])
self.nch4 = float(temp[1])
self.pch4 = float(temp[2])
# NO2
temp = lines[5].strip().split()
self.ano2 = float(temp[0])
self.nno2 = float(temp[1])
self.pno2 = float(temp[2])
# CO
temp = lines[6].strip().split()
self.aco = float(temp[0])
self.nco = float(temp[1])
self.pco = float(temp[2])
# rayleigh and aerosol scattering
temp = lines[7].strip().split()
self.a0s = float(temp[0])
self.a1s = float(temp[1])
self.a2s = float(temp[2])
self.a3s = float(temp[3])
temp = lines[8].strip().split()
self.a0T = float(temp[0])
self.a1T = float(temp[1])
self.a2T = float(temp[2])
self.a3T = float(temp[3])
temp = lines[9].strip().split()
self.taur = float(temp[0])
self.sr = float(temp[0])
temp = lines[10].strip().split()
self.a0taup = float(temp[0])
self.a1taup = float(temp[1])
temp = lines[11].strip().split()
self.wo = float(temp[0])
self.gc = float(temp[1])
temp = lines[12].strip().split()
self.a0P = float(temp[0])
self.a1P = float(temp[1])
self.a2P = float(temp[2])
temp = lines[13].strip().split()
self.a3P = float(temp[0])
self.a4P = float(temp[1])
temp = lines[14].strip().split()
self.Rest1 = float(temp[0])
self.Rest2 = float(temp[1])
temp = lines[15].strip().split()
self.Rest3 = float(temp[0])
self.Rest4 = float(temp[1])
temp = lines[16].strip().split()
self.Resr1 = float(temp[0])
self.Resr2 = float(temp[1])
self.Resr3 = float(temp[2])
temp = lines[17].strip().split()
self.Resa1 = float(temp[0])
self.Resa2 = float(temp[1])
temp = lines[18].strip().split()
self.Resa3 = float(temp[0])
self.Resa4 = float(temp[1])
# ======================================================================
def smac_inv(r_toa, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef):
    """Atmospheric correction (SMAC): invert a TOA reflectance to a surface reflectance.

    r_surf = smac_inv(r_toa, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef)

    Args:
        r_toa: top-of-atmosphere reflectance.
        tetas, phis: sun zenith and azimuth angles [degrees].
        tetav, phiv: view zenith and azimuth angles [degrees].
        pressure: surface pressure [hPa] (normalized against 1013.25 below).
        taup550: aerosol optical depth at 550 nm.
        uo3: ozone content.
        uh2o: water-vapour content.
        coef: SMAC coefficient object for the spectral band (see the coeff class).

    Returns:
        The surface reflectance corresponding to r_toa.
    """
    # Unpack the band-specific SMAC coefficients into locals.
    ah2o = coef.ah2o
    nh2o = coef.nh2o
    ao3 = coef.ao3
    no3 = coef.no3
    ao2 = coef.ao2
    no2 = coef.no2
    po2 = coef.po2
    aco2 = coef.aco2
    nco2 = coef.nco2
    pco2 = coef.pco2
    ach4 = coef.ach4
    nch4 = coef.nch4
    pch4 = coef.pch4
    ano2 = coef.ano2
    nno2 = coef.nno2
    pno2 = coef.pno2
    aco = coef.aco
    nco = coef.nco
    pco = coef.pco
    a0s = coef.a0s
    a1s = coef.a1s
    a2s = coef.a2s
    a3s = coef.a3s
    a0T = coef.a0T
    a1T = coef.a1T
    a2T = coef.a2T
    a3T = coef.a3T
    taur = coef.taur
    sr = coef.sr  # NOTE(review): read but never used below
    a0taup = coef.a0taup
    a1taup = coef.a1taup
    wo = coef.wo
    gc = coef.gc
    a0P = coef.a0P
    a1P = coef.a1P
    a2P = coef.a2P
    a3P = coef.a3P
    a4P = coef.a4P
    Rest1 = coef.Rest1
    Rest2 = coef.Rest2
    Rest3 = coef.Rest3
    Rest4 = coef.Rest4
    Resr1 = coef.Resr1
    Resr2 = coef.Resr2
    Resr3 = coef.Resr3
    Resa1 = coef.Resa1
    Resa2 = coef.Resa2
    Resa3 = coef.Resa3
    Resa4 = coef.Resa4
    cdr = pi / 180  # degrees -> radians
    crd = 180 / pi  # radians -> degrees
    # ----- compute the SMAC surface reflectance -----
    us = cos(tetas * cdr)
    uv = cos(tetav * cdr)
    Peq = pressure / 1013.25
    # /*------: 1) air mass */
    m = 1 / us + 1 / uv
    # /*------: 2) aerosol optical depth in the spectral band, taup :--------*/
    taup = (a0taup) + (a1taup) * taup550
    # /*------: 3) gaseous transmissions (downward and upward paths) :--------*/
    # defaults below are immediately overwritten in step 4
    to3 = 1.
    th2o = 1.
    to2 = 1.
    tco2 = 1.
    tch4 = 1.
    uo2 = (Peq ** (po2))
    uco2 = (Peq ** (pco2))
    uch4 = (Peq ** (pch4))
    uno2 = (Peq ** (pno2))
    uco = (Peq ** (pco))
    # /*------: 4) if uh2o <= 0 and uo3 <=0 no gaseous absorption is computed :--------*/
    to3 = exp((ao3) * ((uo3 * m) ** (no3)))
    th2o = exp((ah2o) * ((uh2o * m) ** (nh2o)))
    to2 = exp((ao2) * ((uo2 * m) ** (no2)))
    tco2 = exp((aco2) * ((uco2 * m) ** (nco2)))
    tch4 = exp((ach4) * ((uch4 * m) ** (nch4)))
    tno2 = exp((ano2) * ((uno2 * m) ** (nno2)))
    tco = exp((aco) * ((uco * m) ** (nco)))
    tg = th2o * to3 * to2 * tco2 * tch4 * tco * tno2
    # /*------: 5) Total scattering transmission :--------*/
    ttetas = (a0T) + (a1T) * taup550 / us + ((a2T) * Peq + (a3T)) / (1. + us)  # /* downward */
    ttetav = (a0T) + (a1T) * taup550 / uv + ((a2T) * Peq + (a3T)) / (1. + uv)  # /* upward */
    # /*------: 6) spherical albedo of the atmosphere :--------*/
    s = (a0s) * Peq + (a3s) + (a1s) * taup550 + (a2s) * (taup550 ** 2)
    # /*------: 7) scattering angle cosine :--------*/
    cksi = - ((us * uv) + (sqrt(1. - us * us) * sqrt(1. - uv * uv) * cos((phis - phiv) * cdr)))
    if (cksi < -1):
        cksi = -1.0
    # /*------: 8) scattering angle in degree :--------*/
    ksiD = crd * acos(cksi)
    # /*------: 9) rayleigh atmospheric reflectance :--------*/
    ray_phase = 0.7190443 * (1. + (cksi * cksi)) + 0.0412742
    ray_ref = (taur * ray_phase) / (4 * us * uv)
    ray_ref = ray_ref * pressure / 1013.25
    taurz = (taur) * Peq
    # /*------: 10) Rayleigh residual :--------*/
    Res_ray = Resr1 + Resr2 * taur * ray_phase / (us * uv) + Resr3 * ((taur * ray_phase / (us * uv)) ** 2)
    # /*------: 11) aerosol atmospheric reflectance :--------*/
    aer_phase = a0P + a1P * ksiD + a2P * ksiD * ksiD + a3P * (ksiD ** 3) + a4P * (ksiD ** 4)
    ak2 = (1. - wo) * (3. - wo * 3 * gc)
    ak = sqrt(ak2)
    e = -3 * us * us * wo / (4 * (1. - ak2 * us * us))
    f = -(1. - wo) * 3 * gc * us * us * wo / (4 * (1. - ak2 * us * us))
    dp = e / (3 * us) + us * f
    d = e + f
    b = 2 * ak / (3. - wo * 3 * gc)
    delta = np.exp(ak * taup) * (1. + b) * (1. + b) - np.exp(-ak * taup) * (1. - b) * (1. - b)
    ww = wo / 4.
    ss = us / (1. - ak2 * us * us)
    q1 = 2. + 3 * us + (1. - wo) * 3 * gc * us * (1. + 2 * us)
    q2 = 2. - 3 * us - (1. - wo) * 3 * gc * us * (1. - 2 * us)
    q3 = q2 * np.exp(-taup / us)
    c1 = ((ww * ss) / delta) * (q1 * np.exp(ak * taup) * (1. + b) + q3 * (1. - b))
    c2 = -((ww * ss) / delta) * (q1 * np.exp(-ak * taup) * (1. - b) + q3 * (1. + b))
    cp1 = c1 * ak / (3. - wo * 3 * gc)
    cp2 = -c2 * ak / (3. - wo * 3 * gc)
    z = d - wo * 3 * gc * uv * dp + wo * aer_phase / 4.
    x = c1 - wo * 3 * gc * uv * cp1
    y = c2 - wo * 3 * gc * uv * cp2
    aa1 = uv / (1. + ak * uv)
    aa2 = uv / (1. - ak * uv)
    aa3 = us * uv / (us + uv)
    aer_ref = x * aa1 * (1. - np.exp(-taup / aa1))
    aer_ref = aer_ref + y * aa2 * (1. - np.exp(-taup / aa2))
    aer_ref = aer_ref + z * aa3 * (1. - np.exp(-taup / aa3))
    aer_ref = aer_ref / (us * uv)
    # /*------: 12) aerosol residual :--------*/
    Res_aer = (Resa1 + Resa2 * (taup * m * cksi) + Resa3 * ((taup * m * cksi) ** 2)) + Resa4 * ((taup * m * cksi) ** 3)
    # /*------: 13) molecule / aerosol coupling term :--------*/
    tautot = taup + taurz
    Res_6s = (Rest1 + Rest2 * (tautot * m * cksi) + Rest3 * ((tautot * m * cksi) ** 2)) + Rest4 * (
        (tautot * m * cksi) ** 3)
    # /*------: 14) total atmospheric reflectance :--------*/
    atm_ref = ray_ref - Res_ray + aer_ref - Res_aer + Res_6s
    # /*------: 15) Surface reflectance :--------*/
    r_surf = r_toa - (atm_ref * tg)
    r_surf = r_surf / ((tg * ttetas * ttetav) + (r_surf * s))
    return r_surf
# =======================================================================================================
def smac_dir(r_surf, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef):
    """Direct SMAC model: apply atmospheric effects to a surface reflectance.

    r_toa = smac_dir(r_surf, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef)

    Inverse of smac_inv: given a surface reflectance, produce the corresponding
    top-of-atmosphere reflectance. Arguments are the same as smac_inv except
    the first one is the surface reflectance r_surf.

    Returns:
        The top-of-atmosphere reflectance corresponding to r_surf.
    """
    # Unpack the band-specific SMAC coefficients into locals.
    ah2o = coef.ah2o
    nh2o = coef.nh2o
    ao3 = coef.ao3
    no3 = coef.no3
    ao2 = coef.ao2
    no2 = coef.no2
    po2 = coef.po2
    aco2 = coef.aco2
    nco2 = coef.nco2
    pco2 = coef.pco2
    ach4 = coef.ach4
    nch4 = coef.nch4
    pch4 = coef.pch4
    ano2 = coef.ano2
    nno2 = coef.nno2
    pno2 = coef.pno2
    aco = coef.aco
    nco = coef.nco
    pco = coef.pco
    a0s = coef.a0s
    a1s = coef.a1s
    a2s = coef.a2s
    a3s = coef.a3s
    a0T = coef.a0T
    a1T = coef.a1T
    a2T = coef.a2T
    a3T = coef.a3T
    taur = coef.taur
    sr = coef.sr  # NOTE(review): read but never used below
    a0taup = coef.a0taup
    a1taup = coef.a1taup
    wo = coef.wo
    gc = coef.gc
    a0P = coef.a0P
    a1P = coef.a1P
    a2P = coef.a2P
    a3P = coef.a3P
    a4P = coef.a4P
    Rest1 = coef.Rest1
    Rest2 = coef.Rest2
    Rest3 = coef.Rest3
    Rest4 = coef.Rest4
    Resr1 = coef.Resr1
    Resr2 = coef.Resr2
    Resr3 = coef.Resr3
    Resa1 = coef.Resa1
    Resa2 = coef.Resa2
    Resa3 = coef.Resa3
    Resa4 = coef.Resa4
    cdr = pi / 180  # degrees -> radians
    crd = 180 / pi  # radians -> degrees
    # ----- compute the SMAC TOA reflectance -----
    us = cos(tetas * cdr)
    uv = cos(tetav * cdr)
    Peq = pressure / 1013.25
    # /*------: 1) air mass */
    m = 1 / us + 1 / uv
    # /*------: 2) aerosol optical depth in the spectral band, taup :--------*/
    taup = (a0taup) + (a1taup) * taup550
    # /*------: 3) gaseous transmissions (downward and upward paths) :--------*/
    # defaults below are immediately overwritten in step 4
    to3 = 1.
    th2o = 1.
    to2 = 1.
    tco2 = 1.
    tch4 = 1.
    uo2 = (Peq ** (po2))
    uco2 = (Peq ** (pco2))
    uch4 = (Peq ** (pch4))
    uno2 = (Peq ** (pno2))
    uco = (Peq ** (pco))
    # /*------: 4) if uh2o <= 0 and uo3<= 0 no gaseous absorption is computed :--------*/
    to3 = exp((ao3) * ((uo3 * m) ** (no3)))
    th2o = exp((ah2o) * ((uh2o * m) ** (nh2o)))
    to2 = exp((ao2) * ((uo2 * m) ** (no2)))
    tco2 = exp((aco2) * ((uco2 * m) ** (nco2)))
    tch4 = exp((ach4) * ((uch4 * m) ** (nch4)))
    tno2 = exp((ano2) * ((uno2 * m) ** (nno2)))
    tco = exp((aco) * ((uco * m) ** (nco)))
    tg = th2o * to3 * to2 * tco2 * tch4 * tco * tno2
    # /*------: 5) Total scattering transmission :--------*/
    ttetas = (a0T) + (a1T) * taup550 / us + ((a2T) * Peq + (a3T)) / (1. + us)  # /* downward */
    ttetav = (a0T) + (a1T) * taup550 / uv + ((a2T) * Peq + (a3T)) / (1. + uv)  # /* upward */
    # /*------: 6) spherical albedo of the atmosphere :--------*/
    s = (a0s) * Peq + (a3s) + (a1s) * taup550 + (a2s) * (taup550 ** 2)
    # /*------: 7) scattering angle cosine :--------*/
    # NOTE: the extra -360 deg is harmless — cos is 360-periodic, so this
    # equals the smac_inv expression cos((phis - phiv) * cdr).
    cksi = - ((us * uv) + (sqrt(1. - us * us) * sqrt(1. - uv * uv) * cos((phis - phiv - 360) * cdr)))
    if (cksi < -1):
        cksi = -1.0
    # /*------: 8) scattering angle in degree :--------*/
    ksiD = crd * acos(cksi)
    # /*------: 9) rayleigh atmospheric reflectance :--------*/
    ray_phase = 0.7190443 * (1. + (cksi * cksi)) + 0.0412742
    ray_ref = (taur * ray_phase) / (4 * us * uv)
    ray_ref = ray_ref * pressure / 1013.25
    taurz = (taur) * Peq
    # /*------: 10) Rayleigh residual :--------*/
    Res_ray = Resr1 + Resr2 * taur * ray_phase / (us * uv) + Resr3 * ((taur * ray_phase / (us * uv)) ** 2)
    # /*------: 11) aerosol atmospheric reflectance :--------*/
    aer_phase = a0P + a1P * ksiD + a2P * ksiD * ksiD + a3P * (ksiD ** 3) + a4P * (ksiD ** 4)
    ak2 = (1. - wo) * (3. - wo * 3 * gc)
    ak = sqrt(ak2)
    e = -3 * us * us * wo / (4 * (1. - ak2 * us * us))
    f = -(1. - wo) * 3 * gc * us * us * wo / (4 * (1. - ak2 * us * us))
    dp = e / (3 * us) + us * f
    d = e + f
    b = 2 * ak / (3. - wo * 3 * gc)
    delta = np.exp(ak * taup) * (1. + b) * (1. + b) - np.exp(-ak * taup) * (1. - b) * (1. - b)
    ww = wo / 4.
    ss = us / (1. - ak2 * us * us)
    q1 = 2. + 3 * us + (1. - wo) * 3 * gc * us * (1. + 2 * us)
    q2 = 2. - 3 * us - (1. - wo) * 3 * gc * us * (1. - 2 * us)
    q3 = q2 * np.exp(-taup / us)
    c1 = ((ww * ss) / delta) * (q1 * np.exp(ak * taup) * (1. + b) + q3 * (1. - b))
    c2 = -((ww * ss) / delta) * (q1 * np.exp(-ak * taup) * (1. - b) + q3 * (1. + b))
    cp1 = c1 * ak / (3. - wo * 3 * gc)
    cp2 = -c2 * ak / (3. - wo * 3 * gc)
    z = d - wo * 3 * gc * uv * dp + wo * aer_phase / 4.
    x = c1 - wo * 3 * gc * uv * cp1
    y = c2 - wo * 3 * gc * uv * cp2
    aa1 = uv / (1. + ak * uv)
    aa2 = uv / (1. - ak * uv)
    aa3 = us * uv / (us + uv)
    aer_ref = x * aa1 * (1. - np.exp(-taup / aa1))
    aer_ref = aer_ref + y * aa2 * (1. - np.exp(-taup / aa2))
    aer_ref = aer_ref + z * aa3 * (1. - np.exp(-taup / aa3))
    aer_ref = aer_ref / (us * uv)
    # /*------: 12) aerosol residual :--------*/
    Res_aer = (Resa1 + Resa2 * (taup * m * cksi) + Resa3 * ((taup * m * cksi) ** 2)) + Resa4 * ((taup * m * cksi) ** 3)
    # /*------: 13) molecule / aerosol coupling term :--------*/
    tautot = taup + taurz
    Res_6s = (Rest1 + Rest2 * (tautot * m * cksi) + Rest3 * ((tautot * m * cksi) ** 2)) + Rest4 * (
        (tautot * m * cksi) ** 3)
    # /*------: 14) total atmospheric reflectance :--------*/
    atm_ref = ray_ref - Res_ray + aer_ref - Res_aer + Res_6s
    # /*------: 15) TOA reflectance :--------*/
    r_toa = r_surf * tg * ttetas * ttetav / (1 - r_surf * s) + (atm_ref * tg)
    return r_toa
# =============================================================================
if __name__ == "__main__":
    # Round-trip example: invert a TOA reflectance to the surface, then
    # re-apply the atmosphere; r_toa2 should come back close to r_toa.
    theta_s = 45    # sun zenith angle [deg]
    theta_v = 5     # view zenith angle [deg]
    phi_s = 200     # sun azimuth angle [deg]
    phi_v = -160    # view azimuth angle [deg]
    r_toa = 0.2
    # ---- read the SMAC coefficients for the chosen band ----
    nom_smac = 'COEFS/coef_FORMOSAT2_B1_CONT.dat'
    coefs = coeff(nom_smac)
    bd = 1  # NOTE(review): unused below
    r_surf = smac_inv(r_toa, theta_s, phi_s, theta_v, phi_v, 1013, 0.1, 0.3, 0.3, coefs)
    r_toa2 = smac_dir(r_surf, theta_s, phi_s, theta_v, phi_v, 1013, 0.1, 0.3, 0.3, coefs)
    print(r_toa, r_surf, r_toa2)
| [
"numpy.exp"
] | [((8207, 8225), 'numpy.exp', 'np.exp', (['(-taup / us)'], {}), '(-taup / us)\n', (8213, 8225), True, 'import numpy as np\n'), ((14032, 14050), 'numpy.exp', 'np.exp', (['(-taup / us)'], {}), '(-taup / us)\n', (14038, 14050), True, 'import numpy as np\n'), ((8722, 8741), 'numpy.exp', 'np.exp', (['(-taup / aa1)'], {}), '(-taup / aa1)\n', (8728, 8741), True, 'import numpy as np\n'), ((14547, 14566), 'numpy.exp', 'np.exp', (['(-taup / aa1)'], {}), '(-taup / aa1)\n', (14553, 14566), True, 'import numpy as np\n'), ((7932, 7949), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (7938, 7949), True, 'import numpy as np\n'), ((7974, 7992), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', (7980, 7992), True, 'import numpy as np\n'), ((8783, 8802), 'numpy.exp', 'np.exp', (['(-taup / aa2)'], {}), '(-taup / aa2)\n', (8789, 8802), True, 'import numpy as np\n'), ((8844, 8863), 'numpy.exp', 'np.exp', (['(-taup / aa3)'], {}), '(-taup / aa3)\n', (8850, 8863), True, 'import numpy as np\n'), ((13757, 13774), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (13763, 13774), True, 'import numpy as np\n'), ((13799, 13817), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', (13805, 13817), True, 'import numpy as np\n'), ((14608, 14627), 'numpy.exp', 'np.exp', (['(-taup / aa2)'], {}), '(-taup / aa2)\n', (14614, 14627), True, 'import numpy as np\n'), ((14669, 14688), 'numpy.exp', 'np.exp', (['(-taup / aa3)'], {}), '(-taup / aa3)\n', (14675, 14688), True, 'import numpy as np\n'), ((8263, 8280), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (8269, 8280), True, 'import numpy as np\n'), ((8347, 8365), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', (8353, 8365), True, 'import numpy as np\n'), ((14088, 14105), 'numpy.exp', 'np.exp', (['(ak * taup)'], {}), '(ak * taup)\n', (14094, 14105), True, 'import numpy as np\n'), ((14172, 14190), 'numpy.exp', 'np.exp', (['(-ak * taup)'], {}), '(-ak * taup)\n', 
(14178, 14190), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import math
import os
import glob
import scipy.io
#=======================================================================================================================
#Helper functions to load pretrained weights
#=======================================================================================================================
def get_weight(weight_name, weight_dict):
    """Look up a pretrained weight array by layer name.

    Args:
        weight_name: key identifying the layer.
        weight_dict: mapping of layer name -> weight array, or None.

    Returns:
        The stored weight, or None when the dict is missing or lacks the key.
    """
    if weight_dict is None:
        print("Can't find weight")
        return None
    # dict.get already yields None for unknown names
    return weight_dict.get(weight_name)
def load_weights(weight_dir):
    """Load every '*.txt.npz' weight file under ``weight_dir`` into a dict.

    Keys are the file basenames up to the first dot; values are the stored
    'arr_0' arrays.

    NOTE(review): this definition is shadowed by a second ``load_weights``
    defined further down this file; only the later one (which omits the
    per-array shape print) is effective at runtime.
    """
    weight_path_all = glob.glob(os.path.join(weight_dir, "*.txt.npz"))
    pretrained_weight_dict = {}
    print(len(weight_path_all))
    for path in weight_path_all:
        with np.load(path) as data:
            # layer name = basename up to the first '.'
            layer_name = os.path.basename(path).split('.')[0]
            print(layer_name)
            pretrained_weight_dict[layer_name] = data['arr_0']
            print(data['arr_0'].shape)
    return pretrained_weight_dict
def load_z_mapping_function(z, output_channel, weight, bias, scope, act=None):
    """Apply a frozen (pretrained) affine map to ``z`` and split the result.

    Builds non-trainable TF1 variables from the given pretrained ``weight``
    and ``bias``, computes act(z @ w + b), and returns the first
    ``output_channel`` columns and the remainder as two tensors.

    NOTE(review): ``lrelu`` is not defined in this file — presumably imported
    or defined elsewhere in the project; verify before use. Any ``act`` other
    than "lrelu" is called directly, so ``act=None`` would raise TypeError.
    """
    with tf.variable_scope(scope) as sc:
        w = tf.get_variable('w', initializer=weight, trainable=False)
        b = tf.get_variable('biases', initializer=bias, trainable=False)
        if act == "lrelu":
            print ("LRELU")
            out = lrelu(tf.matmul(z, w) + b)
        else:
            out = act(tf.matmul(z, w) + b)
        return out[:, :output_channel], out[:, output_channel:]
def load_weights(weight_dir):
    """Load every '*.txt.npz' weight file under ``weight_dir`` into a dict.

    Keys are the file basenames up to the first dot; values are the stored
    'arr_0' arrays. This redefinition shadows the earlier ``load_weights``
    in this file (it differs only by not printing each array's shape).
    """
    weight_path_all = glob.glob(os.path.join(weight_dir, "*.txt.npz"))
    pretrained_weight_dict = {}
    print(len(weight_path_all))
    for path in weight_path_all:
        with np.load(path) as data:
            # layer name = basename up to the first '.'
            layer_name = os.path.basename(path).split('.')[0]
            print(layer_name)
            pretrained_weight_dict[layer_name] = data['arr_0']
    return pretrained_weight_dict
#=======================================================================================================================
def save_txt_file(pred, name, SAVE_DIR):
    """Write one prediction per line to ``<SAVE_DIR>/<name>.txt``."""
    out_path = os.path.join(SAVE_DIR, "{0}.txt".format(name))
    with open(out_path, 'w') as fp:
        fp.writelines("{0}\n".format(item) for item in pred)
def transform_tensor_to_image(tensor):
    """Swap the two spatial axes of a 4-D tensor, then flip the new axis 1."""
    return tf.transpose(tensor, [0, 2, 1, 3])[:, ::-1, :, :]
def transform_voxel_to_match_image(tensor):
    """Reorient a 5-D voxel tensor: swap axes 1 and 2, then flip the new axis 1."""
    return tf.transpose(tensor, [0, 2, 1, 3, 4])[:, ::-1, :, :, :]
def transform_image_to_match_voxel(tensor):
    """Inverse-style reorientation of a 4-D image tensor: swap axes 1 and 2, flip the new axis 1."""
    return tf.transpose(tensor, [0, 2, 1, 3])[:, ::-1, :, :]
def np_transform_tensor_to_image(tensor):
    """Swap axes 1 and 2 of a 4-D numpy array.

    NOTE(review): unlike the tf ``transform_tensor_to_image`` twin this does
    NOT flip the rows afterwards — confirm the asymmetry is intentional.
    """
    return np.transpose(tensor, [0, 2, 1, 3])
| [
"numpy.load",
"os.path.basename",
"numpy.transpose",
"tensorflow.variable_scope",
"tensorflow.transpose",
"tensorflow.matmul",
"os.path.join",
"tensorflow.get_variable"
] | [((2376, 2410), 'tensorflow.transpose', 'tf.transpose', (['tensor', '[0, 2, 1, 3]'], {}), '(tensor, [0, 2, 1, 3])\n', (2388, 2410), True, 'import tensorflow as tf\n'), ((2497, 2534), 'tensorflow.transpose', 'tf.transpose', (['tensor', '[0, 2, 1, 3, 4]'], {}), '(tensor, [0, 2, 1, 3, 4])\n', (2509, 2534), True, 'import tensorflow as tf\n'), ((2649, 2683), 'tensorflow.transpose', 'tf.transpose', (['tensor', '[0, 2, 1, 3]'], {}), '(tensor, [0, 2, 1, 3])\n', (2661, 2683), True, 'import tensorflow as tf\n'), ((2789, 2823), 'numpy.transpose', 'np.transpose', (['tensor', '[0, 2, 1, 3]'], {}), '(tensor, [0, 2, 1, 3])\n', (2801, 2823), True, 'import numpy as np\n'), ((677, 714), 'os.path.join', 'os.path.join', (['weight_dir', '"""*.txt.npz"""'], {}), "(weight_dir, '*.txt.npz')\n", (689, 714), False, 'import os\n'), ((1166, 1190), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (1183, 1190), True, 'import tensorflow as tf\n'), ((1210, 1267), 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""'], {'initializer': 'weight', 'trainable': '(False)'}), "('w', initializer=weight, trainable=False)\n", (1225, 1267), True, 'import tensorflow as tf\n'), ((1280, 1340), 'tensorflow.get_variable', 'tf.get_variable', (['"""biases"""'], {'initializer': 'bias', 'trainable': '(False)'}), "('biases', initializer=bias, trainable=False)\n", (1295, 1340), True, 'import tensorflow as tf\n'), ((1623, 1660), 'os.path.join', 'os.path.join', (['weight_dir', '"""*.txt.npz"""'], {}), "(weight_dir, '*.txt.npz')\n", (1635, 1660), False, 'import os\n'), ((826, 839), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (833, 839), True, 'import numpy as np\n'), ((1772, 1785), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1779, 1785), True, 'import numpy as np\n'), ((1420, 1435), 'tensorflow.matmul', 'tf.matmul', (['z', 'w'], {}), '(z, w)\n', (1429, 1435), True, 'import tensorflow as tf\n'), ((1475, 1490), 'tensorflow.matmul', 'tf.matmul', (['z', 'w'], {}), 
'(z, w)\n', (1484, 1490), True, 'import tensorflow as tf\n'), ((874, 896), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (890, 896), False, 'import os\n'), ((1820, 1842), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1836, 1842), False, 'import os\n')] |
import copy
import pytest
import numpy as np
from cotk.dataloader import LanguageGeneration, MSCOCO
from cotk.metric import MetricBase
from cotk.wordvector.wordvector import WordVector
from cotk.wordvector.gloves import Glove
import logging
def setup_module():
    """Seed the stdlib and numpy RNGs so the whole test module is deterministic."""
    import random
    import numpy as np
    random.seed(0)
    np.random.seed(0)
class TestWordVector():
    """Shared test helpers for WordVector subclasses.

    Subclasses supply a concrete loader instance; these helpers check the
    class-registry machinery and the shape/content of loaded embeddings.
    (Fix: removed a leftover debug ``print(wordvec[1])``.)
    """

    def base_test_init(self, dl):
        """Check the registry API and that ``dl`` is a WordVector."""
        assert isinstance(dl, WordVector)
        with pytest.raises(Exception):
            WordVector.load(None, None, None)
        WordVector.get_all_subclasses()
        assert WordVector.load_class('Glove') == Glove
        assert WordVector.load_class('not_subclass') == None

    def base_test_load(self, dl):
        """Check shapes and known values of loaded vectors, including OOV words."""
        vocab_list = ['the', 'of']
        n_dims = 300
        wordvec = dl.load(n_dims, vocab_list)
        assert isinstance(wordvec, np.ndarray)
        assert wordvec.shape == (len(vocab_list), n_dims)
        assert wordvec[1][0] == -0.076947

        # unknown word: a vector is still produced (random init)
        vocab_list = ['the', 'word_not_exist']
        n_dims = 300
        wordvec = dl.load(n_dims, vocab_list)
        assert isinstance(wordvec, np.ndarray)
        assert wordvec.shape == (len(vocab_list), n_dims)
        assert wordvec[0][0] == 0.04656
@pytest.fixture
def load_glove():
    """Factory fixture: each call builds a Glove over the dummy test vectors."""
    def _make():
        return Glove("./tests/wordvector/dummy_glove")
    return _make
class TestGlove(TestWordVector):
    """Glove-specific instantiation of the shared WordVector test suite."""

    def test_init(self, load_glove):
        self.base_test_init(load_glove())

    def test_load(self, load_glove):
        self.base_test_load(load_glove())
| [
"numpy.random.seed",
"cotk.wordvector.wordvector.WordVector.load_class",
"pytest.raises",
"cotk.wordvector.wordvector.WordVector.get_all_subclasses",
"random.seed",
"cotk.wordvector.gloves.Glove",
"cotk.wordvector.wordvector.WordVector.load"
] | [((279, 293), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (290, 293), False, 'import random\n'), ((315, 332), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (329, 332), True, 'import numpy as np\n'), ((497, 528), 'cotk.wordvector.wordvector.WordVector.get_all_subclasses', 'WordVector.get_all_subclasses', ([], {}), '()\n', (526, 528), False, 'from cotk.wordvector.wordvector import WordVector\n'), ((1187, 1226), 'cotk.wordvector.gloves.Glove', 'Glove', (['"""./tests/wordvector/dummy_glove"""'], {}), "('./tests/wordvector/dummy_glove')\n", (1192, 1226), False, 'from cotk.wordvector.gloves import Glove\n'), ((432, 456), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (445, 456), False, 'import pytest\n'), ((461, 494), 'cotk.wordvector.wordvector.WordVector.load', 'WordVector.load', (['None', 'None', 'None'], {}), '(None, None, None)\n', (476, 494), False, 'from cotk.wordvector.wordvector import WordVector\n'), ((538, 568), 'cotk.wordvector.wordvector.WordVector.load_class', 'WordVector.load_class', (['"""Glove"""'], {}), "('Glove')\n", (559, 568), False, 'from cotk.wordvector.wordvector import WordVector\n'), ((587, 624), 'cotk.wordvector.wordvector.WordVector.load_class', 'WordVector.load_class', (['"""not_subclass"""'], {}), "('not_subclass')\n", (608, 624), False, 'from cotk.wordvector.wordvector import WordVector\n')] |
"""
CEASIOMpy: Conceptual Aircraft Design Software.
Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland
Functions to create the dictionnary of geometric variables needed
for the optimnization routine.
Python version: >=3.6
| Author : <NAME>
| Creation: 2020-03-24
| Last modification: 2020-06-02
TODO
----
* Expand the geometric parameters
* Add constrains between the parameters to disable multiple modifications
of the same geometric aspect of the plane
"""
# =============================================================================
# IMPORTS
# =============================================================================
from sys import exit
import numpy as np
import ceasiompy.utils.apmfunctions as apmf
import ceasiompy.utils.cpacsfunctions as cpsf
import ceasiompy.CPACSUpdater.cpacsupdater as cpud
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
# =============================================================================
# GLOBALS
# =============================================================================
# Contains the geometric design variables
geom_var_dict = {}
XPATH = 'None'
# =============================================================================
# FUNCTIONS
# =============================================================================
def add_am_to_dict(optim_var_dict, am_dict):
    """Add aeromap values to the variable dictionary.

    Non-aeromap entries are duplicated so their histories line up with the
    number of aeromap points computed per iteration; the aeromap entries
    themselves are then merged into the dictionary.

    Args:
        optim_var_dict (dict): Variable dictionary.
        am_dict (dict): Dictionary with the entire aeromap.

    Returns:
        None.
    """
    # Points per iteration = ratio of stored aeromap entries to stored
    # design-variable entries (any variable works; take the first).
    first_var = next(iter(optim_var_dict))
    am_length = int(len(am_dict['cl'][1]) / len(optim_var_dict[first_var][1]))

    log.info("Adding the whole aeromap to the dictionary")

    for name, infos in optim_var_dict.items():
        if name not in apmf.XSTATES + apmf.COEF_LIST:
            # copy the history before clearing the shared list in place
            history = list(infos[1])
            infos[1].clear()
            infos[1].extend(np.repeat(history, am_length))

    optim_var_dict.update(am_dict)
def update_am_dict(tixi, aeromap_uid, am_dict):
    """Append the latest aeromap results to the running aeromap dictionary.

    Args:
        tixi (tixi3 handle): TIXI handle of the CPACS file.
        aeromap_uid (str): uID of the aeromap in use.
        am_dict (dict): Accumulated results of previous aeromap calculations.

    Returns:
        None.
    """
    coef_dict = apmf.get_aeromap(tixi, aeromap_uid).to_dict()
    for name, infos in am_dict.items():
        infos[1].extend(coef_dict[name])
def update_dict(tixi, optim_var_dict):
    """Append the current CPACS values to each variable's history.

    After one routine iteration, every non-aeromap entry (those whose
    get-command slot holds '' or '-') reads its value back from the CPACS
    file at the xpath stored in the set-command slot and appends it.

    Args:
        tixi (tixi3 handle): TIXI handle of the CPACS file.
        optim_var_dict (dict): Variable dictionary.

    Returns:
        None.
    """
    for name, infos in optim_var_dict.items():
        # Aeromap entries carry a real get-command in infos[5]; skip them.
        if infos[5] not in ['', '-']:
            continue
        if tixi.checkElement(infos[4]):
            infos[1].append(tixi.getDoubleElement(infos[4]))
def create_var(var_name, init_value, getcmd, setcmd, lim=0.2):
    """Register a design variable in the module-level ``geom_var_dict``.

    The entry is stored as a tuple
    (name, [initial value], lower bound, upper bound, setcmd, getcmd),
    with the bounds spanning +/- ``lim`` (fraction) around the initial value;
    a zero initial value gets the symmetric interval [-lim, lim].

    Args:
        var_name (str): Name of the variable.
        init_value (float): Initial value.
        getcmd (str): Command to retrieve the value in the CPACS file.
        setcmd (str): Command to change the value in the CPACS file.
        lim (float): Relative half-width of the bounds. Defaults to 0.2.

    Returns:
        None.
    """
    if init_value > 0:
        lower_bound, upper_bound = init_value * (1 - lim), init_value * (1 + lim)
    elif init_value < 0:
        # negative values: the (1+lim) side is the smaller one
        lower_bound, upper_bound = init_value * (1 + lim), init_value * (1 - lim)
    else:
        lower_bound, upper_bound = -lim, lim

    geom_var_dict[var_name] = (var_name, [init_value], lower_bound, upper_bound, setcmd, getcmd)
def init_elem_param(sec_name, section, elem_nb, scmd):
    """Register design variables for the elements of one wing section.

    Args:
        sec_name (str): Name of the wing section.
        section (handle): Handle of the wing section.
        elem_nb (int): Number of section elements.
        scmd (str): Command prefix to get the section handle.

    Returns:
        None.
    """
    for enb in range(1, elem_nb + 1):
        element_cmd = scmd + 'get_section_element({}).get_ctigl_section_element().'.format(enb)
        element = section.get_section_element(enb).get_ctigl_section_element()

        # element width
        var_name = '{}_el{}_width'.format(sec_name, enb)
        create_var(var_name,
                   element.get_width(),
                   element_cmd + 'get_width()',
                   element_cmd + 'set_width({})'.format(var_name))
def init_sec_param(name, wing, sec_nb, wcmd):
    """Register design variables for the sections of one wing.

    Args:
        name (str): Name of the wing.
        wing (handle): Handle of the wing.
        sec_nb (int): Number of wing sections.
        wcmd (str): Command prefix to get the wing handle.

    Returns:
        None.
    """
    for snb in range(1, sec_nb + 1):
        sec_cmd = wcmd + 'get_section({}).'.format(snb)
        sec_name = '{}_sec{}'.format(name, snb)
        section = wing.get_section(snb)

        # section rotation about Y
        var_name = sec_name + "_Yrotation"
        create_var(var_name,
                   section.get_rotation().y,
                   sec_cmd + 'get_rotation().y',
                   sec_cmd + 'set_rotation(geometry.CTiglPoint(0,{},0))'.format(var_name))

        elem_nb = section.get_section_element_count()
        if elem_nb:
            init_elem_param(sec_name, section, elem_nb, sec_cmd)
def init_wing_param(aircraft, wing_nb):
    """Register design variables for every wing of the aircraft.

    Args:
        aircraft (handle): Handle of the aircraft.
        wing_nb (int): Number of wings.

    Returns:
        None.
    """
    wings = aircraft.get_wings()
    for wnb in range(1, wing_nb + 1):
        cmd = 'wings.get_wing({}).'.format(wnb)
        name = "wing" + str(wnb)
        wing = wings.get_wing(wnb)

        # half span (span change keeps the aspect ratio)
        var_name = name + "_span"
        create_var(var_name,
                   wing.get_wing_half_span(),
                   cmd + 'get_wing_half_span()',
                   cmd + 'set_half_span_keep_ar({})'.format(var_name))

        # aspect ratio (AR change keeps the area)
        var_name = name + "_aspect_ratio"
        create_var(var_name,
                   wing.get_aspect_ratio(),
                   cmd + 'get_aspect_ratio()',
                   cmd + 'set_arkeep_area({})'.format(var_name))

        # half surface area (area change keeps the aspect ratio)
        var_name = name + "_area"
        create_var(var_name,
                   wing.get_surface_area() / 2,
                   cmd + 'get_surface_area()',
                   cmd + 'set_area_keep_ar({})'.format(var_name))

        # sweep angle
        var_name = name + "_sweep"
        create_var(var_name,
                   wing.get_sweep(),
                   cmd + 'get_sweep()',
                   cmd + 'set_sweep({})'.format(var_name))

        # wing rotation about Y
        var_name = name + "_Yrotation"
        create_var(var_name,
                   wing.get_rotation().y,
                   cmd + 'get_rotation().y',
                   cmd + 'set_rotation(geometry.CTiglPoint(0,{},0))'.format(var_name))

        sec_nb = wing.get_section_count()
        if sec_nb:
            init_sec_param(name, wing, sec_nb, cmd)
def init_fuse_param(aircraft, fuse_nb):
    """Register design variables for every fuselage of the aircraft.

    Adds length and maximal-width variables for each fuselage to the
    module-level geometric variable dictionary.

    Args:
        aircraft (handle): Handle of the aircraft.
        fuse_nb (int): Number of fuselages.

    Returns:
        None.
    """
    for f in range(1, fuse_nb+1):
        name = "fuse" + str(f)
        fuselage = aircraft.get_fuselage(f)
        # fuselage length
        var_name = name+"_length"
        init_length = fuselage.get_length()
        getcmd = 'fuselage.get_length()'
        setcmd = 'fuselage.set_length({})'.format(var_name)
        create_var(var_name, init_length, getcmd, setcmd)
        # fuselage maximal width
        var_name = name+"_width"
        init_width = fuselage.get_maximal_width()
        getcmd = 'fuselage.get_maximal_width()'
        setcmd = 'fuselage.set_max_width({})'.format(var_name)
        create_var(var_name, init_width, getcmd, setcmd)
        # Modify a specific section width
        # NOTE(review): if get_section_count() returns an int (as the name
        # suggests), this isinstance guard makes the loop below dead code —
        # confirm against the TIGL API.
        fnb = fuselage.get_section_count()
        if not isinstance(fnb, int):
            for secnb in fnb:
                var_name = name + "_sec" + str(secnb)
                init_sec_width = fuselage.get_maximal_width()
                getcmd = 'fuselage.get_maximal_width()'
                setcmd = 'fuselage.set_max_width({})'.format(var_name)
                create_var(var_name, init_sec_width, getcmd, setcmd)
def init_geom_var_dict(tixi):
    """Build the dictionary of geometric design variables via the TIGL library.

    Registers fuselage and wing variables in the module-level dictionary
    and returns it.

    Args:
        tixi (handle): Handle of the CPACS file.

    Returns:
        dict: Geometric parameters of the routine.
        NOTE: entries accumulate in the module-level dict across calls.
    """
    aircraft = cpud.get_aircraft(cpsf.open_tigl(tixi))

    fuse_count = aircraft.get_fuselage_count()
    if fuse_count:
        init_fuse_param(aircraft, fuse_count)

    wing_count = aircraft.get_wing_count()
    if wing_count:
        init_wing_param(aircraft, wing_count)

    return geom_var_dict
if __name__ == "__main__":
    # This module only provides helpers for the optimization routine;
    # running it directly does nothing useful.
    log.info("Launching dictionnary.py programm...")
    log.info("Not a standalone programm. Nothing will be executed !")
    exit()
| [
"ceasiompy.utils.apmfunctions.get_aeromap",
"ceasiompy.CPACSUpdater.cpacsupdater.get_aircraft",
"ceasiompy.utils.cpacsfunctions.open_tigl",
"sys.exit",
"numpy.repeat"
] | [((2873, 2908), 'ceasiompy.utils.apmfunctions.get_aeromap', 'apmf.get_aeromap', (['tixi', 'aeromap_uid'], {}), '(tixi, aeromap_uid)\n', (2889, 2908), True, 'import ceasiompy.utils.apmfunctions as apmf\n'), ((10398, 10418), 'ceasiompy.utils.cpacsfunctions.open_tigl', 'cpsf.open_tigl', (['tixi'], {}), '(tixi)\n', (10412, 10418), True, 'import ceasiompy.utils.cpacsfunctions as cpsf\n'), ((10434, 10457), 'ceasiompy.CPACSUpdater.cpacsupdater.get_aircraft', 'cpud.get_aircraft', (['tigl'], {}), '(tigl)\n', (10451, 10457), True, 'import ceasiompy.CPACSUpdater.cpacsupdater as cpud\n'), ((10846, 10852), 'sys.exit', 'exit', ([], {}), '()\n', (10850, 10852), False, 'from sys import exit\n'), ((2388, 2411), 'numpy.repeat', 'np.repeat', (['l', 'am_length'], {}), '(l, am_length)\n', (2397, 2411), True, 'import numpy as np\n')] |
# Copyright 2018 The CapsLayer Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.datasets.cifar import load_batch
from capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature
URL = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
md5sum = 'eb9058c3a382ffc7106e4002c42a8d85'
def load_cifar100(split, path=None):
    """Return an iterator of (image, label) pairs for one CIFAR-100 split.

    `split` is 'train', 'eval' or 'test' (case-insensitive). When `path` is
    None, the archive is downloaded/cached under ~/.capslayer. Images come
    back as flattened float32 vectors of length 3072 (HWC order), labels as
    int32 fine-label ids.
    """
    if path is None:
        cache_dir = os.path.join(os.path.expanduser('~'), ".capslayer")
        path = get_file('cifar-100-python', cache_dir=cache_dir,
                        file_hash=md5sum, origin=URL, untar=True)

    split = split.lower()
    if split == 'test':
        images, labels = load_batch(os.path.join(path, 'test'), label_key='fine_labels')
    else:
        # 'train' and 'eval' are both carved out of the 50k-sample training
        # batch using a fixed shuffle: first 45k for train, last 5k for eval.
        images, labels = load_batch(os.path.join(path, 'train'), label_key='fine_labels')
        order = np.arange(len(images))
        np.random.seed(201808)
        np.random.shuffle(order)
        labels = np.reshape(labels, (-1, ))
        keep = order[:45000] if split == "train" else order[45000:]
        images = images[keep]
        labels = labels[keep]

    # NCHW -> NHWC, then flatten each image into a 3072-element vector.
    images = np.reshape(images.transpose(0, 2, 3, 1), (-1, 3072)).astype(np.float32)
    labels = np.reshape(labels, (-1, )).astype(np.int32)
    return zip(images, labels)
def encode_and_write(dataset, filename):
    """Serialize (image, label) pairs from `dataset` into a TFRecord file.

    Each record stores the raw image bytes under 'image' and the integer
    class id under 'label'.
    """
    # BUG FIX: removed leftover debug statements (`print(image.shape)` and
    # `exit()`) that printed the first sample's shape and then terminated
    # the whole process before a single record was written.
    with tf.python_io.TFRecordWriter(filename) as writer:
        for image, label in dataset:
            image_raw = image.tostring()
            example = tf.train.Example(features=tf.train.Features(
                feature={'image': bytes_feature(image_raw),
                         'label': int64_feature(label)}))
            writer.write(example.SerializeToString())
def tfrecord_runner(path=None, force=True):
    """Convert all three CIFAR-100 splits into TFRecord files under `path`.

    With the default `force=True`, existing record files are rewritten;
    pass force=False to skip splits whose output file already exists.
    """
    split_data = [('train', load_cifar100(path=path, split='train')),
                  ('eval', load_cifar100(path=path, split='eval')),
                  ('test', load_cifar100(path=path, split='test'))]

    if path is None:
        path = os.path.join(os.path.expanduser('~'), ".capslayer", "datasets", "cifar100")
    if not os.path.exists(path):
        os.makedirs(path)

    for split_name, dataset in split_data:
        out_file = os.path.join(path, split_name + "_cifar100.tfrecord")
        if force or not os.path.exists(out_file):
            encode_and_write(dataset, out_file)
if __name__ == "__main__":
    # Quick smoke test: load the training split. Note that this prints the
    # lazy zip object itself, not the decoded samples.
    data = load_cifar100(split='train')
    print(data)
| [
"os.path.expanduser",
"numpy.random.seed",
"tensorflow.python_io.TFRecordWriter",
"os.makedirs",
"tensorflow.python.keras.utils.data_utils.get_file",
"os.path.exists",
"capslayer.data.utils.TFRecordHelper.bytes_feature",
"tensorflow.python.keras.datasets.cifar.load_batch",
"numpy.reshape",
"capsla... | [((3092, 3137), 'os.path.join', 'os.path.join', (['path', '"""train_cifar100.tfrecord"""'], {}), "(path, 'train_cifar100.tfrecord')\n", (3104, 3137), False, 'import os\n'), ((3161, 3205), 'os.path.join', 'os.path.join', (['path', '"""eval_cifar100.tfrecord"""'], {}), "(path, 'eval_cifar100.tfrecord')\n", (3173, 3205), False, 'import os\n'), ((3229, 3273), 'os.path.join', 'os.path.join', (['path', '"""test_cifar100.tfrecord"""'], {}), "(path, 'test_cifar100.tfrecord')\n", (3241, 3273), False, 'import os\n'), ((1310, 1407), 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['"""cifar-100-python"""'], {'cache_dir': 'cache_path', 'file_hash': 'md5sum', 'origin': 'URL', 'untar': '(True)'}), "('cifar-100-python', cache_dir=cache_path, file_hash=md5sum, origin\n =URL, untar=True)\n", (1318, 1407), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n'), ((1470, 1496), 'os.path.join', 'os.path.join', (['path', '"""test"""'], {}), "(path, 'test')\n", (1482, 1496), False, 'import os\n'), ((1522, 1564), 'tensorflow.python.keras.datasets.cifar.load_batch', 'load_batch', (['fpath'], {'label_key': '"""fine_labels"""'}), "(fpath, label_key='fine_labels')\n", (1532, 1564), False, 'from tensorflow.python.keras.datasets.cifar import load_batch\n'), ((1591, 1618), 'os.path.join', 'os.path.join', (['path', '"""train"""'], {}), "(path, 'train')\n", (1603, 1618), False, 'import os\n'), ((1644, 1686), 'tensorflow.python.keras.datasets.cifar.load_batch', 'load_batch', (['fpath'], {'label_key': '"""fine_labels"""'}), "(fpath, label_key='fine_labels')\n", (1654, 1686), False, 'from tensorflow.python.keras.datasets.cifar import load_batch\n'), ((1733, 1755), 'numpy.random.seed', 'np.random.seed', (['(201808)'], {}), '(201808)\n', (1747, 1755), True, 'import numpy as np\n'), ((1764, 1786), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1781, 1786), True, 'import numpy as np\n'), ((1805, 1830), 'numpy.reshape', 
'np.reshape', (['labels', '(-1,)'], {}), '(labels, (-1,))\n', (1815, 1830), True, 'import numpy as np\n'), ((2223, 2260), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (2250, 2260), True, 'import tensorflow as tf\n'), ((3019, 3039), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3033, 3039), False, 'import os\n'), ((3049, 3066), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3060, 3066), False, 'import os\n'), ((1256, 1279), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1274, 1279), False, 'import os\n'), ((2094, 2119), 'numpy.reshape', 'np.reshape', (['labels', '(-1,)'], {}), '(labels, (-1,))\n', (2104, 2119), True, 'import numpy as np\n'), ((2945, 2968), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2963, 2968), False, 'import os\n'), ((3286, 3319), 'os.path.exists', 'os.path.exists', (['train_set_outpath'], {}), '(train_set_outpath)\n', (3300, 3319), False, 'import os\n'), ((3396, 3428), 'os.path.exists', 'os.path.exists', (['eval_set_outpath'], {}), '(eval_set_outpath)\n', (3410, 3428), False, 'import os\n'), ((3503, 3535), 'os.path.exists', 'os.path.exists', (['test_set_outpath'], {}), '(test_set_outpath)\n', (3517, 3535), False, 'import os\n'), ((2524, 2548), 'capslayer.data.utils.TFRecordHelper.bytes_feature', 'bytes_feature', (['image_raw'], {}), '(image_raw)\n', (2537, 2548), False, 'from capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature\n'), ((2607, 2627), 'capslayer.data.utils.TFRecordHelper.int64_feature', 'int64_feature', (['label'], {}), '(label)\n', (2620, 2627), False, 'from capslayer.data.utils.TFRecordHelper import int64_feature, bytes_feature\n')] |
import torch
import numpy as np
import torch.nn as nn
from math import ceil
from torch.autograd import Variable
from ptsemseg import caffe_pb2
from ptsemseg.models.utils import *
from ptsemseg.loss import *
# Per-dataset presets: number of output classes, the square crop size the
# network is configured for, and the residual-unit counts of the four
# ResNet stages (consumed by pspnet.__init__ via the `version` argument).
pspnet_specs = {
    'pascalvoc':
    {
        'n_classes': 21,
        'input_size': (473, 473),
        'block_config': [3, 4, 23, 3],
    },

    'cityscapes':
    {
        'n_classes': 19,
        'input_size': (713, 713),
        'block_config': [3, 4, 23, 3],
    },

    'ade20k':
    {
        'n_classes': 150,
        'input_size': (473, 473),
        'block_config': [3, 4, 6, 3],
    },
}
class pspnet(nn.Module):
    """
    Pyramid Scene Parsing Network
    URL: https://arxiv.org/abs/1612.01105

    References:
    1) Original Author's code: https://github.com/hszhao/PSPNet
    2) Chainer implementation by @mitmul: https://github.com/mitmul/chainer-pspnet
    3) TensorFlow implementation by @hellochick: https://github.com/hellochick/PSPNet-tensorflow

    Visualization:
    http://dgschwend.github.io/netscope/#/gist/6bfb59e6a3cfcb4e2bb8d47f827c2928
    """

    def __init__(self,
                 n_classes=21,
                 block_config=[3, 4, 23, 3],
                 input_size=(473,473),
                 version=None):
        # Build the PSPNet graph. A recognized `version` key (see
        # pspnet_specs) overrides the three individual arguments.
        # NOTE(review): the mutable default for block_config is never mutated
        # here, so it is harmless, but callers should not modify it in place.

        super(pspnet, self).__init__()

        self.block_config = pspnet_specs[version]['block_config'] if version is not None else block_config
        self.n_classes = pspnet_specs[version]['n_classes'] if version is not None else n_classes
        self.input_size = pspnet_specs[version]['input_size'] if version is not None else input_size

        # Encoder
        self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=64,
                                                padding=1, stride=2, bias=False)
        self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64,
                                                padding=1, stride=1, bias=False)
        self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=128,
                                                padding=1, stride=1, bias=False)

        # Vanilla Residual Blocks
        self.res_block2 = residualBlockPSP(self.block_config[0], 128, 64, 256, 1, 1)
        self.res_block3 = residualBlockPSP(self.block_config[1], 256, 128, 512, 2, 1)

        # Dilated Residual Blocks
        self.res_block4 = residualBlockPSP(self.block_config[2], 512, 256, 1024, 1, 2)
        self.res_block5 = residualBlockPSP(self.block_config[3], 1024, 512, 2048, 1, 4)

        # Pyramid Pooling Module
        self.pyramid_pooling = pyramidPooling(2048, [6, 3, 2, 1])

        # Final conv layers
        self.cbr_final = conv2DBatchNormRelu(4096, 512, 3, 1, 1, False)
        self.dropout = nn.Dropout2d(p=0.1, inplace=True)
        self.classification = nn.Conv2d(512, self.n_classes, 1, 1, 0)

        # Auxiliary layers for training
        self.convbnrelu4_aux = conv2DBatchNormRelu(in_channels=1024, k_size=3, n_filters=256, padding=1, stride=1, bias=False)
        self.aux_cls = nn.Conv2d(256, self.n_classes, 1, 1, 0)

        # Define auxiliary loss function
        self.loss = multi_scale_cross_entropy2d

    def forward(self, x):
        """Run a forward pass.

        In training mode returns (aux_logits, logits); in eval mode returns
        only the main logits, upsampled back to the input's spatial size.
        """
        inp_shape = x.shape[2:]

        # H, W -> H/2, W/2
        x = self.convbnrelu1_1(x)
        x = self.convbnrelu1_2(x)
        x = self.convbnrelu1_3(x)

        # H/2, W/2 -> H/4, W/4
        x = F.max_pool2d(x, 3, 2, 1)

        # H/4, W/4 -> H/8, W/8
        x = self.res_block2(x)
        x = self.res_block3(x)
        x = self.res_block4(x)

        # Auxiliary layers for training (branch off after res_block4)
        x_aux = self.convbnrelu4_aux(x)
        x_aux = self.dropout(x_aux)
        x_aux = self.aux_cls(x_aux)

        x = self.res_block5(x)

        x = self.pyramid_pooling(x)

        x = self.cbr_final(x)
        x = self.dropout(x)

        x = self.classification(x)
        # NOTE(review): F.upsample is deprecated in newer torch in favor of
        # F.interpolate; kept as-is for the torch version this targets.
        x = F.upsample(x, size=inp_shape, mode='bilinear')

        if self.training:
            return x_aux, x
        else:  # eval mode
            return x

    def load_pretrained_model(self, model_path):
        """
        Load weights from caffemodel w/o caffe dependency
        and plug them in corresponding modules
        """
        # My eyes and my heart both hurt when writing this method
        # Only care about layer_types that have trainable parameters
        ltypes = ['BNData', 'ConvolutionData', 'HoleConvolutionData']

        def _get_layer_params(layer, ltype):
            # Pull the raw numpy parameter arrays out of a caffe layer's blobs.
            if ltype == 'BNData':
                gamma = np.array(layer.blobs[0].data)
                beta = np.array(layer.blobs[1].data)
                mean = np.array(layer.blobs[2].data)
                var = np.array(layer.blobs[3].data)
                return [mean, var, gamma, beta]
            elif ltype in ['ConvolutionData', 'HoleConvolutionData']:
                is_bias = layer.convolution_param.bias_term
                weights = np.array(layer.blobs[0].data)
                bias = []
                if is_bias:
                    bias = np.array(layer.blobs[1].data)
                return [weights, bias]
            elif ltype == 'InnerProduct':
                raise Exception("Fully connected layers {}, not supported".format(ltype))
            else:
                raise Exception("Unkown layer type {}".format(ltype))

        net = caffe_pb2.NetParameter()
        with open(model_path, 'rb') as model_file:
            net.MergeFromString(model_file.read())

        # dict formatted as -> key:<layer_name> :: value:<layer_type>
        layer_types = {}
        # dict formatted as -> key:<layer_name> :: value:[<list_of_params>]
        layer_params = {}

        for l in net.layer:
            lname = l.name
            ltype = l.type
            if ltype in ltypes:
                print("Processing layer {}".format(lname))
                layer_types[lname] = ltype
                layer_params[lname] = _get_layer_params(l, ltype)

        # Set affine=False for all batchnorm modules
        def _no_affine_bn(module=None):
            if isinstance(module, nn.BatchNorm2d):
                module.affine = False

            if len([m for m in module.children()]) > 0:
                for child in module.children():
                    _no_affine_bn(child)

        #_no_affine_bn(self)

        def _transfer_conv(layer_name, module):
            # Copy one conv layer's weights (and bias, if present) in place.
            weights, bias = layer_params[layer_name]
            w_shape = np.array(module.weight.size())

            print("CONV {}: Original {} and trans weights {}".format(layer_name,
                                                                     w_shape,
                                                                     weights.shape))

            module.weight.data.copy_(torch.from_numpy(weights).view_as(module.weight))

            if len(bias) != 0:
                b_shape = np.array(module.bias.size())
                print("CONV {}: Original {} and trans bias {}".format(layer_name,
                                                                      b_shape,
                                                                      bias.shape))
                module.bias.data.copy_(torch.from_numpy(bias).view_as(module.bias))

        def _transfer_conv_bn(conv_layer_name, mother_module):
            # mother_module is an nn.Sequential-like pair: [conv, batchnorm].
            conv_module = mother_module[0]
            bn_module = mother_module[1]

            _transfer_conv(conv_layer_name, conv_module)

            # Matching BN params live under the '<conv_name>/bn' key.
            mean, var, gamma, beta = layer_params[conv_layer_name+'/bn']
            print("BN {}: Original {} and trans weights {}".format(conv_layer_name,
                                                                   bn_module.running_mean.size(),
                                                                   mean.shape))
            bn_module.running_mean.copy_(torch.from_numpy(mean).view_as(bn_module.running_mean))
            bn_module.running_var.copy_(torch.from_numpy(var).view_as(bn_module.running_var))
            bn_module.weight.data.copy_(torch.from_numpy(gamma).view_as(bn_module.weight))
            bn_module.bias.data.copy_(torch.from_numpy(beta).view_as(bn_module.bias))

        def _transfer_residual(prefix, block):
            # Transfer one full residual stage: the bottleneck (which carries
            # the projection shortcut) plus the remaining identity units.
            block_module, n_layers = block[0], block[1]

            bottleneck = block_module.layers[0]
            bottleneck_conv_bn_dic = {prefix + '_1_1x1_reduce': bottleneck.cbr1.cbr_unit,
                                      prefix + '_1_3x3': bottleneck.cbr2.cbr_unit,
                                      prefix + '_1_1x1_proj': bottleneck.cb4.cb_unit,
                                      prefix + '_1_1x1_increase': bottleneck.cb3.cb_unit,}

            for k, v in bottleneck_conv_bn_dic.items():
                _transfer_conv_bn(k, v)

            for layer_idx in range(2, n_layers+1):
                residual_layer = block_module.layers[layer_idx-1]
                residual_conv_bn_dic = {'_'.join(map(str, [prefix, layer_idx, '1x1_reduce'])): residual_layer.cbr1.cbr_unit,
                                        '_'.join(map(str, [prefix, layer_idx, '3x3'])): residual_layer.cbr2.cbr_unit,
                                        '_'.join(map(str, [prefix, layer_idx, '1x1_increase'])): residual_layer.cb3.cb_unit,}

                for k, v in residual_conv_bn_dic.items():
                    _transfer_conv_bn(k, v)

        convbn_layer_mapping = {'conv1_1_3x3_s2': self.convbnrelu1_1.cbr_unit,
                                'conv1_2_3x3': self.convbnrelu1_2.cbr_unit,
                                'conv1_3_3x3': self.convbnrelu1_3.cbr_unit,
                                'conv5_3_pool6_conv': self.pyramid_pooling.paths[0].cbr_unit,
                                'conv5_3_pool3_conv': self.pyramid_pooling.paths[1].cbr_unit,
                                'conv5_3_pool2_conv': self.pyramid_pooling.paths[2].cbr_unit,
                                'conv5_3_pool1_conv': self.pyramid_pooling.paths[3].cbr_unit,
                                'conv5_4': self.cbr_final.cbr_unit,
                                'conv4_' + str(self.block_config[2]+1): self.convbnrelu4_aux.cbr_unit,}  # Auxiliary layers for training

        residual_layers = {'conv2': [self.res_block2, self.block_config[0]],
                           'conv3': [self.res_block3, self.block_config[1]],
                           'conv4': [self.res_block4, self.block_config[2]],
                           'conv5': [self.res_block5, self.block_config[3]],}

        # Transfer weights for all non-residual conv+bn layers
        for k, v in convbn_layer_mapping.items():
            _transfer_conv_bn(k, v)

        # Transfer weights for final non-bn conv layer
        _transfer_conv('conv6', self.classification)
        _transfer_conv('conv6_1', self.aux_cls)

        # Transfer weights for all residual layers
        for k, v in residual_layers.items():
            _transfer_residual(k, v)

    def tile_predict(self, imgs, include_flip_mode=True):
        """
        Predict by taking overlapping tiles from the image.

        Strides are adaptively computed from the imgs shape
        and input size

        :param imgs: torch.Tensor with shape [N, C, H, W] in BGR format
        :param side: int with side length of model input
        :param n_classes: int with number of classes in seg output.
        """
        side_x, side_y = self.input_size
        n_classes = self.n_classes
        n_samples, c, h, w = imgs.shape

        # Number of tiles per axis, then the (fractional) stride between them.
        #n = int(max(h,w) / float(side) + 1)
        n_x = int(h / float(side_x) + 1)
        n_y = int(w / float(side_y) + 1)
        stride_x = ( h - side_x ) / float(n_x)
        stride_y = ( w - side_y ) / float(n_y)

        x_ends = [[int(i*stride_x), int(i*stride_x) + side_x] for i in range(n_x+1)]
        y_ends = [[int(i*stride_y), int(i*stride_y) + side_y] for i in range(n_y+1)]

        pred = np.zeros([n_samples, n_classes, h, w])
        count = np.zeros([h, w])

        # NOTE(review): slice_count is incremented but never read — appears
        # to be leftover instrumentation.
        slice_count = 0
        for sx, ex in x_ends:
            for sy, ey in y_ends:
                slice_count += 1

                imgs_slice = imgs[:, :, sx:ex, sy:ey]
                if include_flip_mode:
                    # Horizontally flipped copy (flip along the width axis).
                    imgs_slice_flip = torch.from_numpy(np.copy(imgs_slice.cpu().numpy()[:, :, :, ::-1])).float()

                is_model_on_cuda = next(self.parameters()).is_cuda

                # NOTE(review): Variable(volatile=True) is the pre-0.4 torch
                # idiom for inference without autograd bookkeeping.
                inp = Variable(imgs_slice, volatile=True)
                if include_flip_mode:
                    flp = Variable(imgs_slice_flip, volatile=True)

                if is_model_on_cuda:
                    inp = inp.cuda()
                    if include_flip_mode:
                        flp = flp.cuda()

                # Average softmax probabilities of the tile and its mirror.
                psub1 = F.softmax(self.forward(inp), dim=1).data.cpu().numpy()
                if include_flip_mode:
                    psub2 = F.softmax(self.forward(flp), dim=1).data.cpu().numpy()
                    psub = (psub1 + psub2[:, :, :, ::-1]) / 2.0
                else:
                    psub = psub1

                pred[:, :, sx:ex, sy:ey] = psub
                count[sx:ex, sy:ey] += 1.0

        # Average overlapping tiles, then renormalize to probabilities.
        score = (pred / count[None, None, ...]).astype(np.float32)
        return score / np.expand_dims(score.sum(axis=1), axis=1)
# For Testing Purposes only
if __name__ == '__main__':
    cd = 0  # CUDA device index
    import os
    from torch.autograd import Variable
    import matplotlib.pyplot as plt
    import scipy.misc as m
    from ptsemseg.loader.cityscapes_loader import cityscapesLoader as cl

    psp = pspnet(version='cityscapes')

    # Just need to do this one time: convert caffe weights to torch.
    caffemodel_dir_path = 'PATH_TO_PSPNET_DIR/evaluation/model'
    psp.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'pspnet101_cityscapes.caffemodel'))
    #psp.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'pspnet50_ADE20K.caffemodel'))
    #psp.load_pretrained_model(model_path=os.path.join(caffemodel_dir_path, 'pspnet101_VOC2012.caffemodel'))
    # psp.load_state_dict(torch.load('psp.pth'))

    psp.float()
    psp.cuda(cd)
    psp.eval()

    dataset_root_dir = 'PATH_TO_CITYSCAPES_DIR'
    dst = cl(root=dataset_root_dir)
    img = m.imread(os.path.join(dataset_root_dir, 'leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000010_leftImg8bit.png'))
    m.imsave('cropped.png', img)
    orig_size = img.shape[:-1]

    # HWC -> CHW, subtract the per-channel mean, RGB -> BGR.
    img = img.transpose(2, 0, 1)
    img = img.astype(np.float64)
    img -= np.array([123.68, 116.779, 103.939])[:, None, None]
    img = np.copy(img[::-1, :, :])
    img = torch.from_numpy(img).float()  # convert to torch tensor
    img = img.unsqueeze(0)

    out = psp.tile_predict(img)
    pred = np.argmax(out, axis=1)[0]
    decoded = dst.decode_segmap(pred)
    m.imsave('cityscapes_sttutgart_tiled.png', decoded)
    #m.imsave('cityscapes_sttutgart_tiled.png', pred)

    # Re-save the converted weights as a torch checkpoint.
    checkpoints_dir_path = 'checkpoints'
    if not os.path.exists(checkpoints_dir_path):
        os.mkdir(checkpoints_dir_path)
    psp = torch.nn.DataParallel(psp, device_ids=range(torch.cuda.device_count()))  # append `module.`
    state = {'model_state': psp.state_dict()}
    torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_101_cityscapes.pth"))
    #torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_50_ade20k.pth"))
    #torch.save(state, os.path.join(checkpoints_dir_path, "pspnet_101_pascalvoc.pth"))
    print("Output Shape {} \t Input Shape {}".format(out.shape, img.shape))
| [
"os.mkdir",
"torch.nn.Dropout2d",
"numpy.copy",
"numpy.argmax",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"numpy.zeros",
"ptsemseg.loader.cityscapes_loader.cityscapesLoader",
"os.path.exists",
"torch.cuda.device_count",
"ptsemseg.caffe_pb2.NetParameter",
"numpy.array",
"scipy.misc.imsave... | [((14241, 14266), 'ptsemseg.loader.cityscapes_loader.cityscapesLoader', 'cl', ([], {'root': 'dataset_root_dir'}), '(root=dataset_root_dir)\n', (14243, 14266), True, 'from ptsemseg.loader.cityscapes_loader import cityscapesLoader as cl\n'), ((14403, 14431), 'scipy.misc.imsave', 'm.imsave', (['"""cropped.png"""', 'img'], {}), "('cropped.png', img)\n", (14411, 14431), True, 'import scipy.misc as m\n'), ((14602, 14626), 'numpy.copy', 'np.copy', (['img[::-1, :, :]'], {}), '(img[::-1, :, :])\n', (14609, 14626), True, 'import numpy as np\n'), ((14832, 14883), 'scipy.misc.imsave', 'm.imsave', (['"""cityscapes_sttutgart_tiled.png"""', 'decoded'], {}), "('cityscapes_sttutgart_tiled.png', decoded)\n", (14840, 14883), True, 'import scipy.misc as m\n'), ((2843, 2876), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {'p': '(0.1)', 'inplace': '(True)'}), '(p=0.1, inplace=True)\n', (2855, 2876), True, 'import torch.nn as nn\n'), ((2907, 2946), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'self.n_classes', '(1)', '(1)', '(0)'], {}), '(512, self.n_classes, 1, 1, 0)\n', (2916, 2946), True, 'import torch.nn as nn\n'), ((3138, 3177), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'self.n_classes', '(1)', '(1)', '(0)'], {}), '(256, self.n_classes, 1, 1, 0)\n', (3147, 3177), True, 'import torch.nn as nn\n'), ((5443, 5467), 'ptsemseg.caffe_pb2.NetParameter', 'caffe_pb2.NetParameter', ([], {}), '()\n', (5465, 5467), False, 'from ptsemseg import caffe_pb2\n'), ((11981, 12019), 'numpy.zeros', 'np.zeros', (['[n_samples, n_classes, h, w]'], {}), '([n_samples, n_classes, h, w])\n', (11989, 12019), True, 'import numpy as np\n'), ((12036, 12052), 'numpy.zeros', 'np.zeros', (['[h, w]'], {}), '([h, w])\n', (12044, 12052), True, 'import numpy as np\n'), ((14286, 14406), 'os.path.join', 'os.path.join', (['dataset_root_dir', '"""leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000010_leftImg8bit.png"""'], {}), "(dataset_root_dir,\n 
'leftImg8bit/demoVideo/stuttgart_00/stuttgart_00_000000_000010_leftImg8bit.png'\n )\n", (14298, 14406), False, 'import os\n'), ((14540, 14576), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (14548, 14576), True, 'import numpy as np\n'), ((14764, 14786), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (14773, 14786), True, 'import numpy as np\n'), ((14992, 15028), 'os.path.exists', 'os.path.exists', (['checkpoints_dir_path'], {}), '(checkpoints_dir_path)\n', (15006, 15028), False, 'import os\n'), ((15038, 15068), 'os.mkdir', 'os.mkdir', (['checkpoints_dir_path'], {}), '(checkpoints_dir_path)\n', (15046, 15068), False, 'import os\n'), ((15238, 15301), 'os.path.join', 'os.path.join', (['checkpoints_dir_path', '"""pspnet_101_cityscapes.pth"""'], {}), "(checkpoints_dir_path, 'pspnet_101_cityscapes.pth')\n", (15250, 15301), False, 'import os\n'), ((13793, 13861), 'os.path.join', 'os.path.join', (['caffemodel_dir_path', '"""pspnet101_cityscapes.caffemodel"""'], {}), "(caffemodel_dir_path, 'pspnet101_cityscapes.caffemodel')\n", (13805, 13861), False, 'import os\n'), ((14637, 14658), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (14653, 14658), False, 'import torch\n'), ((4619, 4648), 'numpy.array', 'np.array', (['layer.blobs[0].data'], {}), '(layer.blobs[0].data)\n', (4627, 4648), True, 'import numpy as np\n'), ((4672, 4701), 'numpy.array', 'np.array', (['layer.blobs[1].data'], {}), '(layer.blobs[1].data)\n', (4680, 4701), True, 'import numpy as np\n'), ((4725, 4754), 'numpy.array', 'np.array', (['layer.blobs[2].data'], {}), '(layer.blobs[2].data)\n', (4733, 4754), True, 'import numpy as np\n'), ((4778, 4807), 'numpy.array', 'np.array', (['layer.blobs[3].data'], {}), '(layer.blobs[3].data)\n', (4786, 4807), True, 'import numpy as np\n'), ((12484, 12519), 'torch.autograd.Variable', 'Variable', (['imgs_slice'], {'volatile': '(True)'}), '(imgs_slice, volatile=True)\n', (12492, 
12519), False, 'from torch.autograd import Variable\n'), ((15123, 15148), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15146, 15148), False, 'import torch\n'), ((5013, 5042), 'numpy.array', 'np.array', (['layer.blobs[0].data'], {}), '(layer.blobs[0].data)\n', (5021, 5042), True, 'import numpy as np\n'), ((12584, 12624), 'torch.autograd.Variable', 'Variable', (['imgs_slice_flip'], {'volatile': '(True)'}), '(imgs_slice_flip, volatile=True)\n', (12592, 12624), False, 'from torch.autograd import Variable\n'), ((5124, 5153), 'numpy.array', 'np.array', (['layer.blobs[1].data'], {}), '(layer.blobs[1].data)\n', (5132, 5153), True, 'import numpy as np\n'), ((6857, 6882), 'torch.from_numpy', 'torch.from_numpy', (['weights'], {}), '(weights)\n', (6873, 6882), False, 'import torch\n'), ((7930, 7952), 'torch.from_numpy', 'torch.from_numpy', (['mean'], {}), '(mean)\n', (7946, 7952), False, 'import torch\n'), ((8026, 8047), 'torch.from_numpy', 'torch.from_numpy', (['var'], {}), '(var)\n', (8042, 8047), False, 'import torch\n'), ((8120, 8143), 'torch.from_numpy', 'torch.from_numpy', (['gamma'], {}), '(gamma)\n', (8136, 8143), False, 'import torch\n'), ((8209, 8231), 'torch.from_numpy', 'torch.from_numpy', (['beta'], {}), '(beta)\n', (8225, 8231), False, 'import torch\n'), ((7277, 7299), 'torch.from_numpy', 'torch.from_numpy', (['bias'], {}), '(bias)\n', (7293, 7299), False, 'import torch\n')] |
import pyOcean_cpu as ocean
import numpy as np
import pyOceanNumpy

# Build a 3x2x4 numpy array with values 0..23.
a = np.arange(24).reshape([3,2,4])
print(a)

# Wrap it as an Ocean tensor with the axis order reversed.
b = ocean.asTensor(a).reverseAxes2()
print(b)

# Fill the tensor and sync; the final print of `a` suggests the Ocean
# tensor is backed by (or synced to) the numpy buffer, so the fill is
# expected to be visible in `a`. NOTE(review): shared-storage semantics
# assumed from the sync() call — confirm against the pyOcean docs.
b.fill(3)
b.sync()
print(a)
| [
"numpy.arange",
"pyOcean_cpu.asTensor"
] | [((72, 85), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (81, 85), True, 'import numpy as np\n'), ((116, 133), 'pyOcean_cpu.asTensor', 'ocean.asTensor', (['a'], {}), '(a)\n', (130, 133), True, 'import pyOcean_cpu as ocean\n')] |
##############################################
##### Predicting EUR/USD pair using LSTM #####
##############################################

###################################
### Part 1 - Data Preprocessing ###
###################################

### Importing the libraries ###
import numpy as np
import pandas as pd

### Importing the data set ###
dataset = pd.read_csv('dataset.csv')

### Set basic parameters ###
timesteps = 120  # length of each input sequence fed to the LSTM
test_size = 0.2  # 0.2 = 20% of the dataset

### Set hyperparameters (grid searched below) ###
from keras.optimizers import Adam
parameters = {'hidden_layers': [3, 6],
              'units_per_layer': [50, 100, 200],
              'dropout': [0.0, 0.2, 0.4],
              'batch_size': [128, 256],
              'epochs': [100],
              'optimizer': [Adam(lr = 0.001)],
              'loss': ['mean_squared_error'],
              'metrics': ['accuracy']}

### Processing the specific dataset ###
# The code that follows assumes that the prediction(y) is the last column of the dataset.
# If your dataset isn't ready, process it here.

# Convert dates (column 4) to weekday numbers.
# 0 is Monday - 4 is Friday
# Stock exchanges are closed on Weekends
import datetime
for i in range (0, dataset.shape[0]):
    dataset.iloc[i,4] = datetime.datetime.strptime(dataset.iloc[i,4], '%m/%d/%Y').weekday()

# We don't need the 2 last columns and we have to make 'Price' column being the last column.
# Swap 'Price' (col 3) and 'RSI' (col 15) columns, using column 16 as scratch space.
for i in range (0, dataset.shape[0]):
    dataset.iloc[i,16] = dataset.iloc[i,3]
    dataset.iloc[i,3] = dataset.iloc[i,15]
    dataset.iloc[i,15] = dataset.iloc[i,16]
# Delete the unused columns (keep only the first 16)
dataset = dataset.iloc[:,:16]

### Feature Scaling - Normalization ###
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
dataset_scaled = sc.fit_transform(dataset)

### Creating a 3D data structure with [timesteps] timesteps and one output ###
# [Samples, Timesteps, Features]
# x_train(Z) = [Features(Z-1)]
# y_train(Z) = [Feature(Z)]
# Each sample's features exclude the target column; the target is the
# last column at step Z.
x = []
y = []
for i in range(timesteps, dataset_scaled.shape[0]):
    x.append(dataset_scaled[i-timesteps:i, :dataset_scaled.shape[1]-1])
    y.append(dataset_scaled[i, dataset_scaled.shape[1]-1])
x, y = np.array(x), np.array(y)
y = np.reshape(y, (y.shape[0], 1))

### Splitting the dataset into the Training set and Test set ###
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = test_size, random_state = 0)
##################################
### Part 2 - Building the LSTM ###
##################################
### Importing the Keras libraries and packages ###
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
### Build the regressor ###
def build_regressor(hidden_layers, units_per_layer, dropout, optimizer, loss, metrics):
    """Assemble and compile a stacked-LSTM regressor.

    Builds `hidden_layers` LSTM layers (all but the last returning
    sequences), each followed by dropout, plus a single-unit dense output.
    Input shape is taken from the module-level x_train.
    """
    model = Sequential()

    # First LSTM layer defines the input shape.
    model.add(LSTM(units=units_per_layer, return_sequences=True,
                   input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(Dropout(dropout))

    # Additional sequence-returning hidden layers.
    for _ in range(hidden_layers - 1):
        model.add(LSTM(units=units_per_layer, return_sequences=True))
        model.add(Dropout(dropout))

    # Last LSTM layer collapses the sequence.
    model.add(LSTM(units=units_per_layer))
    model.add(Dropout(dropout))

    # Single-value regression output.
    model.add(Dense(units=1))

    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
### Train the model ###
def fit_regressor(epochs, batch_size):
    """Train the module-level `regressor` and return the keras History.

    NOTE(review): relies on the globals `regressor`, `x_train`, `y_train`,
    `x_test`, `y_test` that the surrounding script sets up before calling.
    """
    return regressor.fit(x_train, y_train, epochs = epochs, batch_size = batch_size, validation_data=(x_test, y_test), shuffle=True)
### Start Evaluating and Tuning our LSTM model ###
import matplotlib.pyplot as plt
from itertools import product

results = []          # one summary row per hyperparameter combination
best_parameters = []  # single row describing the best run found so far
best_loss = float("inf")
best_model = Sequential()

# Iterate over every hyperparameter combination. itertools.product visits
# combinations in the same order as the original eight nested loops
# (leftmost argument = outermost loop).
search_grid = product(parameters["hidden_layers"],
                      parameters["units_per_layer"],
                      parameters["dropout"],
                      parameters["batch_size"],
                      parameters["epochs"],
                      parameters["optimizer"],
                      parameters["loss"],
                      parameters["metrics"])
for layers, units_per_layer, dropout, batch_size, epochs, optimizer, loss, metrics in search_grid:
    regressor = build_regressor(int(layers), units_per_layer, dropout, optimizer, loss, [metrics])
    history = fit_regressor(epochs, batch_size)

    # BUG FIX: the original recorded history values at index 0 (the FIRST
    # epoch) while the best-model comparison and the printout below use the
    # LAST epoch (epochs-1). Record final-epoch values so the bookkeeping
    # matches what is compared and displayed.
    final_loss = float(history.history['loss'][-1])
    final_val_loss = float(history.history['val_loss'][-1])

    results.append([layers, units_per_layer, dropout, batch_size, epochs, optimizer,
                    loss, metrics, final_loss, final_val_loss])

    # Learning curves (the first two epochs are skipped as warm-up noise).
    plt.plot(history.history['val_loss'][2:epochs], color = 'blue', label = 'Test')
    plt.plot(history.history['loss'][2:epochs], color = 'red', label = 'Train')
    plt.xlabel('Epochs')
    plt.ylabel('Error')
    plt.legend()
    plt.show()

    print('Layers:\t\t',layers,'\nUnits per layer:',units_per_layer,'\nDropout:\t',dropout,'\nBatch size:\t', batch_size,
          '\nEpochs:\t\t',epochs,'\nOptimizer:\t',optimizer,'\nLoss function:\t',loss,'\nMetrics:\t',metrics,
          '\nLoss (Train):\t',history.history['loss'][epochs-1],'\nLoss (Test):\t',history.history['val_loss'][epochs-1],'\n\n')

    # Keep the best model (lowest final training loss)
    if final_loss < best_loss:
        best_model = regressor
        best_loss = final_loss
        best_parameters.clear()
        best_parameters.append([layers, units_per_layer, dropout, batch_size, epochs, optimizer,
                                loss, metrics, final_loss, final_val_loss,
                                float(history.history['acc'][-1]), float(history.history['val_acc'][-1])])

### Show the best parameters ###
print('************* Best parameters *************')
print('* Layers:\t',best_parameters[0][0],'\n* Units:\t',best_parameters[0][1],'\n* Dropout:\t',best_parameters[0][2],'\n* Batch size:\t',
      best_parameters[0][3],'\n* Epochs:\t',best_parameters[0][4],'\n* Optimizer:\t',best_parameters[0][5],'\n* Loss function:',best_parameters[0][6],
      '\n* Metrics:\t',best_parameters[0][7],'\n* Loss (Train):\t',best_parameters[0][8],'\n* Loss (Test):\t',best_parameters[0][9])
print('\n*******************************************\n')

### Save the weights ###
best_model.save_weights('./checkpoint')
###########################################
### Part 3 - Making a single prediction ###
###########################################

### INSERT HERE your timeseries in this array [Timesteps]x[Features]###
for_predict = x_test[0,:]  # For example, take the first timeseries of the Test set

### Reshape and predict ###
# It will use the best trained regressor #
# Add the leading batch dimension: [1, Timesteps, Features].
for_predict = np.reshape(for_predict, (1,for_predict.shape[0], for_predict.shape[1]))
predictions_scaled = best_model.predict(for_predict)

### Invert MinMax transform ###
# Our scaler was fit on the full feature matrix, so we have to add zero
# padding for the feature columns to be able to inverse the transform
# correctly; only the last (target) column is then kept.
padding = np.zeros((for_predict.shape[0],dataset.shape[1]-1))
predictions_scaled = np.append(padding, predictions_scaled, axis=1)
predictions_scaled = sc.inverse_transform(predictions_scaled)
predictions = predictions_scaled[:,dataset_scaled.shape[1]-1]

### Calculate RMSE for the new predictions ###
# ADD HERE the actual values to the actual_values (without normalization)
actual_values = [1.110]  # Just an example
# Calculate RMSE = sqrt(mean squared error)
from math import sqrt
from sklearn.metrics import mean_squared_error
rmse = sqrt(mean_squared_error(predictions, actual_values))
print('Predictions RMSE: %.3f' % rmse)
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler",
"numpy.append",
"numpy.reshape",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.show",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"keras.optimizers.Adam",
"datetime.datetime.st... | [((364, 390), 'pandas.read_csv', 'pd.read_csv', (['"""dataset.csv"""'], {}), "('dataset.csv')\n", (375, 390), True, 'import pandas as pd\n'), ((1746, 1780), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1758, 1780), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2231, 2261), 'numpy.reshape', 'np.reshape', (['y', '(y.shape[0], 1)'], {}), '(y, (y.shape[0], 1))\n', (2241, 2261), True, 'import numpy as np\n'), ((2416, 2475), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': 'test_size', 'random_state': '(0)'}), '(x, y, test_size=test_size, random_state=0)\n', (2432, 2475), False, 'from sklearn.model_selection import train_test_split\n'), ((4078, 4090), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4088, 4090), False, 'from keras.models import Sequential\n'), ((7702, 7774), 'numpy.reshape', 'np.reshape', (['for_predict', '(1, for_predict.shape[0], for_predict.shape[1])'], {}), '(for_predict, (1, for_predict.shape[0], for_predict.shape[1]))\n', (7712, 7774), True, 'import numpy as np\n'), ((7993, 8047), 'numpy.zeros', 'np.zeros', (['(for_predict.shape[0], dataset.shape[1] - 1)'], {}), '((for_predict.shape[0], dataset.shape[1] - 1))\n', (8001, 8047), True, 'import numpy as np\n'), ((8066, 8112), 'numpy.append', 'np.append', (['padding', 'predictions_scaled'], {'axis': '(1)'}), '(padding, predictions_scaled, axis=1)\n', (8075, 8112), True, 'import numpy as np\n'), ((2202, 2213), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2210, 2213), True, 'import numpy as np\n'), ((2215, 2226), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2223, 2226), True, 'import numpy as np\n'), ((2939, 2951), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2949, 2951), False, 'from keras.models import Sequential\n'), ((8499, 8545), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['predictions', 
'actual_values'], {}), '(predictions, actual_values)\n', (8517, 8545), False, 'from sklearn.metrics import mean_squared_error\n'), ((776, 790), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (780, 790), False, 'from keras.optimizers import Adam\n'), ((3040, 3145), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'units_per_layer', 'return_sequences': '(True)', 'input_shape': '(x_train.shape[1], x_train.shape[2])'}), '(units=units_per_layer, return_sequences=True, input_shape=(x_train.\n shape[1], x_train.shape[2]))\n', (3044, 3145), False, 'from keras.layers import LSTM\n'), ((3166, 3182), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3173, 3182), False, 'from keras.layers import Dropout\n'), ((3447, 3474), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'units_per_layer'}), '(units=units_per_layer)\n', (3451, 3474), False, 'from keras.layers import LSTM\n'), ((3496, 3512), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3503, 3512), False, 'from keras.layers import Dropout\n'), ((3566, 3580), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), '(units=1)\n', (3571, 3580), False, 'from keras.layers import Dense\n'), ((1228, 1286), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dataset.iloc[i, 4]', '"""%m/%d/%Y"""'], {}), "(dataset.iloc[i, 4], '%m/%d/%Y')\n", (1254, 1286), False, 'import datetime\n'), ((3296, 3346), 'keras.layers.LSTM', 'LSTM', ([], {'units': 'units_per_layer', 'return_sequences': '(True)'}), '(units=units_per_layer, return_sequences=True)\n', (3300, 3346), False, 'from keras.layers import LSTM\n'), ((3374, 3390), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3381, 3390), False, 'from keras.layers import Dropout\n'), ((5051, 5126), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss'][2:epochs]"], {'color': '"""blue"""', 'label': '"""Test"""'}), "(history.history['val_loss'][2:epochs], color='blue', label='Test')\n", (5059, 
5126), True, 'import matplotlib.pyplot as plt\n'), ((5163, 5234), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss'][2:epochs]"], {'color': '"""red"""', 'label': '"""Train"""'}), "(history.history['loss'][2:epochs], color='red', label='Train')\n", (5171, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5271, 5291), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5281, 5291), True, 'import matplotlib.pyplot as plt\n'), ((5324, 5343), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (5334, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5376, 5388), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5386, 5388), True, 'import matplotlib.pyplot as plt\n'), ((5421, 5431), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5429, 5431), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/python3
import serial # pip3 install pyserial
import argparse
import time
import scipy.signal
from rtlsdr import RtlSdr # pip3 install pyrtlsdr
import numpy as np
import matplotlib.pyplot as plt
import csv
def isFloat(string):
    """Return True when *string* parses as a float, False otherwise."""
    try:
        float(string)
    except ValueError:
        return False
    return True
def set_pll(s, freq, on=1, power=1, wait_for_ok=True):
    """Configure the PLL frequency synthesizer over a serial link.

    Sends a single ASCII command line ``"<on> <freq> <power>\\n"`` to the
    controller firmware.

    Parameters
    ----------
    s : serial.Serial
        Open serial port connected to the PLL controller.
    freq : float
        Target frequency (units as understood by the controller firmware).
    on : int
        1 to enable the output, 0 to disable it.
    power : int
        Output power setting, forwarded verbatim.
    wait_for_ok : bool
        Kept for interface compatibility; the acknowledgement handshake was
        disabled (previously commented out), so this flag currently has no
        effect.
    """
    # Drop any stale bytes so a later read (if the handshake is ever
    # re-enabled) starts from a clean buffer.
    s.flushInput()
    cmd = str(on) + ' ' + str(freq) + ' ' + str(power) + '\n'
    s.write(str.encode(cmd))
def sdr_get_power(sdr):
    """Capture one block of IQ samples and return its power spectrum.

    Returns a tuple ``(freq, psd)``: ``freq`` in MHz, shifted to the SDR's
    current centre frequency, and ``psd`` in dB.
    """
    n_samples = 16 * 1024
    iq = sdr.read_samples(n_samples)
    # Two-sided Welch PSD estimate; the sample rate is given in MHz so the
    # frequency axis comes out in MHz directly.
    freq, psd = scipy.signal.welch(
        iq,
        sdr.sample_rate / 1e6,
        nperseg=8192,
        return_onesided=0,
        window='flattop',
    )
    psd_db = 10 * np.log10(np.sqrt(psd ** 2))
    return freq + sdr.center_freq / 1e6, psd_db
def readCalibrationFile(path, index):
    """Read a tab-separated calibration table.

    Returns a dict mapping frequency (column 0, as float) to the power value
    found in column *index*, or ``None`` when *path* is ``None``.  Rows whose
    first column is not numeric (e.g. a header) are skipped.
    """
    if path is None:
        return None
    calibration = dict()
    with open(path, newline='') as csvfile:
        for row in csv.reader(csvfile, delimiter='\t'):
            if not isFloat(row[0]):
                continue  # header or malformed line
            calibration[float(row[0])] = float(row[index])
    return calibration
def sdr_init(index, freq, gain, sample_rate=2.4e6):
    """Open and configure an RTL-SDR device.

    Parameters
    ----------
    index : int
        Device index as enumerated by the rtlsdr driver.
    freq : float
        Centre frequency in MHz (multiplied by 1e6 below).
    gain : int
        Tuner gain setting (tenths of dB, per the CLI help text).
    sample_rate : float
        Sample rate in Hz; default 2.4 MS/s.

    Returns
    -------
    RtlSdr
        The configured device handle.
    """
    sdr = RtlSdr(device_index = index)
    # Bug fix: honour the sample_rate argument. It used to be hard-coded to
    # 2.4e6, silently ignoring the caller's value.
    sdr.sample_rate = sample_rate
    sdr.center_freq = freq * 1e6
    sdr.gain = gain
    sdr.set_agc_mode(0)
    # The first acquisition after configuration returns garbage; discard it.
    sdr_get_power(sdr)
    return sdr
def sdr_measure(sdr, f, cal_val, f_range=1, nb_meas=5):
    """Measure the peak received power around frequency *f* (MHz).

    Performs *nb_meas* sweeps, keeps for each sweep the strongest PSD bin
    within ``f +/- f_range`` MHz, averages the per-sweep peaks, and subtracts
    the calibration offset from *cal_val* (a dict keyed by frequency) when
    one is provided.
    """
    sdr.center_freq = f * 1e6
    peaks = []
    for _ in range(nb_meas):
        freq, psd = sdr_get_power(sdr)
        # Start from the spectrum floor so any in-range bin can win.
        peak = np.min(psd)
        for bin_freq, bin_power in zip(freq, psd):
            if f - f_range < bin_freq < f + f_range and bin_power > peak:
                peak = bin_power
        peaks.append(peak)
    result = np.mean(peaks)
    if cal_val is not None:
        result -= cal_val[f]
    return result
def main():
    """Sweep the PLL source over a frequency range and record S21 (and,
    with a second SDR attached, S11) power levels, then plot both traces.

    Results are also printed to stdout as tab-separated columns
    ``Frequency  S21  S11``.
    """
    pass;
    parser = argparse.ArgumentParser(description='EMI mapping with 3D-printer and RTL-SDR.')
    parser.add_argument('-p', '--serial-port', type=str, help='serial port',default='/dev/ttyUSB0')
    parser.add_argument('-b', '--baud-rate', type=int, help='serial baud rate',default=9600)
    parser.add_argument('-l', '--frequency-lbound', type=float, help='',default=1000)
    parser.add_argument('-s', '--frequency-step', type=float, help='',default=1)
    parser.add_argument('-r', '--frequency-span', type=float, help='',default=300)
    parser.add_argument('-g', '--gain', type=int, help='sets the SDR gain in 0.1dB',default=0)
    parser.add_argument('-t', '--thru', type=str, help='Input file of a thru measurement')
    parser.add_argument('-o', '--open', type=str, help='Input file of an open/short measurement')
    parser.add_argument('--invert-sdr', action='store_true', help='Swaps the S11 and S21 SDRs')
    args = parser.parse_args()
    # Args
    # Dual-device (S11 + S21) mode only when more than one RTL-SDR is attached.
    s11_listen = len(RtlSdr.get_device_serial_addresses()) > 1
    #if not s11_listen:
    #    print("-> Running in single device mode (S21 only)")
    #else:
    #    print("-> Running in dual device mode (S11 and S21)")
    # SDR stuff
    freq_lbound = args.frequency_lbound ;
    freq_range = args.frequency_span;
    freq_ubound = freq_lbound + freq_range;
    freq_step = args.frequency_step;
    frequencies = np.arange(freq_lbound,freq_ubound,freq_step)
    # Open serial port
    s = serial.Serial(args.serial_port, args.baud_rate, timeout=1)
    time.sleep(2) # Wait to boot
    # Calibration (Open/Short, (Load), Thru)
    # Column 2 normalizes S11 against an open/short, column 1 normalizes S21
    # against a thru measurement.
    cal_s11 = readCalibrationFile(args.open, 2) # O/S -> S11 = 0 dB
    cal_s21 = readCalibrationFile(args.thru, 1) # Thru -> S21 = 0 dB
    # Open SDRs
    sdr_S21_index = 0 if not args.invert_sdr else 1
    sdr_S11_index = 1 if not args.invert_sdr else 0
    # NOTE(review): sdr_init already multiplies its freq argument by 1e6;
    # passing freq_lbound * 1e6 here looks double-scaled. Harmless in
    # practice because sdr_measure resets center_freq, but verify.
    sdr_S21 = sdr_init(sdr_S21_index, freq_lbound * 1e6, args.gain)
    if s11_listen:
        sdr_S11 = sdr_init(sdr_S11_index, freq_lbound * 1e6, args.gain)
    s11 = []
    s21 = []
    print('Frequency\tS21\tS11')
    # Main sweep: tune the PLL, then measure both ports at each frequency.
    for f in frequencies:
        print(f, end="\t", flush=True)
        set_pll(s,f)
        # S21
        tmp = sdr_measure(sdr_S21, f, cal_s21)
        s21.append(tmp)
        print(tmp, end="\t", flush=True)
        # S11
        if s11_listen:
            tmp = sdr_measure(sdr_S11, f, cal_s11)
            s11.append(tmp)
            print(tmp, flush=True)
        else:
            print(0, flush=True)
    #s21 = s21 - np.max(s21)
    plt.plot(frequencies, s21, label="S21")
    if s11_listen:
        #s11 = s11 - np.max(s11)
        plt.plot(frequencies, s11, label="S11")
    plt.grid(True)
    plt.legend(loc='lower right')
    plt.xlim([freq_lbound,freq_ubound])
    #plt.ylim([None,0])
    plt.show()
    # Close resources: switch the PLL output off, then release serial + SDRs.
    set_pll(s,f,0,0)
    s.close()
    sdr_S21.close()
    if s11_listen:
        sdr_S11.close()
# Run the sweep only when executed as a script (not on import).
if __name__== "__main__":
    main()
| [
"serial.Serial",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"csv.reader",
"rtlsdr.RtlSdr",
"matplotlib.pyplot.legend",
"time.sleep",
"numpy.min",
"numpy.mean",
"numpy.arange",
"rtlsdr.RtlSdr.get_device_serial_addresses",
"matplo... | [((2287, 2366), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""EMI mapping with 3D-printer and RTL-SDR."""'}), "(description='EMI mapping with 3D-printer and RTL-SDR.')\n", (2310, 2366), False, 'import argparse\n'), ((3565, 3611), 'numpy.arange', 'np.arange', (['freq_lbound', 'freq_ubound', 'freq_step'], {}), '(freq_lbound, freq_ubound, freq_step)\n', (3574, 3611), True, 'import numpy as np\n'), ((3634, 3692), 'serial.Serial', 'serial.Serial', (['args.serial_port', 'args.baud_rate'], {'timeout': '(1)'}), '(args.serial_port, args.baud_rate, timeout=1)\n', (3647, 3692), False, 'import serial\n'), ((3693, 3706), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3703, 3706), False, 'import time\n'), ((4624, 4663), 'matplotlib.pyplot.plot', 'plt.plot', (['frequencies', 's21'], {'label': '"""S21"""'}), "(frequencies, s21, label='S21')\n", (4632, 4663), True, 'import matplotlib.pyplot as plt\n'), ((4752, 4766), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4760, 4766), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4796), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4777, 4796), True, 'import matplotlib.pyplot as plt\n'), ((4797, 4833), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[freq_lbound, freq_ubound]'], {}), '([freq_lbound, freq_ubound])\n', (4805, 4833), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4861, 4863), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1636), 'rtlsdr.RtlSdr', 'RtlSdr', ([], {'device_index': 'index'}), '(device_index=index)\n', (1616, 1636), False, 'from rtlsdr import RtlSdr\n'), ((2173, 2185), 'numpy.mean', 'np.mean', (['avg'], {}), '(avg)\n', (2180, 2185), True, 'import numpy as np\n'), ((4712, 4751), 'matplotlib.pyplot.plot', 'plt.plot', (['frequencies', 's11'], {'label': '"""S11"""'}), "(frequencies, s11, label='S11')\n", (4720, 4751), 
True, 'import matplotlib.pyplot as plt\n'), ((1995, 2006), 'numpy.min', 'np.min', (['psd'], {}), '(psd)\n', (2001, 2006), True, 'import numpy as np\n'), ((3207, 3243), 'rtlsdr.RtlSdr.get_device_serial_addresses', 'RtlSdr.get_device_serial_addresses', ([], {}), '()\n', (3241, 3243), False, 'from rtlsdr import RtlSdr\n'), ((1049, 1066), 'numpy.sqrt', 'np.sqrt', (['(psd ** 2)'], {}), '(psd ** 2)\n', (1056, 1066), True, 'import numpy as np\n'), ((1276, 1311), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (1286, 1311), False, 'import csv\n')] |
import numpy
from copy import deepcopy
from barracuda.neurontype import neuron_type
import json
class _Neuron(object):
    """Abstract base for network layers ("neurons").

    Subclasses must implement ``forward``, ``backward`` and ``serialize``.
    """

    def __init__(self):
        # Cached input/output of the most recent forward pass.
        self.input = None
        self.output = None

    def forward(self, input_data):
        """Compute and return the layer output for *input_data*."""
        raise NotImplementedError

    def backward(self, output_error, learning_rate):
        """Propagate *output_error* backwards and update parameters."""
        raise NotImplementedError

    def serialize(self):
        """Return a JSON string describing this layer."""
        raise NotImplementedError
class InterNeuron(_Neuron):
    """Fully connected layer: ``output = input . weights + bias``."""

    def __init__(self, input_size: int, output_size: int):
        super().__init__()
        self.shape = (input_size, output_size)
        # Uniform random initialisation in [-1, 1) for weights and bias.
        self.weights: numpy.array = numpy.random.uniform(low=-1, high=1, size=(input_size, output_size))
        self.bias: numpy.array = numpy.random.uniform(low=-1, high=1, size=(1, output_size))
        self.neuron_type = neuron_type.InterNeuron

    def forward(self, input_data):
        """Affine forward pass; caches input and output for backprop."""
        self.input = numpy.array(input_data)
        self.output = numpy.dot(self.input, self.weights) + self.bias
        return self.output

    def backward(self, output_error, learning_rate):
        """One gradient step on weights/bias; returns the input error."""
        # Error for the previous layer, computed with the pre-update weights.
        input_error = numpy.dot(output_error, self.weights.T)
        gradient = numpy.dot(self.input.T, output_error)
        self.weights -= learning_rate * gradient
        self.bias -= learning_rate * output_error
        return input_error

    def serialize(self):
        """Serialize layer type, shape and parameters as pretty JSON."""
        payload = {
            "neuron_type": "InterNeuron",
            "shape": self.shape,
            "weights": self.weights.tolist(),
            "bias": self.bias.tolist(),
        }
        return json.dumps(payload, indent=2)
class ActivationNeuron(_Neuron):
    """Element-wise activation layer wrapping an activation object.

    The activation object must expose ``normal`` (the function itself),
    ``derivative`` and a ``func`` label used for serialization.
    """

    def __init__(self, activation):
        super().__init__()
        self.activation = activation
        self.neuron_type = neuron_type.ActivationNeuron

    def forward(self, input_data):
        """Apply the activation function; caches input and output."""
        self.input = input_data
        self.output = self.activation.normal(self.input)
        return self.output

    def backward(self, output_error, learning_rate):
        """Chain rule: scale the incoming error by the local derivative."""
        return self.activation.derivative(self.input) * output_error

    def serialize(self):
        """Serialize the layer type and activation label as pretty JSON."""
        payload = {"neuron_type": "ActivationNeuron", "activation": self.activation.func}
        return json.dumps(payload, indent=2)
| [
"numpy.dot",
"numpy.random.uniform",
"numpy.array",
"json.dumps"
] | [((609, 677), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(input_size, output_size)'}), '(low=-1, high=1, size=(input_size, output_size))\n', (629, 677), False, 'import numpy\n'), ((709, 768), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(1, output_size)'}), '(low=-1, high=1, size=(1, output_size))\n', (729, 768), False, 'import numpy\n'), ((874, 897), 'numpy.array', 'numpy.array', (['input_data'], {}), '(input_data)\n', (885, 897), False, 'import numpy\n'), ((1083, 1122), 'numpy.dot', 'numpy.dot', (['output_error', 'self.weights.T'], {}), '(output_error, self.weights.T)\n', (1092, 1122), False, 'import numpy\n'), ((1147, 1184), 'numpy.dot', 'numpy.dot', (['self.input.T', 'output_error'], {}), '(self.input.T, output_error)\n', (1156, 1184), False, 'import numpy\n'), ((1484, 1509), 'json.dumps', 'json.dumps', (['val'], {'indent': '(2)'}), '(val, indent=2)\n', (1494, 1509), False, 'import json\n'), ((2136, 2161), 'json.dumps', 'json.dumps', (['val'], {'indent': '(2)'}), '(val, indent=2)\n', (2146, 2161), False, 'import json\n'), ((932, 967), 'numpy.dot', 'numpy.dot', (['self.input', 'self.weights'], {}), '(self.input, self.weights)\n', (941, 967), False, 'import numpy\n')] |
import imp, glob, numpy
imp.load_source('common_functions','common_functions.py')
import common_functions as cf
def dirty_cont_image(config,config_raw,config_file,logger):
    """
    Generates a dirty image of each science target including the continuum emission.
    Checks that the pixel size and image size are set (will prompt user if in interactive mode).

    NOTE(review): this function relies on names that are not imported in this
    file (`shutil`, `sys`, `casalog`, Python-2 `raw_input`) and on the
    module-level global `interactive` set at the bottom of the script -
    presumably all provided by the CASA task environment; verify.

    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    config_raw = The instance of the parser.
    config_file = Path to configuration file. (String)
    """
    logger.info('Starting making dirty continuum image.')
    calib = config['calibration']
    rest_freq = config['global']['rest_freq']
    # Copies so the config lists are not mutated in place.
    targets = calib['target_names'][:]
    fields = calib['targets'][:]
    # Targets split per spectral window ('name.spwN') share the field of the
    # preceding entry with the same base name; duplicate the field for them.
    for i in range(len(targets)):
        target = targets[i]
        if 'spw' in target:
            inx = target.index('.spw')
            target_name = target[:inx]
            if target_name in calib['target_names'][i-1]:
                fields.insert(i,fields[i-1])
    if calib['mosaic']:
        targets = list(set(calib['target_names']))
    cln_param = config['clean']
    src_dir = config['global']['src_dir']+'/'
    img_dir = config['global']['img_dir']+'/'
    cf.makedir('/.'+img_dir,logger)
    logger.info('Removing any existing dirty continuum images.')
    del_list = glob.glob(img_dir+'*cont.dirty*')
    for file_path in del_list:
        logger.info('Deleting: '+file_path)
        shutil.rmtree(file_path)
    logger.info('Checking clean parameters for dirty image (inc. continuum).')
    # --- Pixel size validation: one entry per target is required. ---
    reset_cln = False
    if (len(cln_param['pix_size']) == 0) or (len(cln_param['pix_size']) != len(targets)):
        # Non-interactive runs cannot prompt; abort on any mismatch.
        if not interactive:
            logger.critical('The number of pixel sizes provided does not match the number of targets.')
            logger.info('Pixel sizes: {}'.format(cln_param['pix_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['pix_size']) < len(targets):
            logger.warning('There are more target fields than pixel sizes. Appending blanks.')
            while len(cln_param['pix_size']) < len(targets):
                cln_param['pix_size'].append('')
        elif len(cln_param['pix_size']) > len(targets):
            logger.warning('There are more pixel sizes than target fields.')
            logger.info('Current pixel sizes: {}'.format(cln_param['pix_size']))
            logger.warning('The pixel size list will now be truncated to match the number of targets.')
            cln_param['pix_size'] = cln_param['pix_size'][:len(targets)]
    elif interactive:
        print('Current pixel sizes set as:')
        for i in range(len(cln_param['pix_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['pix_size'][i]))
        resp = str(raw_input('Do you want revise the pixel sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    # Prompt for new pixel sizes and persist them back to the config file.
    if reset_cln and interactive:
        print('For each target enter the desired pixel size:')
        for i in range(len(targets)):
            cln_param['pix_size'][i] = cf.uinput('Pixel size for {}: '.format(targets[i]), cln_param['pix_size'][i])
            logger.info('Setting pixel size for {0} as: {1}.'.format(targets[i], cln_param['pix_size'][i]))
        logger.info('Updating config file to set pixel sizes.')
        config_raw.set('clean','pix_size',cln_param['pix_size'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
    logger.info('Pixel sizes set as: {}.'.format(cln_param['pix_size']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Image size validation: same pattern as pixel sizes above. ---
    reset_cln = False
    if len(cln_param['im_size']) == 0 or len(cln_param['im_size']) != len(targets):
        if not interactive:
            logger.critical('The number of image sizes provided does not match the number of targets.')
            logger.info('Image sizes: {}'.format(cln_param['im_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['im_size']) < len(targets):
            logger.warning('There are more target fields than image sizes. Appending blanks.')
            while len(cln_param['im_size']) < len(targets):
                cln_param['im_size'].append('')
        elif len(cln_param['im_size']) > len(targets):
            logger.warning('There are more image sizes than target fields.')
            logger.info('Current image sizes: {} pixels.'.format(cln_param['im_size']))
            logger.warning('The image size list will now be truncated to match the number of targets.')
            cln_param['im_size'] = cln_param['im_size'][:len(targets)]
    elif interactive:
        print('Current images sizes set as:')
        for i in range(len(cln_param['im_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['im_size'][i]))
        resp = str(raw_input('Do you want revise the image sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the desired image size:')
        for i in range(len(targets)):
            print('Note: The pixel size for this target was set to: {}'.format(cln_param['pix_size'][i]))
            cln_param['im_size'][i] = cf.uinput('Image size for {}: '.format(targets[i]), cln_param['im_size'][i])
            logger.info('Setting image size for {0} as: {1} x {2}.'.format(targets[i], cln_param['im_size'][i],cln_param['pix_size'][i]))
        logger.info('Updating config file to set image sizes.')
        config_raw.set('clean','im_size',cln_param['im_size'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
    logger.info('Image sizes set as: {} pixels.'.format(cln_param['im_size']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Imaging loop: build and run one tclean call per target. ---
    for i in range(len(targets)):
        target = targets[i]
        field = fields[i]
        gridder = 'wproject'
        if calib['mosaic']:
            # Mosaic mode: image all fields matching the target name together.
            for target_name in targets:
                inx = [j for j in range(len(calib['target_names'])) if target_name in calib['target_names'][j]]
                fields = numpy.array(calib['targets'],dtype='str')[inx]
            field = ','.join(fields)
            gridder = 'mosaic'
        logger.info('Making dirty image of {} (inc. continuum).'.format(target))
        # niter=0 makes this a dirty image (no deconvolution iterations).
        command = "tclean(vis='{0}{1}.split', field='{2}', imagename='{3}{1}.cont.dirty', cell='{4}', imsize=[{5},{5}], specmode='cube', outframe='bary', veltype='radio', restfreq='{6}', gridder='{7}', wprojplanes=-1, pblimit=0.1, normtype='flatnoise', deconvolver='hogbom', weighting='briggs', robust={8}, niter=0, phasecenter='{9}', interactive=False)".format(src_dir,target,field,img_dir,cln_param['pix_size'][i],cln_param['im_size'][i],rest_freq,gridder,cln_param['robust'],cln_param['phasecenter'])
        logger.info('Executing command: '+command)
        exec(command)
        cf.check_casalog(config,config_raw,logger,casalog)
    logger.info('Completed making dirty continuum image.')
# Read configuration file with parameters
config_file = sys.argv[-1]
config,config_raw = cf.read_config(config_file)
interactive = config['global']['interactive']
# Set up your logger
logger = cf.get_logger(LOG_FILE_INFO = '{}.log'.format(config['global']['project_name']),
LOG_FILE_ERROR = '{}_errors.log'.format(config['global']['project_name'])) # Set up your logger
# Define MS file name
msfile = '{0}.ms'.format(config['global']['project_name'])
#Make dirty continuum image
cf.check_casaversion(logger)
cf.rmdir(config['global']['img_dir'],logger)
dirty_cont_image(config,config_raw,config_file,logger)
#Review and backup parameters file
cf.diff_pipeline_params(config_file,logger)
cf.backup_pipeline_params(config_file,logger) | [
"common_functions.read_config",
"common_functions.diff_pipeline_params",
"common_functions.makedir",
"imp.load_source",
"numpy.array",
"common_functions.check_casaversion",
"glob.glob",
"common_functions.check_casalog",
"common_functions.rmdir",
"common_functions.backup_pipeline_params"
] | [((24, 82), 'imp.load_source', 'imp.load_source', (['"""common_functions"""', '"""common_functions.py"""'], {}), "('common_functions', 'common_functions.py')\n", (39, 82), False, 'import imp, glob, numpy\n'), ((7308, 7335), 'common_functions.read_config', 'cf.read_config', (['config_file'], {}), '(config_file)\n', (7322, 7335), True, 'import common_functions as cf\n'), ((7722, 7750), 'common_functions.check_casaversion', 'cf.check_casaversion', (['logger'], {}), '(logger)\n', (7742, 7750), True, 'import common_functions as cf\n'), ((7751, 7796), 'common_functions.rmdir', 'cf.rmdir', (["config['global']['img_dir']", 'logger'], {}), "(config['global']['img_dir'], logger)\n", (7759, 7796), True, 'import common_functions as cf\n'), ((7887, 7931), 'common_functions.diff_pipeline_params', 'cf.diff_pipeline_params', (['config_file', 'logger'], {}), '(config_file, logger)\n', (7910, 7931), True, 'import common_functions as cf\n'), ((7931, 7977), 'common_functions.backup_pipeline_params', 'cf.backup_pipeline_params', (['config_file', 'logger'], {}), '(config_file, logger)\n', (7956, 7977), True, 'import common_functions as cf\n'), ((1254, 1288), 'common_functions.makedir', 'cf.makedir', (["('/.' + img_dir)", 'logger'], {}), "('/.' + img_dir, logger)\n", (1264, 1288), True, 'import common_functions as cf\n'), ((1366, 1401), 'glob.glob', 'glob.glob', (["(img_dir + '*cont.dirty*')"], {}), "(img_dir + '*cont.dirty*')\n", (1375, 1401), False, 'import imp, glob, numpy\n'), ((7103, 7156), 'common_functions.check_casalog', 'cf.check_casalog', (['config', 'config_raw', 'logger', 'casalog'], {}), '(config, config_raw, logger, casalog)\n', (7119, 7156), True, 'import common_functions as cf\n'), ((6320, 6362), 'numpy.array', 'numpy.array', (["calib['targets']"], {'dtype': '"""str"""'}), "(calib['targets'], dtype='str')\n", (6331, 6362), False, 'import imp, glob, numpy\n')] |
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import plotly.express as px
import pyarrow.parquet as pq
from scipy.stats import betabinom as sp_betabinom
# import dashboard
from remade import dashboard as dashboard
def clip_df(df, column):
    """Clip *column* of *df* to be non-negative, in place.

    The untouched values are preserved under a new column named
    ``"_" + column``.  Does nothing when the column is absent.
    """
    if column not in df.columns:
        return
    backup_name = "_" + column
    df[backup_name] = df[column]  # keep the raw values around
    df[column] = np.clip(df[column], a_min=0, a_max=None)
def pd_wide_to_long_forward_reverse(group_wide, sep, direction):
    """Melt one strand's columns (``k<sep>z``, ``N<sep>z``, ``f<sep>z``)
    into long format, tagging every row with *direction*.

    Returns a dataframe with columns ``tax_id, z, k, N, f, direction``.
    """
    stubs = ["k", "N", "f"]
    long_df = pd.wide_to_long(group_wide, stubnames=stubs, i="tax_id", j="z", sep=sep)
    long_df = long_df[stubs]
    long_df["direction"] = direction
    return long_df.reset_index()
def wide_to_long_df(group_wide):
    """Stack the forward ('+') and reverse ('-') strand columns of a wide
    dataframe into one long dataframe with a ``direction`` column."""
    forward = pd_wide_to_long_forward_reverse(group_wide, sep="+", direction="Forward")
    reverse = pd_wide_to_long_forward_reverse(group_wide, sep="-", direction="Reverse")
    return pd.concat([forward, reverse])
class Results:
    """Container around the fit-results dataframe loaded from parquet.

    Prepares derived columns, per-sample colors/symbols and plotly hover
    metadata, and offers query helpers used by the dashboard.
    """

    def __init__(self, results_dir):
        self.results_dir = Path(results_dir)
        self._load_df_results()
        self._set_cmap()
        self._set_hover_info()

    def _load_parquet_file(self, results_dir):
        # pyarrow accepts a directory of parquet files as a single table.
        df = pq.read_table(results_dir).to_pandas()
        return df

    def _load_df_results(self):
        """Load the dataframe and add derived/clipped/log columns."""
        df = self._load_parquet_file(self.results_dir)
        # Likelihood ratios are clipped at zero; originals kept as _<column>.
        for column in ["lambda_LR", "forward_lambda_LR", "reverse_lambda_LR"]:
            clip_df(df, column)
        df["D_max_significance"] = df["D_max"] / df["D_max_std"]
        df["rho_Ac_abs"] = np.abs(df["rho_Ac"])
        log_columns = [
            "N_reads",
            "N_alignments",
            "lambda_LR",
            "phi",
            "k_sum_total",
            "N_sum_total",
        ]
        # log10(1 + x) keeps zero-valued entries finite.
        for column in log_columns:
            log_column = "log_" + column
            df.loc[:, log_column] = np.log10(1 + df[column])
        self.df = df
        self.all_tax_ids = set(self.df.tax_id.unique())
        self.all_tax_names = set(self.df.tax_name.unique())
        self.all_tax_ranks = set(self.df.tax_rank.unique())
        self.shortnames = list(self.df.shortname.unique())
        self.columns = list(self.df.columns)
        self.set_marker_size(variable="N_reads", function="sqrt", slider=30)

    def set_marker_size(self, variable="N_reads", function="sqrt", slider=30):
        """Recompute the plot marker 'size' column from *variable*.

        *function* selects the transform ('constant', 'identity', 'sqrt' or
        'log10'); *slider* is stored as the overall marker scale.
        """
        d_functions = {
            "constant": np.ones_like,
            "identity": lambda x: x,
            "sqrt": np.sqrt,
            "log10": np.log10,
        }
        self.df.loc[:, "size"] = d_functions[function](self.df[variable])
        self.max_of_size = np.max(self.df["size"])
        self.marker_size = slider

    def filter(self, filters):
        """Return the subset of the dataframe matching *filters*.

        *filters* maps column names to either an exact value / list of
        values (shortname/tax_id/tax_rank/tax_name variants) or a
        ``(low, high)`` range for numeric columns; ``None`` entries are
        skipped.  Builds one pandas ``query`` string from all conditions.
        """
        query = ""
        for column, filter in filters.items():
            if filter is None:
                continue
            elif column == "shortnames":
                query += f"(shortname in {filter}) & "
            elif column == "shortname":
                query += f"(shortname == '{filter}') & "
            elif column == "tax_id":
                query += f"(tax_id == {filter}) & "
            elif column == "tax_ids":
                query += f"(tax_id in {filter}) & "
            elif column == "tax_rank":
                query += f"(tax_rank == {filter}) & "
            elif column == "tax_ranks":
                query += f"(tax_rank in {filter}) & "
            elif column == "tax_name":
                query += f"(tax_name == {filter}) & "
            elif column == "tax_names":
                query += f"(tax_name in {filter}) & "
            else:
                # Range filter; slider values for log-scaled columns are
                # converted back to linear space first.
                low, high = filter
                if dashboard.utils.is_log_transform_column(column):
                    low = dashboard.utils.log_transform_slider(low)
                    high = dashboard.utils.log_transform_slider(high)
                query += f"({low} <= {column} <= {high}) & "
        # Strip the trailing "& " joiner.
        query = query[:-2]
        # print(query)
        return self.df.query(query)

    def _set_cmap(self):
        """Assign one color (and, past 10 samples, a marker symbol) per
        shortname, cycling through the D3 qualitative palette."""
        # https://plotly.com/python/discrete-color/#color-sequences-in-plotly-express
        # blue, orange, green, red, purple, brown, pink, grey, camouflage, turquoise
        cmap = px.colors.qualitative.D3
        N_cmap = len(cmap)
        groupby = self.df.groupby("shortname", sort=False)
        symbol_counter = 0
        d_cmap = {}
        d_symbols = {}
        for i, (name, _) in enumerate(groupby):
            # New symbol every time the palette wraps around.
            if (i % N_cmap) == 0 and i != 0:
                symbol_counter += 1
            d_cmap[name] = cmap[i % N_cmap]
            d_symbols[name] = symbol_counter
        self.cmap = cmap
        self.d_cmap = d_cmap
        self.d_symbols = d_symbols
        self.d_cmap_fit = {"Forward": cmap[0], "Reverse": cmap[3], "Fit": cmap[2]}

    def _set_hover_info(self):
        """Build the plotly customdata column list and hover template.

        The template is first written with `_XXX_` placeholders and then the
        placeholders are replaced, in order, by consecutive customdata
        indices (see the while-loop below).  Two variants exist depending on
        whether Bayesian fit columns are present in the dataframe.
        """
        columns = list(self.df.columns)
        placeholder = "_XXX_"
        contains_Bayesian = any(["Bayesian" in column for column in columns])
        if contains_Bayesian:
            self.custom_data_columns = [
                "shortname",
                "tax_name",
                "tax_rank",
                "tax_id",
                # Frequentist fits
                "lambda_LR",
                "D_max",
                "D_max_std",
                "q",
                "q_std",
                "phi",
                "phi_std",
                "asymmetry",
                "rho_Ac",
                # Bayesian Fits
                "Bayesian_n_sigma",
                "Bayesian_D_max",
                "Bayesian_D_max_std",
                "Bayesian_q",
                "Bayesian_phi",
                # Counts
                "N_reads",
                "N_alignments",
                "N_sum_total",
                "k_sum_total",
            ]
            self.hovertemplate = (
                "<b>%{customdata[_XXX_]}</b><br><br>"
                "<b>Tax</b>: <br>"
                "    Name: %{customdata[_XXX_]} <br>"
                "    Rank: %{customdata[_XXX_]} <br>"
                "    ID:   %{customdata[_XXX_]} <br><br>"
                "<b>Fit Results</b>: <br>"
                "    LR:       %{customdata[_XXX_]:9.2f} <br>"
                "    D max:    %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
                "    q:        %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
                "    phi:      %{customdata[_XXX_]:9.3s} ± %{customdata[_XXX_]:.3s} <br>"
                "    asymmetry:%{customdata[_XXX_]:9.3f} <br>"
                "    rho_Ac:   %{customdata[_XXX_]:9.3f} <br><br>"
                "<b>Bayesian Fit Results</b>: <br>"
                "    n sigma:  %{customdata[_XXX_]:9.2f} <br>"
                "    D max:    %{customdata[_XXX_]:9.2f} <br>"
                "    q:        %{customdata[_XXX_]:9.2f} <br>"
                "    phi:      %{customdata[_XXX_]:9.3s} <br><br>"
                "<b>Counts</b>: <br>"
                "    N reads:     %{customdata[_XXX_]:6.3s} <br>"
                "    N alignments:%{customdata[_XXX_]:6.3s} <br>"
                "    N sum total: %{customdata[_XXX_]:6.3s} <br>"
                "    k sum total: %{customdata[_XXX_]:6.3s} <br>"
                "<extra></extra>"
            )
        else:
            self.custom_data_columns = [
                "shortname",
                "tax_name",
                "tax_rank",
                "tax_id",
                # Frequentist fits
                "lambda_LR",
                "D_max",
                "D_max_std",
                "q",
                "q_std",
                "phi",
                "phi_std",
                "asymmetry",
                "rho_Ac",
                # Counts
                "N_reads",
                "N_alignments",
                "N_sum_total",
                "k_sum_total",
            ]
            self.hovertemplate = (
                "<b>%{customdata[_XXX_]}</b><br><br>"
                "<b>Tax</b>: <br>"
                "    Name: %{customdata[_XXX_]} <br>"
                "    Rank: %{customdata[_XXX_]} <br>"
                "    ID:   %{customdata[_XXX_]} <br><br>"
                "<b>Fit Results</b>: <br>"
                "    LR:       %{customdata[_XXX_]:9.2f} <br>"
                "    D max:    %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
                "    q:        %{customdata[_XXX_]:9.2f} ± %{customdata[_XXX_]:.2f} <br>"
                "    phi:      %{customdata[_XXX_]:9.3s} ± %{customdata[_XXX_]:.3s} <br>"
                "    asymmetry:%{customdata[_XXX_]:9.3f} <br>"
                "    rho_Ac:   %{customdata[_XXX_]:9.3f} <br><br>"
                "<b>Counts</b>: <br>"
                "    N reads:     %{customdata[_XXX_]:6.3s} <br>"
                "    N alignments:%{customdata[_XXX_]:6.3s} <br>"
                "    N sum total: %{customdata[_XXX_]:6.3s} <br>"
                "    k sum total: %{customdata[_XXX_]:6.3s} <br>"
                "<extra></extra>"
            )
        # Replace each `_XXX_` placeholder with the next customdata index.
        data_counter = 0
        i = 0
        while True:
            if self.hovertemplate[i : i + len(placeholder)] == placeholder:
                # break
                s_new = self.hovertemplate[:i]
                s_new += str(data_counter)
                s_new += self.hovertemplate[i + len(placeholder) :]
                self.hovertemplate = s_new
                data_counter += 1
            i += 1
            if i >= len(self.hovertemplate):
                break
        self.customdata = self.df[self.custom_data_columns]
        self.hovertemplate_fit = (
            "Fit: <br>D(z) = %{y:.3f} ± %{error_y.array:.3f}<br>" "<extra></extra>"
        )

    def parse_click_data(self, click_data, column):
        """Extract *column*'s value from a plotly click event payload."""
        try:
            index = self.custom_data_columns.index(column)
            value = click_data["points"][0]["customdata"][index]
            return value
        except Exception as e:
            raise e

    def get_single_count_group(self, shortname, tax_id, forward_reverse=""):
        """Return the long-format counts for one (sample, tax_id) pair,
        optionally restricted to the 'Forward' or 'Reverse' strand."""
        query = f"shortname == '{shortname}' & tax_id == {tax_id}"
        group_wide = self.df.query(query)
        group = wide_to_long_df(group_wide)
        if forward_reverse.lower() == "forward":
            return group.query(f"direction=='Forward'")
        elif forward_reverse.lower() == "reverse":
            return group.query(f"direction=='Reverse'")
        else:
            return group

    def get_single_fit_prediction(self, shortname, tax_id, forward_reverse=""):
        """Evaluate the damage model D(z) for one (sample, tax_id) pair.

        Returns a dict with the model mean ('mu'/'Dz'), the beta-binomial
        standard deviation per position ('std') and the positions ('z').
        """
        query = f"shortname == '{shortname}' & tax_id == {tax_id}"
        ds = self.df.query(query)
        if len(ds) != 1:
            raise AssertionError(f"Something wrong here, got: {ds}")
        group = self.get_single_count_group(shortname, tax_id, forward_reverse)
        # Pick the strand-specific parameter columns when requested.
        if forward_reverse.lower() == "forward":
            prefix = "forward_"
        elif forward_reverse.lower() == "reverse":
            prefix = "reverse_"
        else:
            prefix = ""
        A = getattr(ds, f"{prefix}A").values
        q = getattr(ds, f"{prefix}q").values
        c = getattr(ds, f"{prefix}c").values
        phi = getattr(ds, f"{prefix}phi").values
        # Only the first 15 positions are used - presumably the number of
        # modelled positions per read end; TODO confirm.
        z = group.z.values[:15]
        N = group.N.values[:15]
        # Geometric damage decay with offset: D(z) = A*(1-q)^(|z|-1) + c.
        Dz = A * (1 - q) ** (np.abs(z) - 1) + c
        alpha = Dz * phi
        beta = (1 - Dz) * phi
        # Beta-binomial spread, expressed as a fraction of N.
        dist = sp_betabinom(n=N, a=alpha, b=beta)
        std = np.sqrt(dist.var()) / N
        d_out = {"mu": Dz, "std": std, "Dz": Dz, "z": z}
        return d_out
def load(results_dir=Path("./data/results")):
    """Open the fit results stored under *results_dir* (default ./data/results)."""
    results = Results(results_dir)
    return results
| [
"pandas.wide_to_long",
"numpy.abs",
"remade.dashboard.utils.log_transform_slider",
"numpy.clip",
"pathlib.Path",
"numpy.max",
"remade.dashboard.utils.is_log_transform_column",
"pyarrow.parquet.read_table",
"numpy.log10",
"pandas.concat",
"scipy.stats.betabinom"
] | [((1080, 1131), 'pandas.concat', 'pd.concat', (['[group_long_forward, group_long_reverse]'], {}), '([group_long_forward, group_long_reverse])\n', (1089, 1131), True, 'import pandas as pd\n'), ((11801, 11823), 'pathlib.Path', 'Path', (['"""./data/results"""'], {}), "('./data/results')\n", (11805, 11823), False, 'from pathlib import Path\n'), ((394, 434), 'numpy.clip', 'np.clip', (['df[column]'], {'a_min': '(0)', 'a_max': 'None'}), '(df[column], a_min=0, a_max=None)\n', (401, 434), True, 'import numpy as np\n'), ((552, 629), 'pandas.wide_to_long', 'pd.wide_to_long', (['group_wide'], {'stubnames': 'stub_names', 'i': '"""tax_id"""', 'j': '"""z"""', 'sep': 'sep'}), "(group_wide, stubnames=stub_names, i='tax_id', j='z', sep=sep)\n", (567, 629), True, 'import pandas as pd\n'), ((1317, 1334), 'pathlib.Path', 'Path', (['results_dir'], {}), '(results_dir)\n', (1321, 1334), False, 'from pathlib import Path\n'), ((1834, 1854), 'numpy.abs', 'np.abs', (["df['rho_Ac']"], {}), "(df['rho_Ac'])\n", (1840, 1854), True, 'import numpy as np\n'), ((2909, 2932), 'numpy.max', 'np.max', (["self.df['size']"], {}), "(self.df['size'])\n", (2915, 2932), True, 'import numpy as np\n'), ((11625, 11659), 'scipy.stats.betabinom', 'sp_betabinom', ([], {'n': 'N', 'a': 'alpha', 'b': 'beta'}), '(n=N, a=alpha, b=beta)\n', (11637, 11659), True, 'from scipy.stats import betabinom as sp_betabinom\n'), ((2151, 2175), 'numpy.log10', 'np.log10', (['(1 + df[column])'], {}), '(1 + df[column])\n', (2159, 2175), True, 'import numpy as np\n'), ((1484, 1510), 'pyarrow.parquet.read_table', 'pq.read_table', (['results_dir'], {}), '(results_dir)\n', (1497, 1510), True, 'import pyarrow.parquet as pq\n'), ((11534, 11543), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (11540, 11543), True, 'import numpy as np\n'), ((3949, 3996), 'remade.dashboard.utils.is_log_transform_column', 'dashboard.utils.is_log_transform_column', (['column'], {}), '(column)\n', (3988, 3996), True, 'from remade import dashboard as dashboard\n'), 
((4024, 4065), 'remade.dashboard.utils.log_transform_slider', 'dashboard.utils.log_transform_slider', (['low'], {}), '(low)\n', (4060, 4065), True, 'from remade import dashboard as dashboard\n'), ((4093, 4135), 'remade.dashboard.utils.log_transform_slider', 'dashboard.utils.log_transform_slider', (['high'], {}), '(high)\n', (4129, 4135), True, 'from remade import dashboard as dashboard\n')] |
from padertorch.data.segment import Segmenter
import numpy as np
import torch
def test_simple_case():
    """Two included keys are cut into aligned 32k windows with 16k shift."""
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    segmenter = Segmenter(length=32000, include_keys=('x', 'y'), shift=16000)
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        expected = np.arange(i * 16000, (i + 2) * 16000)
        np.testing.assert_equal(seg['x'], expected)
        np.testing.assert_equal(seg['x'], seg['y'])
def test_fixed_anchor():
    """An integer anchor offsets every segment start by that amount."""
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    segmenter = Segmenter(length=32000, include_keys=('x', 'y'),
                          shift=16000, anchor=10)
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        expected = 10 + np.arange(i * 16000, (i + 2) * 16000)
        np.testing.assert_equal(seg['x'], expected)
        np.testing.assert_equal(seg['x'], seg['y'])
def test_random_anchor():
    """
    Checks fix for random anchor in https://github.com/fgnt/padertorch/pull/91
    """
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    # both random anchor modes must produce a valid segmentation
    for anchor in ('random', 'random_max_segments'):
        segmenter = Segmenter(length=32000, include_keys=('x', 'y'),
                              shift=32000, anchor=anchor)
        segments = segmenter(ex)
        assert isinstance(segments, list), segments
    # 'random_max_segments' must still fit two full segments into 65000
    assert len(segments) == 2
def test_copy_keys():
    """copy_keys carries extra entries along; unmentioned keys are dropped."""
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    segmenter = Segmenter(length=32000, include_keys=('x', 'y'),
                          shift=16000, copy_keys='gender')
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    # 'num_samples' is neither segmented nor copied, so it must vanish
    expected_keys = [k for k in ex if k != 'num_samples']
    for i, seg in enumerate(segments):
        assert all(k in seg for k in expected_keys)
        np.testing.assert_equal(seg['x'], np.arange(i * 16000, (i + 2) * 16000))
        np.testing.assert_equal(seg['x'], seg['y'])
def test_include_none():
    """Without include_keys, all segmentable entries are segmented."""
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    segments = Segmenter(length=32000, shift=16000)(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        expected = np.arange(i * 16000, (i + 2) * 16000)
        np.testing.assert_equal(seg['x'], expected)
        np.testing.assert_equal(seg['x'], seg['y'])
def test_include_to_larger():
    """Requesting a key that is absent from the example must fail."""
    segmenter = Segmenter(length=32000, shift=16000,
                          include_keys=['x', 'y', 'z'])
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    raised = False
    try:
        segmenter(ex)
    except AssertionError:
        raised = True
    assert raised, segmenter
def test_include_none_with_torch():
    """numpy arrays and torch tensors are segmented identically."""
    segmenter = Segmenter(length=32000, shift=16000)
    data = np.random.randn(5, 10, 64000)
    ex = {'x': data.copy(), 'y': data.copy(),
          'z': torch.tensor(data),
          'num_samples': 65000, 'gender': 'm'}
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for seg in segments:
        assert all(k in seg for k in ex)
        np.testing.assert_equal(seg['x'], seg['z'].numpy())
        np.testing.assert_equal(seg['x'], seg['y'])
def test_error_include_list():
    """Explicitly including a plain-list entry must raise a ValueError."""
    segmenter = Segmenter(length=32000, shift=16000,
                          include_keys=['x', 'y', 'z'])
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'z': np.arange(65000).tolist(),
          'num_samples': 65000, 'gender': 'm'}
    raised = False
    try:
        segmenter(ex)
    except ValueError:
        raised = True
    assert raised, segmenter
def test_include_none_ignore_list():
    """Plain-list entries are silently skipped when no keys are requested."""
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'z': np.arange(65000).tolist(),
          'num_samples': 65000, 'gender': 'm'}
    segments = Segmenter(length=32000, shift=16000)(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        np.testing.assert_equal(seg['x'], np.arange(i * 16000, (i + 2) * 16000))
    # with copy_keys, only arrays plus the copied entries survive
    segments = Segmenter(length=32000, shift=16000,
                         copy_keys=['num_samples', 'gender'])(ex)
    assert isinstance(segments, list), segments
    expected_keys = ['x', 'y', 'num_samples', 'gender']
    for i, seg in enumerate(segments):
        assert all(k in seg for k in expected_keys)
        np.testing.assert_equal(seg['x'], np.arange(i * 16000, (i + 2) * 16000))
        np.testing.assert_equal(seg['x'], seg['y'])
def test_include_exclude():
    """Excluded keys are passed through untouched while others are cut."""
    segmenter = Segmenter(length=32000, shift=16000, exclude_keys='y')
    ex = {'x': np.arange(65000), 'y': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        np.testing.assert_equal(seg['x'], np.arange(i * 16000, (i + 2) * 16000))
        # 'y' keeps the full original signal in every segment
        np.testing.assert_equal(seg['y'], np.arange(65000))
def test_axis():
    """Per-key segmentation axes, given as a list or as a dict."""
    # list form: 'x' is cut along its last axis, 'y' along its first
    segmenter = Segmenter(length=32000, shift=16000, include_keys=['x', 'y'],
                          axis=[-1, 0])
    ex = {'x': np.arange(65000), 'y': np.arange(65000)[:, None],
          'num_samples': 65000, 'gender': 'm'}
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        np.testing.assert_equal(seg['x'], np.arange(i * 16000, (i + 2) * 16000))
        np.testing.assert_equal(seg['x'], seg['y'][:, 0])
    # dict form: the same data transposed so the cut axis differs per key
    segmenter = Segmenter(length=32000, shift=16000,
                          include_keys=['x', 'y', 'z'],
                          axis={'x': 0, 'y': 1, 'z': -1})
    data = np.random.randn(65000, 5, 10)
    ex = {'x': data.copy(), 'y': data.copy().transpose(1, 0, 2),
          'z': torch.tensor(data.transpose(1, 2, 0)),
          'num_samples': 65000, 'gender': 'm'}
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for seg in segments:
        assert all(k in seg for k in ex)
        np.testing.assert_equal(seg['x'], seg['z'].numpy().transpose(2, 0, 1))
        np.testing.assert_equal(seg['x'], seg['y'].transpose(1, 0, 2))
def test_axis_dict():
    """axis given as a key->axis mapping selects the cut axis per entry."""
    segmenter = Segmenter(length=32000, shift=16000, include_keys=['x', 'y'],
                          axis={'x': -1, 'y': 0})
    ex = {'x': np.arange(65000), 'y': np.arange(65000)[:, None],
          'num_samples': 65000, 'gender': 'm'}
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        np.testing.assert_equal(seg['x'], np.arange(i * 16000, (i + 2) * 16000))
        np.testing.assert_equal(seg['x'], seg['y'][:, 0])
def test_axis_dict_wildcard():
    """An axis set on a parent key applies to every nested entry."""
    segmenter = Segmenter(length=32000, shift=16000,
                          include_keys=['audio_data'],
                          axis={'audio_data': -1})
    ex = {'audio_data': {'x': np.arange(65000), 'y': np.arange(65000)},
          'z': np.arange(65000),
          'num_samples': 65000, 'gender': 'm'}
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        expected = np.arange(i * 16000, (i + 2) * 16000)
        np.testing.assert_equal(seg['audio_data']['x'], expected)
        np.testing.assert_equal(seg['audio_data']['x'],
                                seg['audio_data']['y'])
        # 'z' is outside the wildcard and stays whole
        np.testing.assert_equal(seg['z'], np.arange(65000))
def test_wildcard():
    """Including a parent key segments all arrays nested below it."""
    segmenter = Segmenter(length=32000, shift=16000,
                          include_keys=['audio_data'])
    ex = {'audio_data': {'x': np.arange(65000), 'y': np.arange(65000)},
          'num_samples': 65000, 'gender': 'm'}
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        expected = np.arange(i * 16000, (i + 2) * 16000)
        np.testing.assert_equal(seg['audio_data']['x'], expected)
        np.testing.assert_equal(seg['audio_data']['x'],
                                seg['audio_data']['y'])
def test_wildcard_exclude():
    """Dotted exclude paths exempt single nested entries from a wildcard."""
    ex = {
        'audio_data': {'x': np.arange(65000), 'y': np.arange(65000)[:, None]},
        'z': np.arange(65000)[:, None],
        'num_samples': 65000, 'gender': 'm'
    }
    segmenter = Segmenter(length=32000, shift=16000,
                          include_keys=['audio_data'],
                          exclude_keys=['audio_data.y'],
                          axis={'audio_data': -1})
    segments = segmenter(ex)
    assert isinstance(segments, list), segments
    for i, seg in enumerate(segments):
        assert all(k in seg for k in ex)
        np.testing.assert_equal(
            seg['audio_data']['x'],
            np.arange(i * 16000, (i + 2) * 16000))
        # the excluded nested key keeps its full, unsegmented shape
        np.testing.assert_equal(seg['audio_data']['y'],
                                np.arange(65000)[:, None])
def test_length_mode():
    """'constant'/'max'/'min' modes adjust the effective segment length."""
    examples = [{'x': np.arange(16000), 'y': np.arange(16000),
                 'num_samples': 16000, 'gender': 'm'},
                {'x': np.arange(15900), 'y': np.arange(15900),
                 'num_samples': 15900, 'gender': 'm'}]
    # expected first-segment lengths without an explicit shift
    lengths_default_shift = [{'constant': 950, 'max': 942, 'min': 1000},
                             {'constant': 950, 'max': 936, 'min': 994}]
    for idx, ex in enumerate(examples):
        for mode in ('constant', 'max', 'min'):
            segmenter = Segmenter(length=950, include_keys='x',
                                  mode=mode, padding=True)
            np.testing.assert_equal(segmenter(ex)[0]['x'],
                                    np.arange(lengths_default_shift[idx][mode]))
    # expected first-segment lengths with shift=250
    lengths_shift_250 = [{'constant': 950, 'max': 947, 'min': 951},
                         {'constant': 950, 'max': 950, 'min': 954}]
    for idx, ex in enumerate(examples):
        for mode in ('constant', 'max', 'min'):
            segmenter = Segmenter(length=950, shift=250, include_keys='x',
                                  mode=mode, padding=True)
            np.testing.assert_equal(segmenter(ex)[0]['x'],
                                    np.arange(lengths_shift_250[idx][mode]))
| [
"numpy.random.randn",
"padertorch.data.segment.Segmenter",
"numpy.arange",
"numpy.testing.assert_equal",
"torch.tensor"
] | [((120, 181), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'include_keys': "('x', 'y')", 'shift': '(16000)'}), "(length=32000, include_keys=('x', 'y'), shift=16000)\n", (129, 181), False, 'from padertorch.data.segment import Segmenter\n'), ((701, 773), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'include_keys': "('x', 'y')", 'shift': '(16000)', 'anchor': '(10)'}), "(length=32000, include_keys=('x', 'y'), shift=16000, anchor=10)\n", (710, 773), False, 'from padertorch.data.segment import Segmenter\n'), ((1498, 1576), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'include_keys': "('x', 'y')", 'shift': '(32000)', 'anchor': '"""random"""'}), "(length=32000, include_keys=('x', 'y'), shift=32000, anchor='random')\n", (1507, 1576), False, 'from padertorch.data.segment import Segmenter\n'), ((1696, 1792), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'include_keys': "('x', 'y')", 'shift': '(32000)', 'anchor': '"""random_max_segments"""'}), "(length=32000, include_keys=('x', 'y'), shift=32000, anchor=\n 'random_max_segments')\n", (1705, 1792), False, 'from padertorch.data.segment import Segmenter\n'), ((1961, 2047), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'include_keys': "('x', 'y')", 'shift': '(16000)', 'copy_keys': '"""gender"""'}), "(length=32000, include_keys=('x', 'y'), shift=16000, copy_keys=\n 'gender')\n", (1970, 2047), False, 'from padertorch.data.segment import Segmenter\n'), ((2641, 2677), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)'}), '(length=32000, shift=16000)\n', (2650, 2677), False, 'from padertorch.data.segment import Segmenter\n'), ((3176, 3242), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['x', 'y', 'z']"}), "(length=32000, shift=16000, include_keys=['x', 'y', 'z'])\n", 
(3185, 3242), False, 'from padertorch.data.segment import Segmenter\n'), ((3551, 3587), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)'}), '(length=32000, shift=16000)\n', (3560, 3587), False, 'from padertorch.data.segment import Segmenter\n'), ((3600, 3629), 'numpy.random.randn', 'np.random.randn', (['(5)', '(10)', '(64000)'], {}), '(5, 10, 64000)\n', (3615, 3629), True, 'import numpy as np\n'), ((4111, 4177), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['x', 'y', 'z']"}), "(length=32000, shift=16000, include_keys=['x', 'y', 'z'])\n", (4120, 4177), False, 'from padertorch.data.segment import Segmenter\n'), ((4525, 4561), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)'}), '(length=32000, shift=16000)\n', (4534, 4561), False, 'from padertorch.data.segment import Segmenter\n'), ((5015, 5088), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'copy_keys': "['num_samples', 'gender']"}), "(length=32000, shift=16000, copy_keys=['num_samples', 'gender'])\n", (5024, 5088), False, 'from padertorch.data.segment import Segmenter\n'), ((5568, 5622), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'exclude_keys': '"""y"""'}), "(length=32000, shift=16000, exclude_keys='y')\n", (5577, 5622), False, 'from padertorch.data.segment import Segmenter\n'), ((6114, 6189), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['x', 'y']", 'axis': '[-1, 0]'}), "(length=32000, shift=16000, include_keys=['x', 'y'], axis=[-1, 0])\n", (6123, 6189), False, 'from padertorch.data.segment import Segmenter\n'), ((6698, 6801), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['x', 'y', 'z']", 'axis': "{'x': 0, 'y': 1, 
'z': -1}"}), "(length=32000, shift=16000, include_keys=['x', 'y', 'z'], axis={\n 'x': 0, 'y': 1, 'z': -1})\n", (6707, 6801), False, 'from padertorch.data.segment import Segmenter\n'), ((6861, 6890), 'numpy.random.randn', 'np.random.randn', (['(65000)', '(5)', '(10)'], {}), '(65000, 5, 10)\n', (6876, 6890), True, 'import numpy as np\n'), ((7433, 7522), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['x', 'y']", 'axis': "{'x': -1, 'y': 0}"}), "(length=32000, shift=16000, include_keys=['x', 'y'], axis={'x': -1,\n 'y': 0})\n", (7442, 7522), False, 'from padertorch.data.segment import Segmenter\n'), ((8059, 8154), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['audio_data']", 'axis': "{'audio_data': -1}"}), "(length=32000, shift=16000, include_keys=['audio_data'], axis={\n 'audio_data': -1})\n", (8068, 8154), False, 'from padertorch.data.segment import Segmenter\n'), ((8929, 8994), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['audio_data']"}), "(length=32000, shift=16000, include_keys=['audio_data'])\n", (8938, 8994), False, 'from padertorch.data.segment import Segmenter\n'), ((9815, 9940), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(32000)', 'shift': '(16000)', 'include_keys': "['audio_data']", 'exclude_keys': "['audio_data.y']", 'axis': "{'audio_data': -1}"}), "(length=32000, shift=16000, include_keys=['audio_data'],\n exclude_keys=['audio_data.y'], axis={'audio_data': -1})\n", (9824, 9940), False, 'from padertorch.data.segment import Segmenter\n'), ((223, 239), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (232, 239), True, 'import numpy as np\n'), ((246, 262), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (255, 262), True, 'import numpy as np\n'), ((610, 657), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (["entry['x']", "entry['y']"], {}), "(entry['x'], entry['y'])\n", (633, 657), True, 'import numpy as np\n'), ((815, 831), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (824, 831), True, 'import numpy as np\n'), ((838, 854), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (847, 854), True, 'import numpy as np\n'), ((1207, 1254), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['x']", "entry['y']"], {}), "(entry['x'], entry['y'])\n", (1230, 1254), True, 'import numpy as np\n'), ((1393, 1409), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (1402, 1409), True, 'import numpy as np\n'), ((1416, 1432), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (1425, 1432), True, 'import numpy as np\n'), ((2084, 2100), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (2093, 2100), True, 'import numpy as np\n'), ((2107, 2123), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (2116, 2123), True, 'import numpy as np\n'), ((2550, 2597), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['x']", "entry['y']"], {}), "(entry['x'], entry['y'])\n", (2573, 2597), True, 'import numpy as np\n'), ((2693, 2709), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (2702, 2709), True, 'import numpy as np\n'), ((2716, 2732), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (2725, 2732), True, 'import numpy as np\n'), ((3080, 3127), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['x']", "entry['y']"], {}), "(entry['x'], entry['y'])\n", (3103, 3127), True, 'import numpy as np\n'), ((3284, 3300), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (3293, 3300), True, 'import numpy as np\n'), ((3307, 3323), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (3316, 3323), True, 'import numpy as np\n'), ((3691, 3710), 'torch.tensor', 'torch.tensor', (['array'], {}), '(array)\n', (3703, 3710), False, 'import 
torch\n'), ((4014, 4061), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['x']", "entry['y']"], {}), "(entry['x'], entry['y'])\n", (4037, 4061), True, 'import numpy as np\n'), ((4219, 4235), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (4228, 4235), True, 'import numpy as np\n'), ((4242, 4258), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (4251, 4258), True, 'import numpy as np\n'), ((4577, 4593), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (4586, 4593), True, 'import numpy as np\n'), ((4600, 4616), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (4609, 4616), True, 'import numpy as np\n'), ((5474, 5521), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['x']", "entry['y']"], {}), "(entry['x'], entry['y'])\n", (5497, 5521), True, 'import numpy as np\n'), ((5638, 5654), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (5647, 5654), True, 'import numpy as np\n'), ((5661, 5677), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (5670, 5677), True, 'import numpy as np\n'), ((6231, 6247), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (6240, 6247), True, 'import numpy as np\n'), ((6627, 6680), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['x']", "entry['y'][:, 0]"], {}), "(entry['x'], entry['y'][:, 0])\n", (6650, 6680), True, 'import numpy as np\n'), ((7560, 7576), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (7569, 7576), True, 'import numpy as np\n'), ((7956, 8009), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['x']", "entry['y'][:, 0]"], {}), "(entry['x'], entry['y'][:, 0])\n", (7979, 8009), True, 'import numpy as np\n'), ((8289, 8305), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (8298, 8305), True, 'import numpy as np\n'), ((8688, 8763), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['audio_data']['x']", "entry['audio_data']['y']"], {}), 
"(entry['audio_data']['x'], entry['audio_data']['y'])\n", (8711, 8763), True, 'import numpy as np\n'), ((9479, 9554), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["entry['audio_data']['x']", "entry['audio_data']['y']"], {}), "(entry['audio_data']['x'], entry['audio_data']['y'])\n", (9502, 9554), True, 'import numpy as np\n'), ((551, 600), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (560, 600), True, 'import numpy as np\n'), ((2491, 2540), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (2500, 2540), True, 'import numpy as np\n'), ((3021, 3070), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (3030, 3070), True, 'import numpy as np\n'), ((4947, 4996), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (4956, 4996), True, 'import numpy as np\n'), ((5415, 5464), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (5424, 5464), True, 'import numpy as np\n'), ((5966, 6015), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (5975, 6015), True, 'import numpy as np\n'), ((6061, 6077), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (6070, 6077), True, 'import numpy as np\n'), ((6254, 6270), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (6263, 6270), True, 'import numpy as np\n'), ((6568, 6617), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (6577, 6617), True, 'import numpy as np\n'), ((7583, 7599), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (7592, 
7599), True, 'import numpy as np\n'), ((7897, 7946), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (7906, 7946), True, 'import numpy as np\n'), ((8232, 8248), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (8241, 8248), True, 'import numpy as np\n'), ((8255, 8271), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (8264, 8271), True, 'import numpy as np\n'), ((8620, 8669), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (8629, 8669), True, 'import numpy as np\n'), ((8872, 8888), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (8881, 8888), True, 'import numpy as np\n'), ((9051, 9067), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (9060, 9067), True, 'import numpy as np\n'), ((9074, 9090), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (9083, 9090), True, 'import numpy as np\n'), ((9394, 9443), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (9403, 9443), True, 'import numpy as np\n'), ((9657, 9673), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (9666, 9673), True, 'import numpy as np\n'), ((9721, 9737), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (9730, 9737), True, 'import numpy as np\n'), ((10281, 10330), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (10290, 10330), True, 'import numpy as np\n'), ((10497, 10513), 'numpy.arange', 'np.arange', (['(16000)'], {}), '(16000)\n', (10506, 10513), True, 'import numpy as np\n'), ((10520, 10536), 'numpy.arange', 'np.arange', (['(16000)'], {}), '(16000)\n', (10529, 10536), True, 'import numpy as np\n'), ((10615, 10631), 'numpy.arange', 'np.arange', (['(15900)'], {}), '(15900)\n', (10624, 10631), 
True, 'import numpy as np\n'), ((10638, 10654), 'numpy.arange', 'np.arange', (['(15900)'], {}), '(15900)\n', (10647, 10654), True, 'import numpy as np\n'), ((10946, 11010), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(950)', 'include_keys': '"""x"""', 'mode': 'mode', 'padding': '(True)'}), "(length=950, include_keys='x', mode=mode, padding=True)\n", (10955, 11010), False, 'from padertorch.data.segment import Segmenter\n'), ((11447, 11522), 'padertorch.data.segment.Segmenter', 'Segmenter', ([], {'length': '(950)', 'shift': '(250)', 'include_keys': '"""x"""', 'mode': 'mode', 'padding': '(True)'}), "(length=950, shift=250, include_keys='x', mode=mode, padding=True)\n", (11456, 11522), False, 'from padertorch.data.segment import Segmenter\n'), ((1148, 1197), 'numpy.arange', 'np.arange', (['(idx * 16000)', '(16000 + (idx + 1) * 16000)'], {}), '(idx * 16000, 16000 + (idx + 1) * 16000)\n', (1157, 1197), True, 'import numpy as np\n'), ((4275, 4291), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (4284, 4291), True, 'import numpy as np\n'), ((4633, 4649), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (4642, 4649), True, 'import numpy as np\n'), ((9680, 9696), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (9689, 9696), True, 'import numpy as np\n'), ((10422, 10438), 'numpy.arange', 'np.arange', (['(65000)'], {}), '(65000)\n', (10431, 10438), True, 'import numpy as np\n'), ((11176, 11211), 'numpy.arange', 'np.arange', (['(0)', 'new_length[idx][mode]'], {}), '(0, new_length[idx][mode])\n', (11185, 11211), True, 'import numpy as np\n'), ((11688, 11723), 'numpy.arange', 'np.arange', (['(0)', 'new_length[idx][mode]'], {}), '(0, new_length[idx][mode])\n', (11697, 11723), True, 'import numpy as np\n')] |
#python3
#steven 05/04/2020 Sierpiński triangle
#random start random points polygon
#ratio of getRatioPoint() indicate the division of line
import matplotlib.pyplot as plt
import numpy as np
import math
def plotXY(x, y, color='k', ax=None):
    """Plot y over x in the given color, on *ax* if provided else on pyplot."""
    (ax or plt).plot(x, y, color=color)
def DrawTriangleLineByPt(startPt, stopPt, color='k', ax=None):
    """Draw a straight segment between two 2-D points (numpy arrays).

    The endpoints are ordered by x so interpolation runs left to right.
    Vertical segments (equal x) are handled separately: the original
    slope formula would divide by zero there.
    """
    if startPt[0] > stopPt[0]:
        # order by x; a plain tuple swap replaces the arithmetic trick
        startPt, stopPt = stopPt, startPt
    if stopPt[0] == startPt[0]:
        # vertical line: interpolate y directly, x is constant
        y = np.linspace(startPt[1], stopPt[1], 30)
        x = np.full_like(y, startPt[0])
    else:
        x = np.linspace(startPt[0], stopPt[0], 30)
        slope = (stopPt[1] - startPt[1]) / (stopPt[0] - startPt[0])
        intercept = startPt[1] - slope * startPt[0]
        y = slope * x + intercept
    plotXY(x, y, color, ax)
def drawPolygon(points):
    """Draw the closed polygon through *points*, edge by edge in order."""
    n = len(points)
    for i in range(n):
        # the modulo wraps the final edge back to the first vertex
        DrawTriangleLineByPt(points[i], points[(i + 1) % n])
def trianglePolygon(points, N):
    """Recursively draw a Sierpiński-like pattern inside *points*.

    Draws the polygon, then the inner polygon connecting the ratio points
    of each edge, then recurses into every corner triangle formed by a
    vertex and its two adjacent inner points. *N* is the remaining
    recursion depth; the recursion stops when it reaches 0.
    """
    if N>0:
        # draw the outer polygon of this recursion level
        drawPolygon(points)
        # build the inner polygon from the ratio point of every edge
        # (NPoints[i] lies on the edge points[i-1] -> points[i])
        NPoints=[]
        for i in range(1,len(points)):
            NPoints.append(getRatioPoint(points[i-1],points[i]))
        NPoints.append(getRatioPoint(points[len(points)-1],points[0]))
        drawPolygon(NPoints)
        # recurse into each corner triangle: vertex points[i] together
        # with the inner points on its two adjacent edges
        for i in range(1,len(points)):
            pts =[]
            pts.append(NPoints[i])
            pts.append(points[i])
            pts.append(NPoints[i-1])
            trianglePolygon(pts,N-1)
        # the corner at points[0] wraps around to the last inner point
        pts =[]
        pts.append(NPoints[0])
        pts.append(points[0])
        pts.append(NPoints[len(NPoints)-1])
        trianglePolygon(pts,N-1)
    else:
        return
def getRatioPoint(pt1, pt2, ratio=0.35):
    """Return the point dividing the segment pt1 -> pt2 at *ratio*.

    The result is the convex combination ratio*pt1 + (1-ratio)*pt2, so
    ratio=0.5 yields the midpoint.
    """
    return ratio * pt1 + (1 - ratio) * pt2
def getRandomPoint(min=0, max=5):
    """Return a random 2-D point with both coordinates uniform in [min, max)."""
    return min + (max - min) * np.random.random((2,))
def getRandom(min=0, max=5):
    """Return a single random float drawn uniformly from [min, max)."""
    return min + (max - min) * np.random.random()
def circle(x, r=1):
    """Return the positive ordinate of the circle x^2 + y^2 = r^2 at abscissa x."""
    squared = r**2 - x**2
    return np.sqrt(squared)
def getRandomCirclePoint(r=1, positive=True):
    """Sample a random point on the circle of radius r.

    The x coordinate is uniform in [-r, r); *positive* picks the upper
    or lower semicircle. Points on a circle guarantee that the polygon
    built from them is convex.
    """
    x = getRandom(min=-1 * r, max=r)
    y = circle(x, r=r)
    if not positive:
        y = -1 * y
    return np.array([x, y], dtype=np.float64)
def getSequenceCirclePoints(r=1, Num=5, offset=0):
    """Return *Num* points evenly spaced on the circle of radius r.

    Point i sits at angle (i+1)*2*pi/Num + offset (radians).
    """
    pts = []
    for i in range(Num):
        angle = (i + 1) * math.pi * 2 / Num + offset
        pts.append(np.array([r * math.cos(angle), r * math.sin(angle)],
                            dtype=np.float64))
    return pts
def triangleStart(N=3):
    """Draw the recursive pattern to depth N, starting from 5 circle points."""
    start_points = getSequenceCirclePoints(Num=5)
    trianglePolygon(start_points, N)
def main():
    """Render the pattern at recursion depth 4 with equal axis scaling."""
    depth = 4  # iteration depth of the recursive subdivision
    triangleStart(depth)
    plt.axes().set_aspect('equal')
    plt.show()

if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"math.sin",
"numpy.random.random",
"numpy.array",
"math.cos",
"numpy.linspace",
"numpy.sqrt"
] | [((598, 636), 'numpy.linspace', 'np.linspace', (['startPt[0]', 'stopPt[0]', '(30)'], {}), '(startPt[0], stopPt[0], 30)\n', (609, 636), True, 'import numpy as np\n'), ((2171, 2195), 'numpy.sqrt', 'np.sqrt', (['(r ** 2 - x ** 2)'], {}), '(r ** 2 - x ** 2)\n', (2178, 2195), True, 'import numpy as np\n'), ((2334, 2368), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (2342, 2368), True, 'import numpy as np\n'), ((3067, 3077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3075, 3077), True, 'import matplotlib.pyplot as plt\n'), ((309, 332), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'c'}), '(x, y, color=c)\n', (317, 332), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2689), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float64'}), '([0, 0], dtype=np.float64)\n', (2663, 2689), True, 'import numpy as np\n'), ((2015, 2037), 'numpy.random.random', 'np.random.random', (['(2,)'], {}), '((2,))\n', (2031, 2037), True, 'import numpy as np\n'), ((2105, 2123), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2121, 2123), True, 'import numpy as np\n'), ((2751, 2766), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (2759, 2766), False, 'import math\n'), ((2785, 2800), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (2793, 2800), False, 'import math\n'), ((3032, 3042), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (3040, 3042), True, 'import matplotlib.pyplot as plt\n')] |
# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python
# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
from grid_world import windy_grid, ACTION_SPACE
SMALL_ENOUGH = 1e-3 # threshold for convergence
def print_values(V, g):
  """Pretty-print the state-value table V over the rows/cols of grid g."""
  for i in range(g.rows):
    print("---------------------------")
    cells = []
    for j in range(g.cols):
      v = V.get((i,j), 0)
      # negative values skip the leading pad: the minus sign fills it
      fmt = " %.2f|" if v >= 0 else "%.2f|"
      cells.append(fmt % v)
    print("".join(cells))
def print_policy(P, g):
  """Pretty-print the policy P (state -> action) over the grid g."""
  for i in range(g.rows):
    print("---------------------------")
    row = "".join("  %s  |" % P.get((i,j), ' ') for j in range(g.cols))
    print(row)
if __name__ == '__main__':
  # Iterative policy evaluation: compute V_pi for a fixed (stochastic)
  # policy on the windy grid world by repeated Bellman backups.

  ### define transition probabilities and grid ###
  # the key is (s, a, s'), the value is the probability
  # that is, transition_probs[(s, a, s')] = p(s' | s, a)
  # any key NOT present will considered to be impossible (i.e. probability 0)
  # we can take this from the grid object and convert it to the format we want
  transition_probs = {}

  # to reduce the dimensionality of the dictionary, we'll use deterministic
  # rewards, r(s, a, s')
  # note: you could make it simpler by using r(s') since the reward doesn't
  # actually depend on (s, a)
  rewards = {}

  grid = windy_grid()
  for (s, a), v in grid.probs.items():
    for s2, p in v.items():
      transition_probs[(s, a, s2)] = p
      rewards[(s, a, s2)] = grid.rewards.get(s2, 0)

  ### probabilistic policy ###
  # pi(a | s): most states are deterministic; (2, 0) splits 50/50
  policy = {
    (2, 0): {'U': 0.5, 'R': 0.5},
    (1, 0): {'U': 1.0},
    (0, 0): {'R': 1.0},
    (0, 1): {'R': 1.0},
    (0, 2): {'R': 1.0},
    (1, 2): {'U': 1.0},
    (2, 1): {'R': 1.0},
    (2, 2): {'U': 1.0},
    (2, 3): {'L': 1.0},
  }
  print_policy(policy, grid)

  # initialize V(s) = 0
  V = {}
  for s in grid.all_states():
    V[s] = 0

  gamma = 0.9 # discount factor

  # repeat until convergence
  it = 0
  while True:
    biggest_change = 0
    for s in grid.all_states():
      if not grid.is_terminal(s):
        old_v = V[s]
        new_v = 0 # we will accumulate the answer
        for a in ACTION_SPACE:
          for s2 in grid.all_states():

            # pi(a | s) for the current state; 0 if a is not listed
            action_prob = policy[s].get(a, 0)

            # reward is a function of (s, a, s'), 0 if not specified
            r = rewards.get((s, a, s2), 0)
            new_v += action_prob * transition_probs.get((s, a, s2), 0) * (r + gamma * V[s2])

        # after done getting the new value, update the value table
        V[s] = new_v
        biggest_change = max(biggest_change, np.abs(old_v - V[s]))
    print("iter:", it, "biggest_change:", biggest_change)
    print_values(V, grid)
    it += 1

    if biggest_change < SMALL_ENOUGH:
      break
  print("V:", V)
  print("\n\n")
  # sanity check
  # at state (1, 2), value is 0.5 * 0.9 * 1 + 0.5 * (-1) = -0.05
| [
"numpy.abs",
"grid_world.windy_grid",
"builtins.range"
] | [((487, 500), 'builtins.range', 'range', (['g.rows'], {}), '(g.rows)\n', (492, 500), False, 'from builtins import range\n'), ((783, 796), 'builtins.range', 'range', (['g.rows'], {}), '(g.rows)\n', (788, 796), False, 'from builtins import range\n'), ((1553, 1565), 'grid_world.windy_grid', 'windy_grid', ([], {}), '()\n', (1563, 1565), False, 'from grid_world import windy_grid, ACTION_SPACE\n'), ((556, 569), 'builtins.range', 'range', (['g.cols'], {}), '(g.cols)\n', (561, 569), False, 'from builtins import range\n'), ((852, 865), 'builtins.range', 'range', (['g.cols'], {}), '(g.cols)\n', (857, 865), False, 'from builtins import range\n'), ((2870, 2890), 'numpy.abs', 'np.abs', (['(old_v - V[s])'], {}), '(old_v - V[s])\n', (2876, 2890), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from get_radar_loc_gt import yaw, rotToYawPitchRoll
def getRotDiff(r1, r2):
    """Return the absolute yaw difference between two yaw angles.

    Assumes ``yaw()`` builds a rotation matrix from a yaw angle and
    ``rotToYawPitchRoll()`` inverts that mapping — TODO confirm against
    get_radar_loc_gt.
    """
    rot_a = yaw(r1)
    rot_b = yaw(r2)
    # Relative rotation between the two frames.
    relative = np.matmul(rot_b.transpose(), rot_a)
    delta_yaw, _pitch, _roll = rotToYawPitchRoll(relative)
    return abs(delta_yaw)
if __name__ == "__main__":
    # Each CSV row holds ground truth in columns 15-17 (x, y, yaw) and five
    # method estimates in columns (3k, 3k+1, 3k+2) for k = 0..4.
    # The original code duplicated the parse/statistics logic five times with
    # hand-typed column indices; this version is table-driven but produces
    # byte-identical output.
    csv_path = "localization_accuracy_icra4.csv"
    num_methods = 5
    dts = [[] for _ in range(num_methods)]  # translation errors (m) per method
    drs = [[] for _ in range(num_methods)]  # rotation errors (deg) per method
    with open(csv_path, 'r') as f:
        f.readline()  # skip header line
        for line in f:
            row = line.split(',')
            gtx = float(row[15])
            gty = float(row[16])
            gtyaw = float(row[17])
            for k in range(num_methods):
                x = float(row[3 * k])
                y = float(row[3 * k + 1])
                yaw_est = float(row[3 * k + 2])
                dts[k].append(np.sqrt((gtx - x)**2 + (gty - y)**2))
                drs[k].append(180 * getRotDiff(gtyaw, yaw_est) / np.pi)
    dts = [np.array(d) for d in dts]
    drs = [np.array(d) for d in drs]
    np.savetxt('dr3', drs[2])
    # NOTE(review): "sigma" below is the mean squared deviation about the
    # median (a variance-like spread), not a standard deviation.
    # Label strings are kept exactly as in the original (only the first one
    # carries the "dt:" prefix).
    labels = ['RIGID: dt: {} sigma_dt: {} dr: {} sigma_dr: {}',
              'DOPP ONLY: {} sigma_dt: {} dr: {} sigma_dr: {}',
              'DOPP + MD: {} sigma_dt: {} dr: {} sigma_dr: {}',
              'MD ONLY: {} sigma_dt: {} dr: {} sigma_dr: {}',
              'MD + DOPP: {} sigma_dt: {} dr: {} sigma_dr: {}']
    for fmt, dt, dr in zip(labels, dts, drs):
        print(fmt.format(np.median(dt), np.mean((dt - np.median(dt))**2),
                         np.median(dr), np.mean((dr - np.median(dr))**2)))
    matplotlib.rcParams.update({"font.size": 16, 'xtick.labelsize': 16,
                                'ytick.labelsize': 16, 'axes.linewidth': 1.5,
                                'font.family': 'serif', 'pdf.fonttype': 42})
    plt.figure(figsize=(10, 5.5))
    bins = np.arange(0, 4.0, 0.25)
    plt.grid(which='both', linestyle='--', alpha=0.5, axis='y')
    # Histogram shows RIGID (k=0), MD ONLY (k=3, labeled 'MC') and
    # DOPP + MD (k=2, labeled 'MC+Dopp').
    plt.hist([dts[0], dts[3], dts[2]], bins=bins,
             label=['RIGID', 'MC', 'MC+Dopp'],
             color=['r', 'b', 'limegreen'], rwidth=0.6)
    plt.xlabel('Translation Error (m)', fontsize=18)
    plt.ylabel('Number of Radar Pairs', fontsize=18)
    plt.legend(loc='best')
    plt.savefig('localization_accuracy.pdf', bbox_inches='tight', pad_inches=0.0)
    # plt.show()
| [
"matplotlib.pyplot.hist",
"numpy.median",
"matplotlib.rcParams.update",
"numpy.savetxt",
"get_radar_loc_gt.rotToYawPitchRoll",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"get_radar_loc_gt.yaw",
"matplotlib.pyplot.grid",
... | [((155, 162), 'get_radar_loc_gt.yaw', 'yaw', (['r1'], {}), '(r1)\n', (158, 162), False, 'from get_radar_loc_gt import yaw, rotToYawPitchRoll\n'), ((172, 179), 'get_radar_loc_gt.yaw', 'yaw', (['r2'], {}), '(r2)\n', (175, 179), False, 'from get_radar_loc_gt import yaw, rotToYawPitchRoll\n'), ((242, 266), 'get_radar_loc_gt.rotToYawPitchRoll', 'rotToYawPitchRoll', (['C_err'], {}), '(C_err)\n', (259, 266), False, 'from get_radar_loc_gt import yaw, rotToYawPitchRoll\n'), ((1507, 1520), 'numpy.array', 'np.array', (['dt1'], {}), '(dt1)\n', (1515, 1520), True, 'import numpy as np\n'), ((1531, 1544), 'numpy.array', 'np.array', (['dt2'], {}), '(dt2)\n', (1539, 1544), True, 'import numpy as np\n'), ((1555, 1568), 'numpy.array', 'np.array', (['dt3'], {}), '(dt3)\n', (1563, 1568), True, 'import numpy as np\n'), ((1579, 1592), 'numpy.array', 'np.array', (['dt4'], {}), '(dt4)\n', (1587, 1592), True, 'import numpy as np\n'), ((1603, 1616), 'numpy.array', 'np.array', (['dt5'], {}), '(dt5)\n', (1611, 1616), True, 'import numpy as np\n'), ((1627, 1640), 'numpy.array', 'np.array', (['dr1'], {}), '(dr1)\n', (1635, 1640), True, 'import numpy as np\n'), ((1651, 1664), 'numpy.array', 'np.array', (['dr2'], {}), '(dr2)\n', (1659, 1664), True, 'import numpy as np\n'), ((1675, 1688), 'numpy.array', 'np.array', (['dr3'], {}), '(dr3)\n', (1683, 1688), True, 'import numpy as np\n'), ((1699, 1712), 'numpy.array', 'np.array', (['dr4'], {}), '(dr4)\n', (1707, 1712), True, 'import numpy as np\n'), ((1723, 1736), 'numpy.array', 'np.array', (['dr5'], {}), '(dr5)\n', (1731, 1736), True, 'import numpy as np\n'), ((1742, 1764), 'numpy.savetxt', 'np.savetxt', (['"""dr3"""', 'dr3'], {}), "('dr3', dr3)\n", (1752, 1764), True, 'import numpy as np\n'), ((2624, 2790), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 16, 'xtick.labelsize': 16, 'ytick.labelsize': 16,\n 'axes.linewidth': 1.5, 'font.family': 'serif', 'pdf.fonttype': 42}"], {}), "({'font.size': 16, 
'xtick.labelsize': 16,\n 'ytick.labelsize': 16, 'axes.linewidth': 1.5, 'font.family': 'serif',\n 'pdf.fonttype': 42})\n", (2650, 2790), False, 'import matplotlib\n'), ((2825, 2854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5.5)'}), '(figsize=(10, 5.5))\n', (2835, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2889), 'numpy.arange', 'np.arange', (['(0)', '(4.0)', '(0.25)'], {}), '(0, 4.0, 0.25)\n', (2875, 2889), True, 'import numpy as np\n'), ((2894, 2953), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""', 'linestyle': '"""--"""', 'alpha': '(0.5)', 'axis': '"""y"""'}), "(which='both', linestyle='--', alpha=0.5, axis='y')\n", (2902, 2953), True, 'import matplotlib.pyplot as plt\n'), ((2958, 3075), 'matplotlib.pyplot.hist', 'plt.hist', (['[dt1, dt4, dt3]'], {'bins': 'bins', 'label': "['RIGID', 'MC', 'MC+Dopp']", 'color': "['r', 'b', 'limegreen']", 'rwidth': '(0.6)'}), "([dt1, dt4, dt3], bins=bins, label=['RIGID', 'MC', 'MC+Dopp'],\n color=['r', 'b', 'limegreen'], rwidth=0.6)\n", (2966, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3076, 3124), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Translation Error (m)"""'], {'fontsize': '(18)'}), "('Translation Error (m)', fontsize=18)\n", (3086, 3124), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3177), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Radar Pairs"""'], {'fontsize': '(18)'}), "('Number of Radar Pairs', fontsize=18)\n", (3139, 3177), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3204), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3192, 3204), True, 'import matplotlib.pyplot as plt\n'), ((3209, 3286), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""localization_accuracy.pdf"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.0)'}), "('localization_accuracy.pdf', bbox_inches='tight', pad_inches=0.0)\n", (3220, 3286), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1846), 
'numpy.median', 'np.median', (['dt1'], {}), '(dt1)\n', (1841, 1846), True, 'import numpy as np\n'), ((1884, 1898), 'numpy.median', 'np.median', (['dr1'], {}), '(dr1)\n', (1893, 1898), True, 'import numpy as np\n'), ((2003, 2017), 'numpy.median', 'np.median', (['dt2'], {}), '(dt2)\n', (2012, 2017), True, 'import numpy as np\n'), ((2055, 2069), 'numpy.median', 'np.median', (['dr2'], {}), '(dr2)\n', (2064, 2069), True, 'import numpy as np\n'), ((2174, 2188), 'numpy.median', 'np.median', (['dt3'], {}), '(dt3)\n', (2183, 2188), True, 'import numpy as np\n'), ((2226, 2240), 'numpy.median', 'np.median', (['dr3'], {}), '(dr3)\n', (2235, 2240), True, 'import numpy as np\n'), ((2343, 2357), 'numpy.median', 'np.median', (['dt4'], {}), '(dt4)\n', (2352, 2357), True, 'import numpy as np\n'), ((2395, 2409), 'numpy.median', 'np.median', (['dr4'], {}), '(dr4)\n', (2404, 2409), True, 'import numpy as np\n'), ((2514, 2528), 'numpy.median', 'np.median', (['dt5'], {}), '(dt5)\n', (2523, 2528), True, 'import numpy as np\n'), ((2566, 2580), 'numpy.median', 'np.median', (['dr5'], {}), '(dr5)\n', (2575, 2580), True, 'import numpy as np\n'), ((1863, 1877), 'numpy.median', 'np.median', (['dt1'], {}), '(dt1)\n', (1872, 1877), True, 'import numpy as np\n'), ((1915, 1929), 'numpy.median', 'np.median', (['dr1'], {}), '(dr1)\n', (1924, 1929), True, 'import numpy as np\n'), ((2034, 2048), 'numpy.median', 'np.median', (['dt2'], {}), '(dt2)\n', (2043, 2048), True, 'import numpy as np\n'), ((2086, 2100), 'numpy.median', 'np.median', (['dr2'], {}), '(dr2)\n', (2095, 2100), True, 'import numpy as np\n'), ((2205, 2219), 'numpy.median', 'np.median', (['dt3'], {}), '(dt3)\n', (2214, 2219), True, 'import numpy as np\n'), ((2257, 2271), 'numpy.median', 'np.median', (['dr3'], {}), '(dr3)\n', (2266, 2271), True, 'import numpy as np\n'), ((2374, 2388), 'numpy.median', 'np.median', (['dt4'], {}), '(dt4)\n', (2383, 2388), True, 'import numpy as np\n'), ((2426, 2440), 'numpy.median', 'np.median', (['dr4'], {}), 
'(dr4)\n', (2435, 2440), True, 'import numpy as np\n'), ((2545, 2559), 'numpy.median', 'np.median', (['dt5'], {}), '(dt5)\n', (2554, 2559), True, 'import numpy as np\n'), ((2597, 2611), 'numpy.median', 'np.median', (['dr5'], {}), '(dr5)\n', (2606, 2611), True, 'import numpy as np\n')] |
r"""
Support for embedded TeX expressions in Matplotlib.
Requirements:
* LaTeX.
* \*Agg backends: dvipng>=1.6.
* PS backend: PSfrag, dvips, and Ghostscript>=9.0.
* PDF and SVG backends: if LuaTeX is present, it will be used to speed up some
post-processing steps, but note that it is not used to parse the TeX string
itself (only LaTeX is supported).
To enable TeX rendering of all text in your Matplotlib figure, set
:rc:`text.usetex` to True.
TeX and dvipng/dvips processing results are cached
in ~/.matplotlib/tex.cache for reuse between sessions.
`TexManager.get_rgba` can also be used to directly obtain raster output as RGBA
NumPy arrays.
"""
import functools
import hashlib
import logging
import os
from pathlib import Path
import subprocess
from tempfile import TemporaryDirectory
import numpy as np
from packaging.version import parse as parse_version
import matplotlib as mpl
from matplotlib import _api, cbook, dviread, rcParams
_log = logging.getLogger(__name__)
def _usepackage_if_not_loaded(package, *, option=None):
"""
Output LaTeX code that loads a package (possibly with an option) if it
hasn't been loaded yet.
LaTeX cannot load twice a package with different options, so this helper
can be used to protect against users loading arbitrary packages/options in
their custom preamble.
"""
option = f"[{option}]" if option is not None else ""
return (
r"\makeatletter"
r"\@ifpackageloaded{%(package)s}{}{\usepackage%(option)s{%(package)s}}"
r"\makeatother"
) % {"package": package, "option": option}
class TexManager:
    """
    Convert strings to dvi files using TeX, caching the results to a directory.

    Repeated calls to this constructor always return the same instance.
    """
    # On-disk cache directory (under the matplotlib cache dir) where tex/dvi/
    # png intermediates are stored for reuse across sessions.
    texcache = os.path.join(mpl.get_cachedir(), 'tex.cache')
    # In-memory cache: (tex, fontconfig, fontsize, dpi) -> alpha-channel array.
    _grey_arrayd = {}
    _font_family = 'serif'  # overwritten by get_font_config() from rcParams
    _font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
    # font name -> (LaTeX font abbreviation, LaTeX command loading that font).
    _font_info = {
        'new century schoolbook': ('pnc', r'\renewcommand{\rmdefault}{pnc}'),
        'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
        'times': ('ptm', r'\usepackage{mathptmx}'),
        'palatino': ('ppl', r'\usepackage{mathpazo}'),
        'zapf chancery': ('pzc', r'\usepackage{chancery}'),
        'cursive': ('pzc', r'\usepackage{chancery}'),
        'charter': ('pch', r'\usepackage{charter}'),
        'serif': ('cmr', ''),
        'sans-serif': ('cmss', ''),
        'helvetica': ('phv', r'\usepackage{helvet}'),
        'avant garde': ('pag', r'\usepackage{avant}'),
        'courier': ('pcr', r'\usepackage{courier}'),
        # Loading the type1ec package ensures that cm-super is installed, which
        # is necessary for unicode computer modern. (It also allows the use of
        # computer modern at arbitrary sizes, but that's just a side effect.)
        'monospace': ('cmtt', r'\usepackage{type1ec}'),
        'computer modern roman': ('cmr', r'\usepackage{type1ec}'),
        'computer modern sans serif': ('cmss', r'\usepackage{type1ec}'),
        'computer modern typewriter': ('cmtt', r'\usepackage{type1ec}')}
    # Specific font name -> generic font family it belongs to.
    _font_types = {
        'new century schoolbook': 'serif', 'bookman': 'serif',
        'times': 'serif', 'palatino': 'serif', 'charter': 'serif',
        'computer modern roman': 'serif', 'zapf chancery': 'cursive',
        'helvetica': 'sans-serif', 'avant garde': 'sans-serif',
        'computer modern sans serif': 'sans-serif',
        'courier': 'monospace', 'computer modern typewriter': 'monospace'}
    # Deprecated public aliases for the private attributes above.
    grey_arrayd = _api.deprecate_privatize_attribute("3.5")
    font_family = _api.deprecate_privatize_attribute("3.5")
    font_families = _api.deprecate_privatize_attribute("3.5")
    font_info = _api.deprecate_privatize_attribute("3.5")

    @functools.lru_cache()  # Always return the same instance.
    def __new__(cls):
        # Ensure the on-disk cache directory exists before first use.
        Path(cls.texcache).mkdir(parents=True, exist_ok=True)
        return object.__new__(cls)

    def get_font_config(self):
        """
        Return the font-configuration string used to key cached results.

        As a side effect, update ``self._font_family`` and
        ``self._font_preamble`` from :rc:`font.family` and the per-family
        font lists in rcParams.
        """
        ff = rcParams['font.family']
        ff_val = ff[0].lower() if len(ff) == 1 else None
        # reduced_notation: font.family directly names a specific usetex font
        # (e.g. 'times') instead of a generic family.
        reduced_notation = False
        if len(ff) == 1 and ff_val in self._font_families:
            self._font_family = ff_val
        elif len(ff) == 1 and ff_val in self._font_info:
            reduced_notation = True
            self._font_family = self._font_types[ff_val]
        else:
            _log.info('font.family must be one of (%s) when text.usetex is '
                      'True. serif will be used by default.',
                      ', '.join(self._font_families))
            self._font_family = 'serif'
        fontconfig = [self._font_family]
        fonts = {}
        for font_family in self._font_families:
            if reduced_notation and self._font_family == font_family:
                fonts[font_family] = self._font_info[ff_val]
            else:
                # Pick the first rcParams font for this family that usetex
                # knows how to load.
                for font in rcParams['font.' + font_family]:
                    if font.lower() in self._font_info:
                        fonts[font_family] = self._font_info[font.lower()]
                        _log.debug(
                            'family: %s, font: %s, info: %s',
                            font_family, font, self._font_info[font.lower()])
                        break
                    else:
                        _log.debug('%s font is not compatible with usetex.',
                                   font)
                else:
                    # for-else: no compatible font listed; use the family's
                    # built-in default.
                    _log.info('No LaTeX-compatible font found for the %s font'
                              'family in rcParams. Using default.',
                              font_family)
                    fonts[font_family] = self._font_info[font_family]
            fontconfig.append(fonts[font_family][0])
        # Add a hash of the latex preamble to fontconfig so that the
        # correct png is selected for strings rendered with same font and dpi
        # even if the latex preamble changes within the session
        preamble_bytes = self.get_custom_preamble().encode('utf-8')
        fontconfig.append(hashlib.md5(preamble_bytes).hexdigest())
        # The following packages and commands need to be included in the latex
        # file's preamble:
        cmd = {fonts[family][1]
               for family in ['serif', 'sans-serif', 'monospace']}
        if self._font_family == 'cursive':
            cmd.add(fonts['cursive'][1])
        cmd.add(r'\usepackage{type1cm}')
        self._font_preamble = '\n'.join(sorted(cmd))
        return ''.join(fontconfig)

    def get_basefile(self, tex, fontsize, dpi=None):
        """
        Return a filename based on a hash of the string, fontsize, and dpi.
        """
        s = ''.join([tex, self.get_font_config(), '%f' % fontsize,
                     self.get_custom_preamble(), str(dpi or '')])
        return os.path.join(
            self.texcache, hashlib.md5(s.encode('utf-8')).hexdigest())

    def get_font_preamble(self):
        """
        Return a string containing font configuration for the tex preamble.
        """
        return self._font_preamble

    def get_custom_preamble(self):
        """Return a string containing user additions to the tex preamble."""
        return rcParams['text.latex.preamble']

    def _get_preamble(self):
        """Assemble the full LaTeX preamble used by make_tex()."""
        return "\n".join([
            r"\documentclass{article}",
            # Pass-through \mathdefault, which is used in non-usetex mode to
            # use the default text font but was historically suppressed in
            # usetex mode.
            r"\newcommand{\mathdefault}[1]{#1}",
            self._font_preamble,
            r"\usepackage[utf8]{inputenc}",
            r"\DeclareUnicodeCharacter{2212}{\ensuremath{-}}",
            # geometry is loaded before the custom preamble as convert_psfrags
            # relies on a custom preamble to change the geometry.
            r"\usepackage[papersize=72in, margin=1in]{geometry}",
            self.get_custom_preamble(),
            # Use `underscore` package to take care of underscores in text
            # The [strings] option allows to use underscores in file names
            _usepackage_if_not_loaded("underscore", option="strings"),
            # Custom packages (e.g. newtxtext) may already have loaded textcomp
            # with different options.
            _usepackage_if_not_loaded("textcomp"),
        ])

    def make_tex(self, tex, fontsize):
        """
        Generate a tex file to render the tex string at a specific font size.

        Return the file name.
        """
        basefile = self.get_basefile(tex, fontsize)
        texfile = '%s.tex' % basefile
        # Wrap the user string in the family-appropriate font command.
        fontcmd = {'sans-serif': r'{\sffamily %s}',
                   'monospace': r'{\ttfamily %s}'}.get(self._font_family,
                                                       r'{\rmfamily %s}')
        Path(texfile).write_text(
            r"""
%s
\pagestyle{empty}
\begin{document}
%% The empty hbox ensures that a page is printed even for empty inputs, except
%% when using psfrag which gets confused by it.
\fontsize{%f}{%f}%%
\ifdefined\psfrag\else\hbox{}\fi%%
%s
\end{document}
""" % (self._get_preamble(), fontsize, fontsize * 1.25, fontcmd % tex),
            encoding='utf-8')
        return texfile

    def _run_checked_subprocess(self, command, tex, *, cwd=None):
        """
        Run *command* (a list of args), raising a RuntimeError with a readable
        message if the executable is missing or exits with an error.
        """
        _log.debug(cbook._pformat_subprocess(command))
        try:
            report = subprocess.check_output(
                command, cwd=cwd if cwd is not None else self.texcache,
                stderr=subprocess.STDOUT)
        except FileNotFoundError as exc:
            raise RuntimeError(
                'Failed to process string with tex because {} could not be '
                'found'.format(command[0])) from exc
        except subprocess.CalledProcessError as exc:
            raise RuntimeError(
                '{prog} was not able to process the following string:\n'
                '{tex!r}\n\n'
                'Here is the full report generated by {prog}:\n'
                '{exc}\n\n'.format(
                    prog=command[0],
                    tex=tex.encode('unicode_escape'),
                    exc=exc.output.decode('utf-8'))) from exc
        _log.debug(report)
        return report

    def make_dvi(self, tex, fontsize):
        """
        Generate a dvi file containing latex's layout of tex string.

        Return the file name.
        """
        basefile = self.get_basefile(tex, fontsize)
        dvifile = '%s.dvi' % basefile
        if not os.path.exists(dvifile):
            texfile = Path(self.make_tex(tex, fontsize))
            # Generate the dvi in a temporary directory to avoid race
            # conditions e.g. if multiple processes try to process the same tex
            # string at the same time. Having tmpdir be a subdirectory of the
            # final output dir ensures that they are on the same filesystem,
            # and thus replace() works atomically. It also allows referring to
            # the texfile with a relative path (for pathological MPLCONFIGDIRs,
            # the absolute path may contain characters (e.g. ~) that TeX does
            # not support.)
            with TemporaryDirectory(dir=Path(dvifile).parent) as tmpdir:
                self._run_checked_subprocess(
                    ["latex", "-interaction=nonstopmode", "--halt-on-error",
                     f"../{texfile.name}"], tex, cwd=tmpdir)
                (Path(tmpdir) / Path(dvifile).name).replace(dvifile)
        return dvifile

    def make_png(self, tex, fontsize, dpi):
        """
        Generate a png file containing latex's rendering of tex string.

        Return the file name.
        """
        basefile = self.get_basefile(tex, fontsize, dpi)
        pngfile = '%s.png' % basefile
        # see get_rgba for a discussion of the background
        if not os.path.exists(pngfile):
            dvifile = self.make_dvi(tex, fontsize)
            cmd = ["dvipng", "-bg", "Transparent", "-D", str(dpi),
                   "-T", "tight", "-o", pngfile, dvifile]
            # When testing, disable FreeType rendering for reproducibility; but
            # dvipng 1.16 has a bug (fixed in f3ff241) that breaks --freetype0
            # mode, so for it we keep FreeType enabled; the image will be
            # slightly off.
            bad_ver = parse_version("1.16")
            if (getattr(mpl, "_called_from_pytest", False)
                    and mpl._get_executable_info("dvipng").version != bad_ver):
                cmd.insert(1, "--freetype0")
            self._run_checked_subprocess(cmd, tex)
        return pngfile

    def get_grey(self, tex, fontsize=None, dpi=None):
        """Return the alpha channel."""
        if not fontsize:
            fontsize = rcParams['font.size']
        if not dpi:
            dpi = rcParams['savefig.dpi']
        key = tex, self.get_font_config(), fontsize, dpi
        alpha = self._grey_arrayd.get(key)
        if alpha is None:
            # Cache miss: rasterize via dvipng and keep only the alpha plane.
            pngfile = self.make_png(tex, fontsize, dpi)
            rgba = mpl.image.imread(os.path.join(self.texcache, pngfile))
            self._grey_arrayd[key] = alpha = rgba[:, :, -1]
        return alpha

    def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):
        r"""
        Return latex's rendering of the tex string as an rgba array.

        Examples
        --------
        >>> texmanager = TexManager()
        >>> s = r"\TeX\ is $\displaystyle\sum_n\frac{-e^{i\pi}}{2^n}$!"
        >>> Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1, 0, 0))
        """
        alpha = self.get_grey(tex, fontsize, dpi)
        rgba = np.empty((*alpha.shape, 4))
        rgba[..., :3] = mpl.colors.to_rgb(rgb)
        rgba[..., -1] = alpha
        return rgba

    def get_text_width_height_descent(self, tex, fontsize, renderer=None):
        """Return width, height and descent of the text."""
        if tex.strip() == '':
            return 0, 0, 0
        dvifile = self.make_dvi(tex, fontsize)
        dpi_fraction = renderer.points_to_pixels(1.) if renderer else 1
        with dviread.Dvi(dvifile, 72 * dpi_fraction) as dvi:
            page, = dvi
        # A total height (including the descent) needs to be returned.
        return page.width, page.height + page.descent, page.descent
| [
"matplotlib.get_cachedir",
"matplotlib.cbook._pformat_subprocess",
"matplotlib.dviread.Dvi",
"hashlib.md5",
"numpy.empty",
"subprocess.check_output",
"packaging.version.parse",
"os.path.exists",
"matplotlib.colors.to_rgb",
"pathlib.Path",
"matplotlib._api.deprecate_privatize_attribute",
"funct... | [((996, 1023), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1013, 1023), False, 'import logging\n'), ((3660, 3701), 'matplotlib._api.deprecate_privatize_attribute', '_api.deprecate_privatize_attribute', (['"""3.5"""'], {}), "('3.5')\n", (3694, 3701), False, 'from matplotlib import _api, cbook, dviread, rcParams\n'), ((3721, 3762), 'matplotlib._api.deprecate_privatize_attribute', '_api.deprecate_privatize_attribute', (['"""3.5"""'], {}), "('3.5')\n", (3755, 3762), False, 'from matplotlib import _api, cbook, dviread, rcParams\n'), ((3784, 3825), 'matplotlib._api.deprecate_privatize_attribute', '_api.deprecate_privatize_attribute', (['"""3.5"""'], {}), "('3.5')\n", (3818, 3825), False, 'from matplotlib import _api, cbook, dviread, rcParams\n'), ((3843, 3884), 'matplotlib._api.deprecate_privatize_attribute', '_api.deprecate_privatize_attribute', (['"""3.5"""'], {}), "('3.5')\n", (3877, 3884), False, 'from matplotlib import _api, cbook, dviread, rcParams\n'), ((3893, 3914), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (3912, 3914), False, 'import functools\n'), ((1876, 1894), 'matplotlib.get_cachedir', 'mpl.get_cachedir', ([], {}), '()\n', (1892, 1894), True, 'import matplotlib as mpl\n'), ((13939, 13966), 'numpy.empty', 'np.empty', (['(*alpha.shape, 4)'], {}), '((*alpha.shape, 4))\n', (13947, 13966), True, 'import numpy as np\n'), ((13992, 14014), 'matplotlib.colors.to_rgb', 'mpl.colors.to_rgb', (['rgb'], {}), '(rgb)\n', (14009, 14014), True, 'import matplotlib as mpl\n'), ((9552, 9586), 'matplotlib.cbook._pformat_subprocess', 'cbook._pformat_subprocess', (['command'], {}), '(command)\n', (9577, 9586), False, 'from matplotlib import _api, cbook, dviread, rcParams\n'), ((9624, 9734), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'cwd': '(cwd if cwd is not None else self.texcache)', 'stderr': 'subprocess.STDOUT'}), '(command, cwd=cwd if cwd is not None else self.\n texcache, 
stderr=subprocess.STDOUT)\n', (9647, 9734), False, 'import subprocess\n'), ((10753, 10776), 'os.path.exists', 'os.path.exists', (['dvifile'], {}), '(dvifile)\n', (10767, 10776), False, 'import os\n'), ((12122, 12145), 'os.path.exists', 'os.path.exists', (['pngfile'], {}), '(pngfile)\n', (12136, 12145), False, 'import os\n'), ((12614, 12635), 'packaging.version.parse', 'parse_version', (['"""1.16"""'], {}), "('1.16')\n", (12627, 12635), True, 'from packaging.version import parse as parse_version\n'), ((14400, 14439), 'matplotlib.dviread.Dvi', 'dviread.Dvi', (['dvifile', '(72 * dpi_fraction)'], {}), '(dvifile, 72 * dpi_fraction)\n', (14411, 14439), False, 'from matplotlib import _api, cbook, dviread, rcParams\n'), ((3983, 4001), 'pathlib.Path', 'Path', (['cls.texcache'], {}), '(cls.texcache)\n', (3987, 4001), False, 'from pathlib import Path\n'), ((9042, 9055), 'pathlib.Path', 'Path', (['texfile'], {}), '(texfile)\n', (9046, 9055), False, 'from pathlib import Path\n'), ((13356, 13392), 'os.path.join', 'os.path.join', (['self.texcache', 'pngfile'], {}), '(self.texcache, pngfile)\n', (13368, 13392), False, 'import os\n'), ((6210, 6237), 'hashlib.md5', 'hashlib.md5', (['preamble_bytes'], {}), '(preamble_bytes)\n', (6221, 6237), False, 'import hashlib\n'), ((12721, 12755), 'matplotlib._get_executable_info', 'mpl._get_executable_info', (['"""dvipng"""'], {}), "('dvipng')\n", (12745, 12755), True, 'import matplotlib as mpl\n'), ((11457, 11470), 'pathlib.Path', 'Path', (['dvifile'], {}), '(dvifile)\n', (11461, 11470), False, 'from pathlib import Path\n'), ((11695, 11707), 'pathlib.Path', 'Path', (['tmpdir'], {}), '(tmpdir)\n', (11699, 11707), False, 'from pathlib import Path\n'), ((11710, 11723), 'pathlib.Path', 'Path', (['dvifile'], {}), '(dvifile)\n', (11714, 11723), False, 'from pathlib import Path\n')] |
from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup
import os
from collections import defaultdict
import inspect
import pandas as pd
import numpy as np
from scipy import stats
import re
from graphviz import Digraph
import plotly
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import json
import re
import uuid
from functools import wraps
from importlib import reload
from werkzeug.utils import secure_filename
from sklearn.preprocessing import OneHotEncoder, StandardScaler, label_binarize, KBinsDiscretizer
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from utils import *
from fairness_instru import *
import warnings
warnings.filterwarnings('ignore')
app = Flask(__name__)
cache = []
user_id = uuid.uuid1()
# uploads_dir = os.path.join(app.instance_path, 'media')
script_name = os.getenv('SCRIPT_NAME', '')
# variable essentials are package import commands that are written to function python file.
#
# function python file is excutable scripts that calls fairness_instru and then generates DAGs and intermediate log dicts, which is stored in pickle format.
essentials = """import os
from collections import defaultdict
import inspect
import pandas as pd
import numpy as np
from scipy import stats
import re
from graphviz import Digraph
import plotly
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import json
from functools import wraps
from sklearn.preprocessing import OneHotEncoder, StandardScaler, label_binarize, KBinsDiscretizer
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from utils import *
from fairness_instru import *
"""
# Play data pipeline codes that generates ADULT_NORMAL case
playdata_AD_normal = """def adult_pipeline_normal(f_path = 'data/adult_train.csv'):
data = pd.read_csv(f_path, na_values='?', index_col=0)
# data = raw_data.dropna()
labels = label_binarize(data['income-per-year'], ['>50K', '<=50K'])
nested_categorical_feature_transformation = Pipeline(steps=[
('impute', SimpleImputer(missing_values=np.nan, strategy='most_frequent')),
('encode', OneHotEncoder(handle_unknown='ignore'))
])
nested_feature_transformation = ColumnTransformer(transformers=[
('categorical', nested_categorical_feature_transformation, ['education', 'workclass']),
('numeric', StandardScaler(), ['age', 'hours-per-week'])
])
nested_pipeline = Pipeline([
('features', nested_feature_transformation),
('classifier', DecisionTreeClassifier())])
return nested_pipeline"""
# Play data pipeline that generates codes case
playdata_CM = """def compas_pipeline(f_path = 'data/compas_train.csv'):
data = pd.read_csv(f_path)
data = data[['sex', 'dob','age','c_charge_degree', 'race','score_text','priors_count','days_b_screening_arrest',
'decile_score','is_recid','two_year_recid','c_jail_in','c_jail_out']]
data = data.loc[(data['days_b_screening_arrest'] <= 30)]
data = data.loc[(data['days_b_screening_arrest'] >= -30)]
data = data.loc[(data['is_recid'] != -1)]
data = data.loc[(data['c_charge_degree'] != "O")]
data = data.loc[(data['score_text'] != 'N/A')]
data = data.replace('Medium', "Low")
labels = LabelEncoder().fit_transform(data['score_text'])
#sklearn pipeline
impute1_and_onehot = Pipeline([('imputer1', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
impute2_and_bin = Pipeline([('imputer2', SimpleImputer(strategy='mean')),
('discretizer', KBinsDiscretizer(n_bins=4, encode='ordinal', strategy='uniform'))])
featurizer = ColumnTransformer(transformers=[
('impute1_and_onehot', impute1_and_onehot, ['is_recid']),
('impute2_and_bin', impute2_and_bin, ['age'])
])
pipeline = Pipeline([
('features', featurizer),
('classifier', LogisticRegression())
])
return pipeline"""
# variable initilization
target_name, pos_group, code, name, organization = "", "", "", "", ""
num_target, cat_target = [], []
cache = []
log_dict = {}
plot_dict = {}
rand_rgb = {}
# flask secret_key, randomly generated
app.secret_key = "shdgfashfasdsfsdf"
def login_required(f):
    """Decorator that restricts a view to logged-in users.

    Anonymous visitors are flashed a message and redirected to the login
    page; authenticated users fall through to the wrapped view.
    """
    @wraps(f)
    def wrap(*args, **kwargs):
        # A truthy 'logged_in' flag in the session marks an authenticated user.
        if session.get('logged_in'):
            return f(*args, **kwargs)
        flash('You need to login first')
        return redirect(url_for('login'))
    return wrap
def _load_checkpoint(path):
    """Load one pickled checkpoint file.

    Wrapping the load in ``with`` closes the handle promptly; the previous
    ``pickle.load(open(...))`` pattern leaked file descriptors.
    """
    with open(path, 'rb') as fh:
        return pickle.load(fh)


@app.route('/login', methods = ['GET', 'POST'])
def login():
    """
    Login Page Flask function
    In Login page:
        takes in user information.
        dropdown menu for user to select play data
        upload function for data upload if new case specified by user
    Returns:
        url for login page. Redirect to main home page if valid login session
        write pipeline code to executable function python file which calls fairness_instru wrapper generating DAGs and intermediate log dict files
        load intermediate dicts as well as DAG(stored in svg) and parse them to main home page
    """
    error = None
    # set default_value here
    if request.method == 'POST':
        # All per-session state is kept in module globals (single-user demo).
        global name
        name = request.form['name'] if request.form['name'] else '<NAME>'
        global organization
        organization = request.form['organization'] if request.form['organization'] else 'Y university'
        global demo
        demo = request.form['demo']
        global code
        # Default pipeline code used when the user submits an empty editor.
        code = """def adult_pipeline_easy(f_path = 'playdata/AD_train.csv'):
    raw_data = pd.read_csv(f_path, na_values='?', index_col=0)
    data = raw_data.dropna()
    labels = label_binarize(data['income-per-year'], ['>50K', '<=50K'])
    feature_transformation = ColumnTransformer(transformers=[
        ('categorical', OneHotEncoder(handle_unknown='ignore'), ['education', 'workclass']),
        ('numeric', StandardScaler(), ['age', 'hours-per-week'])
    ])
    income_pipeline = Pipeline([
        ('features', feature_transformation),
        ('classifier', DecisionTreeClassifier())])
    return income_pipeline""" if not request.form['code'] else request.form['code']
        global target_name, pos_group
        target_name, pos_group = list(map(lambda x: x.strip(), request.form['target_name'].split(','))) if request.form['target_name'] else ("income-per-year", ">50K")
        global cat_target
        cat_target = list(map(lambda x: x.strip(), request.form['cat_target'].split(','))) if request.form['cat_target'] else ['sex', 'race']
        global num_target
        num_target = list(map(lambda x: x.strip(), request.form['num_target'].split(','))) if request.form['num_target'] else ['age', 'hours-per-week']
        global save_path
        save_path = f'experiments/{user_id}'
        global perform_target
        perform_target = request.form['perform_target'] if request.form['perform_target'] else 'PR'
        session['logged_in'] = True
        # cache is used for stacking user click events, so that figures will show in sequence
        global cache
        cache = []
        global log_dict
        global rand_rgb
        global plot_dict
        global target_df
        # to_json_dict stores all user entered info, saved in uid format
        to_json_dict = request.form.to_dict(flat=False)
        with open(f'media/{user_id}.json', 'w+') as f:
            json.dump(to_json_dict, f)
        flash('You were just logged in')
        # Play-data cases: load the intermediate dict files that
        # fairness_instru generated ahead of time for each demo.
        # (Fix: rand_rgb was previously pickle-loaded twice from the same file.)
        if not demo == 'USER':
            log_dict = _load_checkpoint(f"playdata/{demo}/checkpoints/log_dict_train.p")
            rand_rgb = _load_checkpoint(f"playdata/{demo}/checkpoints/rand_color_train.p")
            plot_dict = _load_checkpoint(f"playdata/{demo}/checkpoints/plot_dict_train.p")
            target_df = _load_checkpoint(f"playdata/{demo}/checkpoints/target_df_train.p")
            if demo =='AD_normal':
                code = playdata_AD_normal
            elif demo == 'CM':
                code = playdata_CM
                target_name, pos_group = 'score_text', 'High'
                cat_target = ['sex', 'race']
                num_target = ['age']
            with open(f"playdata/{demo}/DAG/pipeline.svg", 'r') as content:
                svg = content.read()
            # Embed the pre-rendered DAG svg into a per-demo child template.
            with open(f'templates/{demo}.html', 'w+') as f:
                f.write("{% extends 'index1.html' %}\n")
                f.write("{% block content %}\n")
                f.write(svg)
                f.write('\n')
                f.write("{% endblock %}\n")
            return redirect(url_for('home'))
        # Below handles user defined cases, including:
        #   save to executable function python file which generates DAGs and intermediate dict files
        #   load intermediate dict files
        #   load dags
        #   parse intermediate dict and DAGs to main home page
        # Load user uploaded file.
        file = request.files['file']
        if file.filename == '':
            flash('No selected File')
            # NOTE(review): no 'logged_in' endpoint is visible in this file —
            # confirm this url_for target exists elsewhere in the app.
            return redirect(url_for('logged_in'))
        if file:
            # Uploads are always stored under the session uuid, so the
            # sanitized original filename is not needed.
            file.save(f'media/{user_id}.csv')
        # Pipeline code is written to an executable python file. Extract the
        # pipeline function title first.
        function_title = code.split('(')[0].replace('def ','')+"()"
        # Rewrite the pipeline's f_path argument to point at the uploaded csv.
        input_args = code.split('(')[1].split(')')[0].split(',')
        for i, item in enumerate(input_args):
            if 'f_path' in item:
                input_args[i] = f'f_path = \"./media/{user_id}.csv\"'
        input_arg = ','.join(input_args)
        code = ''.join([code.split('(')[0], '(', input_arg, ')', ')'.join(code.split(')')[1:])])
        # write essentials and pipeline codes to executable function python file. add trace wrapper above function declare line.
        with open(f'{user_id}.py', 'w+') as f:
            f.write(essentials)
            f.write(f"""@tracer(cat_col = {cat_target}, numerical_col = {num_target}, sensi_atts={cat_target}, target_name = \"{target_name}\", training=True, save_path=\"{save_path}\", dag_save=\"svg\")\n{code}\n""")
            f.write(f"pipeline = {function_title}")
        # NOTE(review): this shells out to run user-supplied pipeline code;
        # user_id is a server-side uuid, but executing uploaded code is only
        # acceptable for a trusted, single-user demo.
        os.system(f"python {user_id}.py")
        img = save_path + "/DAG/pipeline.svg"
        with open(img, 'r') as content:
            svg = content.read()
        with open(f'templates/{user_id}.html', 'w+') as f:
            f.write("{% extends 'index1.html' %}\n")
            f.write("{% block content %}\n")
            f.write(svg)
            f.write('\n')
            f.write("{% endblock %}\n")
        # Load saved intermediate dict files generated by the executable file.
        # (Fix: rand_rgb was previously pickle-loaded twice here as well.)
        log_dict = _load_checkpoint(save_path+"/checkpoints/log_dict_train.p")
        rand_rgb = _load_checkpoint(save_path+"/checkpoints/rand_color_train.p")
        plot_dict = _load_checkpoint(save_path+"/checkpoints/plot_dict_train.p")
        target_df = _load_checkpoint(save_path+"/checkpoints/target_df_train.p")
        return redirect(url_for('home'))
    return render_template("login_2.html", error = error, script_name = script_name)
@app.route('/', methods=['GET'])
@login_required
def home():
    """
    Main Function Flask function
    html adopts hierachical format. child html file takes care of DAG visualization while parent html deals with dynamic changes in codes, dag color, tables and histograms.
    In Main home page:
        Display user information in head row
        Display raw pipeline code. Change color w.r.t click events
        Display DAG generated from pipeline code. Change color w.r.t click events
        Display intermediate changes in both static lables and population stats
        Display visualization of changes in static lables and performance labels
    Returns:
        url for main home page.
    """
    # A click on a DAG node arrives as ?type=<step id>; record it so the
    # whole click history is replayed below on every request.
    selected_status = request.args.get('type')
    if selected_status is not None:
        cache.append(selected_status)
    corr_color = [rand_rgb[int(step)] for step in cache]
    plots = {}
    to_plot = []
    code_with_color = ""
    # variable initialization
    # NOTE(review): plt_xs, plt_ys, plt_xas, plt_yas are never used below.
    tables_to_display, titles, labels, code_titles, plt_xs, plt_ys, plt_titles, plt_xas, plt_yas, plot_log_changes = [], [], [], [], [], [], [], [], [], []
    # cache is used to store user click events
    for status in cache:
        if 'Classifier' in int_to_string(int(status)):
            # Classifier step: map the binary labels back to readable strings
            # and prepend the performance-label plot.
            # NOTE(review): target_df is mutated in place on every request —
            # replace() is only effective the first time the labels are numeric.
            label_inverse = {1: '<=50K', 0:'>50K'}
            target_df[target_name].replace(label_inverse, inplace = True)
            target_df['pred_'+target_name].replace(label_inverse, inplace = True)
            plt_titles.insert(0, 'Performance Label')
            to_plot.insert(0, (get_performance_label(target_df, cat_target, target_name, pos_group), perform_target))
            to_plot.append(static_label(target_df, cat_target, target_name))
            plot_log_changes.append(pd.DataFrame(static_label(target_df, cat_target, target_name)))
        else:
            to_plot.append(sort_dict_key(plot_dict[int(status)]))
            plot_log_changes.append(pd.DataFrame(sort_dict_key(plot_dict[int(status)])))
        # display tables
        if int(status) in log_dict.keys():
            temp_table = log_dict[int(status)]
            # Diff the static-label snapshot against the previous step; the
            # first step has nothing to diff against.
            if len(plot_log_changes) == 1:
                tables_to_display.append('No changes')
            else:
                if plot_log_changes[-1].equals(plot_log_changes[-2]):
                    tables_to_display.append('No changes')
                else:
                    tables_to_display.append((plot_log_changes[-1] - plot_log_changes[-2]).to_html(classes = 'table table-striped'))
            for key, dataframe in temp_table.items():
                tables_to_display.append(dataframe.to_html(classes = 'table table-striped'))
                num_cat = "NUMERICAL features" if key == 'num' else "CATEGORICAL features"
                titles.append(int_to_string(int(status)))
                labels.append(' -- Static Label, show changes in percentage')
                titles.append(int_to_string(int(status)))
                labels.append(" -- TARGET changed in "+num_cat)
                code_titles.append(int_to_string(int(status)))
                # start_plotly
                # NOTE(review): both branches below append the identical title —
                # probably intended to differ per 'cat'/'num'; confirm and simplify.
                if key == 'cat':
                    plt_titles.append('INSPECTING ' + int_to_string(int(status)))
                else:
                    plt_titles.append('INSPECTING ' + int_to_string(int(status)))
        else:
            # Step without logged tables: emit the same diff logic plus a
            # placeholder table entry.
            if len(plot_log_changes) == 1:
                tables_to_display.append('No changes')
            else:
                if plot_log_changes[-1].equals(plot_log_changes[-2]):
                    tables_to_display.append('No changes')
                else:
                    tables_to_display.append((plot_log_changes[-1] - plot_log_changes[-2]).to_html(classes = 'table table-striped'))
            tables_to_display.append('No changes')
            titles.append(int_to_string(int(status)))
            labels.append(' -- Static Label, show changes in percentage')
            titles.append(int_to_string(int(status)))
            labels.append('')
            code_titles.append(int_to_string(int(status)))
            plt_titles.append('INSPECTING ' + int_to_string(int(status)))
    plots = create_hist_sub_plot(to_plot[::-1], plt_titles[::-1], pos_group)
    # change code color w.r.t click events
    code_with_color = change_code_color(corr_color, code_titles, code)
    template_to_render = user_id if demo=="USER" else demo
    # parse variables to html file; lists are reversed so the newest click
    # shows first, and each color is repeated to cover its title+label pair.
    return render_template(f'{template_to_render}.html',
                            plots = plots, tables = tables_to_display[::-1], titles = titles[::-1],
                            labels = labels[::-1], colors = np.array(corr_color[::-1]).repeat(2).tolist(),
                            code = code_with_color, name = name, org = organization, script_name = script_name)
@app.route('/logout')
@login_required
def logout():
    """Clear the login flag from the session and return to the login page."""
    session.pop('logged_in', None)
    flash('You were just logged out')
    return redirect(url_for('login'))
if __name__ == '__main__':
app.run(debug=True)
| [
"json.dump",
"flask.flash",
"flask.session.pop",
"flask.request.args.get",
"warnings.filterwarnings",
"flask.Flask",
"os.system",
"werkzeug.utils.secure_filename",
"uuid.uuid1",
"flask.url_for",
"flask.request.form.to_dict",
"numpy.array",
"functools.wraps",
"flask.render_template",
"os.... | [((924, 957), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (947, 957), False, 'import warnings\n'), ((965, 980), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (970, 980), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((1004, 1016), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1014, 1016), False, 'import uuid\n'), ((1090, 1118), 'os.getenv', 'os.getenv', (['"""SCRIPT_NAME"""', '""""""'], {}), "('SCRIPT_NAME', '')\n", (1099, 1118), False, 'import os\n'), ((4788, 4796), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (4793, 4796), False, 'from functools import wraps\n'), ((12123, 12192), 'flask.render_template', 'render_template', (['"""login_2.html"""'], {'error': 'error', 'script_name': 'script_name'}), "('login_2.html', error=error, script_name=script_name)\n", (12138, 12192), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((12928, 12952), 'flask.request.args.get', 'request.args.get', (['"""type"""'], {}), "('type')\n", (12944, 12952), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((16991, 17021), 'flask.session.pop', 'session.pop', (['"""logged_in"""', 'None'], {}), "('logged_in', None)\n", (17002, 17021), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((17026, 17059), 'flask.flash', 'flash', (['"""You were just logged out"""'], {}), "('You were just logged out')\n", (17031, 17059), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((7986, 8018), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {'flat': '(False)'}), '(flat=False)\n', (8006, 8018), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((8122, 8154), 'flask.flash', 'flash', 
(['"""You were just logged in"""'], {}), "('You were just logged in')\n", (8127, 8154), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((11136, 11169), 'os.system', 'os.system', (['f"""python {user_id}.py"""'], {}), "(f'python {user_id}.py')\n", (11145, 11169), False, 'import os\n'), ((17080, 17096), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (17087, 17096), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((5084, 5116), 'flask.flash', 'flash', (['"""You need to login first"""'], {}), "('You need to login first')\n", (5089, 5116), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((8086, 8112), 'json.dump', 'json.dump', (['to_json_dict', 'f'], {}), '(to_json_dict, f)\n', (8095, 8112), False, 'import json\n'), ((9918, 9943), 'flask.flash', 'flash', (['"""No selected File"""'], {}), "('No selected File')\n", (9923, 9943), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((10034, 10064), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (10049, 10064), False, 'from werkzeug.utils import secure_filename\n'), ((12095, 12110), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (12102, 12110), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((5145, 5161), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (5152, 5161), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((9510, 9525), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (9517, 9525), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((9972, 9992), 'flask.url_for', 'url_for', (['"""logged_in"""'], {}), 
"('logged_in')\n", (9979, 9992), False, 'from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup\n'), ((16794, 16820), 'numpy.array', 'np.array', (['corr_color[::-1]'], {}), '(corr_color[::-1])\n', (16802, 16820), True, 'import numpy as np\n')] |
# Computes expected results for `testGRU()` in `Tests/TensorFlowTests/LayerTests.swift`.
# Requires 'tensorflow>=2.0.0a0' (e.g. "pip install tensorflow==2.2.0").
import sys
import numpy
import tensorflow as tf
# Set random seed for repeatable results
tf.random.set_seed(0)
def indented(s):
    """Return *s* with every line prefixed by a single space."""
    return '\n'.join(' ' + line for line in s.split('\n'))
def swift_tensor(name, tensor):
    """Render *tensor* as a Swift ``let <name> = Tensor<Float>(...)`` literal."""
    # Accept eager TF tensors as well as plain numpy arrays.
    if hasattr(tensor, 'numpy'):
        tensor = tensor.numpy()

    def format_float(value):
        # unique=True yields the shortest round-trippable decimal; Swift
        # requires a digit after the decimal point, hence the trailing '0'.
        text = numpy.format_float_positional(value, unique=True)
        return text + '0' if text.endswith('.') else text

    body = numpy.array2string(tensor, separator=',',
                              formatter={'float_kind': format_float})
    return 'let {} = Tensor<Float>(\n{}\n)'.format(name, indented(body))
units = 4
input_dim = 3
input_length = 4
# Reverse the input sequence when 'go_backwards' is passed on the command line.
go_backwards = "go_backwards" in sys.argv
# Initialize the keras model with the GRU.
gru = tf.keras.layers.GRU(
    input_dim=input_dim,
    units=units,
    activation="tanh", recurrent_activation="sigmoid",
    return_sequences=True, return_state=True,
    go_backwards=go_backwards)
x_input = tf.keras.Input(shape=[input_length, input_dim])
initial_state = tf.keras.Input(shape=[units])
initial_state_input = [initial_state]
output = gru(x_input, initial_state=initial_state_input)
model = tf.keras.Model(inputs=[x_input, initial_state_input], outputs=[output])
# Keras concatenates the three GRU gates along the last axis in the order
# [update (z), reset (r), new/candidate (h)]; slice them apart so each gate
# can be emitted as a separate Swift tensor.
[kernel, recurrent_kernel, bias] = gru.get_weights()
update_kernel = kernel[:, :units]
update_recurrent_kernel = recurrent_kernel[:, :units]
reset_kernel = kernel[:, units: units * 2]
reset_recurrent_kernel = recurrent_kernel[:, units: units * 2]
new_kernel = kernel[:, units * 2:]
new_recurrent_kernel = recurrent_kernel[:, units * 2:]
# bias is indexed as bias[0] (input bias) and bias[1] (recurrent bias) —
# two rows because TF2's GRU defaults to reset_after=True.
update_bias = bias[0][:units]
update_recurrent_bias = bias[1][:units]
reset_bias = bias[0][units: units * 2]
reset_recurrent_bias = bias[1][units: units * 2]
new_bias = bias[0][units * 2:]
new_recurrent_bias = bias[1][units * 2:]
# Print the GRU weights.
print(swift_tensor('updateKernel', update_kernel))
print(swift_tensor('resetKernel', reset_kernel))
print(swift_tensor('outputKernel', new_kernel))
print(swift_tensor('updateRecurrentKernel', update_recurrent_kernel))
print(swift_tensor('resetRecurrentKernel', reset_recurrent_kernel))
print(swift_tensor('outputRecurrentKernel', new_recurrent_kernel))
print(swift_tensor('updateBias', update_bias))
print(swift_tensor('resetBias', reset_bias))
print(swift_tensor('outputBias', new_bias))
print(swift_tensor('updateRecurrentBias', update_recurrent_bias))
print(swift_tensor('resetRecurrentBias', reset_recurrent_bias))
print(swift_tensor('outputRecurrentBias', new_recurrent_bias))
# Initialize input data and print it.
x = tf.keras.initializers.GlorotUniform()(shape=[1, input_length, input_dim])
initial_state = [
    tf.keras.initializers.GlorotUniform()(shape=[1, units]),
]
print(swift_tensor('x', x))
print(swift_tensor('initialState', initial_state[0]))
# Run forwards and backwards pass and print the results.
with tf.GradientTape() as tape:
    tape.watch(x)
    tape.watch(initial_state)
    [[states, final_state]] = model([x, initial_state])
    sum_output = tf.reduce_sum(states[0][-1])
[grad_model, grad_x, grad_initial_state] = tape.gradient(sum_output, [model.variables, x, initial_state])
[grad_kernel, grad_recurrent_kernel, grad_bias] = grad_model
[grad_initial_state] = grad_initial_state
# Slice the gradients exactly the same way as the weights above.
grad_update_kernel = grad_kernel[:, :units]
grad_update_recurrent_kernel = grad_recurrent_kernel[:, :units]
grad_reset_kernel = grad_kernel[:, units: units * 2]
grad_reset_recurrent_kernel = grad_recurrent_kernel[:, units: units * 2]
grad_new_kernel = grad_kernel[:, units * 2:]
grad_new_recurrent_kernel = grad_recurrent_kernel[:, units * 2:]
grad_update_bias = grad_bias[0][:units]
grad_update_recurrent_bias = grad_bias[1][:units]
grad_reset_bias = grad_bias[0][units: units * 2]
grad_reset_recurrent_bias = grad_bias[1][units: units * 2]
grad_new_bias = grad_bias[0][units * 2:]
grad_new_recurrent_bias = grad_bias[1][units * 2:]
print(swift_tensor('expectedSum', sum_output))
print(swift_tensor('expectedStates', states))
print(swift_tensor('expectedFinalState', final_state))
print(swift_tensor('expectedGradX', grad_x))
print(swift_tensor('expectedGradInitialState', grad_initial_state))
print(swift_tensor('expectedGradUpdateKernel', grad_update_kernel))
print(swift_tensor('expectedGradResetKernel', grad_reset_kernel))
print(swift_tensor('expectedGradOutputKernel', grad_new_kernel))
print(swift_tensor('expectedGradUpdateRecurrentKernel', grad_update_recurrent_kernel))
print(swift_tensor('expectedGradResetRecurrentKernel', grad_reset_recurrent_kernel))
print(swift_tensor('expectedGradOutputRecurrentKernel', grad_new_recurrent_kernel))
print(swift_tensor('expectedGradUpdateBias', grad_update_bias))
print(swift_tensor('expectedGradResetBias', grad_reset_bias))
print(swift_tensor('expectedGradOutputBias', grad_new_bias))
print(swift_tensor('expectedGradUpdateRecurrentBias', grad_update_recurrent_bias))
print(swift_tensor('expectedGradResetRecurrentBias', grad_reset_recurrent_bias))
print(swift_tensor('expectedGradOutputRecurrentBias', grad_new_recurrent_bias))
| [
"tensorflow.random.set_seed",
"tensorflow.reduce_sum",
"numpy.format_float_positional",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.Input",
"numpy.array2string",
"tensorflow.keras.Model",
"tensorflow.keras.initializers.GlorotUniform",
"tensorflow.GradientTape"
] | [((252, 273), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (270, 273), True, 'import tensorflow as tf\n'), ((973, 1155), 'tensorflow.keras.layers.GRU', 'tf.keras.layers.GRU', ([], {'input_dim': 'input_dim', 'units': 'units', 'activation': '"""tanh"""', 'recurrent_activation': '"""sigmoid"""', 'return_sequences': '(True)', 'return_state': '(True)', 'go_backwards': 'go_backwards'}), "(input_dim=input_dim, units=units, activation='tanh',\n recurrent_activation='sigmoid', return_sequences=True, return_state=\n True, go_backwards=go_backwards)\n", (992, 1155), True, 'import tensorflow as tf\n'), ((1180, 1227), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '[input_length, input_dim]'}), '(shape=[input_length, input_dim])\n', (1194, 1227), True, 'import tensorflow as tf\n'), ((1245, 1274), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '[units]'}), '(shape=[units])\n', (1259, 1274), True, 'import tensorflow as tf\n'), ((1379, 1450), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[x_input, initial_state_input]', 'outputs': '[output]'}), '(inputs=[x_input, initial_state_input], outputs=[output])\n', (1393, 1450), True, 'import tensorflow as tf\n'), ((2771, 2808), 'tensorflow.keras.initializers.GlorotUniform', 'tf.keras.initializers.GlorotUniform', ([], {}), '()\n', (2806, 2808), True, 'import tensorflow as tf\n'), ((3071, 3088), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3086, 3088), True, 'import tensorflow as tf\n'), ((3219, 3247), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['states[0][-1]'], {}), '(states[0][-1])\n', (3232, 3247), True, 'import tensorflow as tf\n'), ((493, 538), 'numpy.format_float_positional', 'numpy.format_float_positional', (['x'], {'unique': '(True)'}), '(x, unique=True)\n', (522, 538), False, 'import numpy\n'), ((2867, 2904), 'tensorflow.keras.initializers.GlorotUniform', 'tf.keras.initializers.GlorotUniform', ([], {}), '()\n', (2902, 2904), True, 'import 
tensorflow as tf\n'), ((774, 836), 'numpy.array2string', 'numpy.array2string', (['tensor'], {'separator': '""","""', 'formatter': 'formatter'}), "(tensor, separator=',', formatter=formatter)\n", (792, 836), False, 'import numpy\n')] |
import matplotlib.pyplot as plt
from skimage import exposure
import numpy as np
def plot_img_and_mask(img, mask):
    """Show *img* (grayscale, contrast-enhanced) with *mask* overlaid as red contours.

    Parameters
    ----------
    img : array-like
        2-D image; rescaled to [0, 1] and adaptively histogram-equalized.
    mask : array-like
        Segmentation mask, drawn as a contour on top of the image.
    """
    img = np.array(img)
    lo, hi = img.min(), img.max()
    # Guard against constant images: the original (img - min) / (max - min)
    # divides by zero and feeds NaNs into equalize_adapthist.
    if hi > lo:
        img = (img - lo) / (hi - lo)
    else:
        img = np.zeros_like(img, dtype=float)
    img = exposure.equalize_adapthist(img)
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    ax.set_title('Input image')
    ax.imshow(img, cmap="gray")
    ax.contour(mask, colors="red")
    plt.xticks([]), plt.yticks([])
    plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.yticks",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"skimage.exposure.equalize_adapthist"
] | [((126, 139), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (134, 139), True, 'import numpy as np\n'), ((204, 236), 'skimage.exposure.equalize_adapthist', 'exposure.equalize_adapthist', (['img'], {}), '(img)\n', (231, 236), False, 'from skimage import exposure\n'), ((252, 288), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (264, 288), True, 'import matplotlib.pyplot as plt\n'), ((427, 437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (435, 437), True, 'import matplotlib.pyplot as plt\n'), ((392, 406), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (402, 406), True, 'import matplotlib.pyplot as plt\n'), ((408, 422), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (418, 422), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import os
import glob
import numpy as np
import pytest
from pyDeltaRCM.model import DeltaModel
from pyDeltaRCM import shared_tools
# utilities for file writing
def create_temporary_file(tmp_path, file_name):
    """Create ``tmp_path/configs/file_name`` and return ``(path, open handle)``.

    The file is opened for writing; the caller is responsible for closing
    the returned handle.
    """
    config_dir = tmp_path / 'configs'
    config_dir.mkdir(parents=True, exist_ok=True)
    file_path = config_dir / file_name
    handle = open(file_path, "w")
    return file_path, handle
def write_parameter_to_file(f, varname, varvalue):
    """Append one ``name: value`` YAML line to the open file handle *f*."""
    f.write(f"{varname}: {varvalue}\n")
def write_matrix_to_file(f, keys, lists):
    """Write a YAML ``matrix`` mapping: each key gets its list of values.

    *keys* and *lists* are parallel sequences of equal length.
    """
    f.write('matrix: \n')
    for key, values in zip(keys, lists):
        f.write(f'  {key}: \n')
        for value in values:
            f.write(f'    - {value}\n')
def write_set_to_file(f, set_list):
    """Write a YAML ``set`` sequence of flow-style mappings, one per dict.

    Output format (including the trailing comma) matches the original
    writer byte-for-byte.
    """
    f.write('set: \n')
    for entry in set_list:
        f.write('  - {')
        for key, value in entry.items():
            f.write(f'{key}: {value}, ')
        f.write('}\n')
def yaml_from_dict(tmp_path, file_name, _dict=None):
    """Write *_dict* as a YAML config under *tmp_path* and return its path.

    Guarantees an ``out_dir`` key is present, defaulting to
    ``tmp_path / 'out_dir'``.  Note: a caller-supplied *_dict* lacking
    ``out_dir`` is mutated in place (matching the original behavior).
    """
    path, handle = create_temporary_file(tmp_path, file_name)
    if _dict is None:
        _dict = {'out_dir': tmp_path / 'out_dir'}
    elif 'out_dir' not in _dict:
        _dict['out_dir'] = tmp_path / 'out_dir'
    for key, value in _dict.items():
        write_parameter_to_file(handle, key, value)
    handle.close()
    return path
@pytest.fixture(scope='function')
def test_DeltaModel(tmp_path):
    """Pytest fixture: a small, fast-to-build DeltaModel.

    The parameters are declared in a single table instead of ~30 repetitive
    ``write_parameter_to_file`` calls; dict insertion order is preserved, so
    the emitted config file is byte-identical to the original fixture's.
    """
    file_name = 'user_parameters.yaml'
    p, f = create_temporary_file(tmp_path, file_name)
    parameters = {
        'out_dir': tmp_path / 'out_dir',
        'Length': 10.0,
        'Width': 10.0,
        'seed': 0,
        'dx': 1.0,
        'L0_meters': 1.0,
        'S0': 0.0002,
        'itermax': 1,
        'Np_water': 10,
        'u0': 1.0,
        'N0_meters': 2.0,
        'h0': 1.0,
        'H_SL': 0.0,
        'SLR': 0.001,
        'Np_sed': 10,
        'f_bedload': 0.5,
        'C0_percent': 0.1,
        'toggle_subsidence': False,
        'subsidence_rate': 0.0,
        'start_subsidence': 50.,
        'save_eta_figs': False,
        'save_stage_figs': False,
        'save_depth_figs': False,
        'save_discharge_figs': False,
        'save_velocity_figs': False,
        'save_eta_grids': False,
        'save_stage_grids': False,
        'save_depth_grids': False,
        'save_discharge_grids': False,
        'save_velocity_grids': False,
        'save_dt': 500,
    }
    for varname, varvalue in parameters.items():
        write_parameter_to_file(f, varname, varvalue)
    f.close()
    _delta = DeltaModel(input_file=p)
    return _delta
class FastIteratingDeltaModel:
    """A fast-iterating DeltaModel patch.

    Useful when patching DeltaModel for timing tests: overriding only
    ``solve_water_and_sediment_timestep`` removes most of the jit
    compilation and computation time, while still drawing from the shared
    random stream so checkpointing scenarios stay reproducible.
    """

    def solve_water_and_sediment_timestep(self):
        """PATCH"""
        shape = self.eta.shape

        def _random_field():
            """Fill a float32 field element-by-element.

            Using ``shared_tools.get_random_uniform`` per element (in this
            exact row-major order) is critical for reproducibility.
            """
            out = np.zeros(shape, dtype=np.float32)
            for row in range(shape[0]):
                for col in range(shape[1]):
                    out[row, col] = shared_tools.get_random_uniform(1)
            return out

        # Same field-update order as the original patch:
        # eta, uw, ux, uy, depth, stage.
        self.eta += _random_field()
        self.uw += _random_field()
        self.ux += _random_field()
        self.uy += _random_field()
        self.depth += _random_field()
        self.stage += _random_field()
def read_endtime_from_log(log_folder):
    """Return the last reported ``Time:`` value from the single ``.log``
    file in *log_folder* (0.0 when no Time line is found)."""
    log_files = glob.glob(os.path.join(log_folder, '*.log'))
    assert len(log_files) == 1  # exactly one log file expected
    with open(log_files[0], 'r') as fp:
        lines = fp.readlines()
    end_time = 0
    for line in lines:
        if 'Time: ' in line:
            # The time value is the 7th space-separated token,
            # e.g. '... Time: 2500.0 ;'
            end_time = line.split(' ')[6].strip(' ;')
    return float(end_time)
| [
"pyDeltaRCM.model.DeltaModel",
"numpy.zeros",
"pytest.fixture",
"pyDeltaRCM.shared_tools.get_random_uniform",
"os.path.join"
] | [((1390, 1422), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1404, 1422), False, 'import pytest\n'), ((3169, 3193), 'pyDeltaRCM.model.DeltaModel', 'DeltaModel', ([], {'input_file': 'p'}), '(input_file=p)\n', (3179, 3193), False, 'from pyDeltaRCM.model import DeltaModel\n'), ((4530, 4563), 'os.path.join', 'os.path.join', (['log_folder', '"""*.log"""'], {}), "(log_folder, '*.log')\n", (4542, 4563), False, 'import os\n'), ((3976, 4007), 'numpy.zeros', 'np.zeros', (['shp'], {'dtype': 'np.float32'}), '(shp, dtype=np.float32)\n', (3984, 4007), True, 'import numpy as np\n'), ((4118, 4152), 'pyDeltaRCM.shared_tools.get_random_uniform', 'shared_tools.get_random_uniform', (['(1)'], {}), '(1)\n', (4149, 4152), False, 'from pyDeltaRCM import shared_tools\n')] |
#!/usr/bin/env python
# coding: utf-8
# Many thanks for <EMAIL> & <EMAIL> for their orginal work and allowing me to share!
import argparse
import numpy as np
import torch
import torch.nn as nn
import joblib
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import RobustScaler
from torch.utils.data import DataLoader, TensorDataset
from uda_model import UDAModel
def write_epm_file(preds, truth, epm_fname):
    """Export tagger output to an EPM-compatible ROOT file ``<epm_fname>.root``.

    Branches written to the 'DecayTree' tree:
      * B_TRUEID: +-511 derived from *truth* (0 -> -511, otherwise 511)
      * tag:      +-1 tag decision, thresholded at 0.5
      * eta:      predicted mistag probability, folded to <= 0.5
    """
    import uproot3  # local import: only needed when exporting

    scores = preds.squeeze()
    tag_decision = np.where(scores > 0.5, 1, -1).astype(np.int32)
    true_b_id = np.where(truth.squeeze() == 0, -511, 511).astype(np.int32)
    mistag = np.where(scores > 0.5, 1 - scores, scores)
    with uproot3.recreate(f"{epm_fname}.root", compression=None) as rootfile:
        rootfile["DecayTree"] = uproot3.newtree(
            {"B_TRUEID": np.int32, "tag": np.int32, "eta": np.float64})
        tree = rootfile["DecayTree"]
        tree["B_TRUEID"].newbasket(true_b_id)
        tree["tag"].newbasket(tag_decision)
        tree["eta"].newbasket(mistag)
# logging utilities: pretty-printing of shapes and tag frequencies
def format_shapes(features, tags, idx, borders):
    """Return a one-line summary of tensor/sequence shapes for logging."""
    return (
        f"features {tuple(features.size())} "
        f"tags {tuple(tags.size())} "
        f"idx {tuple(idx.size())} "
        f"borders ({len(borders)},)"
    )
def format_tag_frequencies(tags):
    """Return 'value(count)' pairs for each distinct tag, e.g. '0(3), 1(5)'."""
    values, counts = torch.unique(tags, return_counts=True)
    return ', '.join(f"{int(v)}({int(c)})" for v, c in zip(values, counts))
# like itertools.cycle but reshuffling a DataLoader instance in each cycle
class ShuffleCycle(object):
    """Endless iterator over a DataLoader.

    Like ``itertools.cycle`` but re-creating the underlying iterator each
    time it is exhausted, so a shuffling DataLoader reshuffles every pass.
    """

    def __init__(self, dataloader):
        self.dataloader = dataloader
        self.iter = iter(self.dataloader)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            item = next(self.iter)
        except StopIteration:
            # Pass exhausted: start a fresh (reshuffled) iterator.
            self.iter = iter(self.dataloader)
            item = next(self.iter)
        return item
def train_model(files, validation_files, model_out_name, scaler_out_name, n_epochs, train_frac, batch_size, make_epm_output, gamma=10, dc_weight=0.2):
    """Train the UDA flavour-tagging model.

    Parameters
    ----------
    files : list of npz-like mappings
        Source-domain (MC) data; each needs "features", "B_TRUEID" and
        "evt_borders" entries.
    validation_files : list of npz-like mappings
        Target-domain (real) data; labelled by "B_ID" instead of "B_TRUEID".
    model_out_name : str
        Basename for the saved model weights (".pt" is appended).
    scaler_out_name : str
        Basename for the saved feature scaler (".bin" is appended).
    n_epochs : int
        Number of training epochs.
    train_frac : float
        Fraction of events used for training; the rest is used for validation.
    batch_size : int
        Approximate number of events per batch.
    make_epm_output : bool
        If True, write the tagged MC validation data to a ROOT file
        (requires the uproot3 package; skipped with a warning otherwise).
    gamma : float, optional
        Steepness of the alpha schedule for the gradient reversal layer.
    dc_weight : float, optional
        Weight of the domain-classifier loss relative to the tagging loss.

    Side effects: saves the scaler and model weights to disk and writes two
    diagnostic matplotlib plots (loss and accuracy vs. epoch) as PNG files.
    """
    print("Starting Training")
    # some torch setup
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.backends.cudnn.benchmark = True
    torch.manual_seed(25031992)
    torch.cuda.manual_seed(25031992)
    features = np.concatenate([f["features"] for f in files])
    tags = np.concatenate([f["B_TRUEID"] for f in files]).reshape((-1, 1))
    # binary label: PDG id 521 -> 1, everything else -> 0
    tags = np.where(tags == 521, 1, 0).astype(np.int32)
    # concatenate per-file event borders, shifting each file's offsets
    evt_borders = files[0]["evt_borders"]
    for f in files[1:]:
        evt_borders = np.concatenate((evt_borders, f["evt_borders"][1:] + evt_borders[-1]))
    assert evt_borders[-1] == len(features)
    # probnnmu has a bin at -1, for particles that don't have muon info
    # map that to 0
    features[features[:, 3] == -1, 3] = 0
    # scale data, and safe scaler for later use
    scaler = RobustScaler()
    features = scaler.fit_transform(features)
    joblib.dump(scaler, f"{scaler_out_name}.bin")
    # borders[i] = (first track, one-past-last track) of event i;
    # idx_vec maps each track back to its event index
    borders = np.array(list(zip(evt_borders[:-1], evt_borders[1:])))
    idx_vec = np.zeros(len(features), dtype=np.int64)
    for i, (b, e) in enumerate(borders):
        idx_vec[b:e] = i
    # split by events; track_split is the matching per-track boundary
    evt_split = int(len(borders) * train_frac)
    track_split = evt_borders[evt_split]
    train_tags = torch.tensor(tags[:evt_split], dtype=torch.float32).to(device)
    train_feat = torch.tensor(features[:track_split]).to(device)
    train_idx = torch.tensor(idx_vec[:track_split]).to(device)
    test_tags_np = tags[evt_split:]
    test_tags = torch.tensor(test_tags_np, dtype=torch.float32).to(device)
    test_feat = torch.tensor(features[track_split:]).to(device)
    test_idx = torch.tensor(idx_vec[track_split:]).to(device)
    # batch borders: (first track, one-past-last track) per batch of events
    train_borders = [
        (x[0, 0], x[-1, 1])
        for x in np.array_split(borders[:evt_split], len(borders[:evt_split]) // batch_size)
    ]
    test_borders = [
        (x[0, 0], x[-1, 1])
        for x in np.array_split(borders[evt_split:] - borders[evt_split][0], len(borders[evt_split:]) // batch_size)
    ]
    # UDA: process the validation_files equivalently; use "B_ID" instead of "B_TRUEID" and do not split
    val_features = np.concatenate([f["features"] for f in validation_files])
    val_tags = np.concatenate([f["B_ID"] for f in validation_files]).reshape((-1, 1))
    val_tags = np.where(val_tags == 521, 1, 0).astype(np.int32)
    val_evt_borders = validation_files[0]["evt_borders"]
    for f in validation_files[1:]:
        val_evt_borders = np.concatenate((val_evt_borders, f["evt_borders"][1:] + val_evt_borders[-1]))
    assert val_evt_borders[-1] == len(val_features)
    val_features[val_features[:, 3] == -1, 3] = 0 # probnnmu (see above)
    val_features = scaler.transform(val_features)
    val_borders = np.array(list(zip(val_evt_borders[:-1], val_evt_borders[1:])))
    val_idx_vec = np.zeros(len(val_features), dtype=np.int64)
    for i, (b, e) in enumerate(val_borders):
        val_idx_vec[b:e] = i
    val_evt_split = int(len(val_borders) * train_frac)
    val_track_split = val_evt_borders[val_evt_split]
    val_train_tags = torch.tensor(val_tags[:val_evt_split], dtype=torch.float32).to(device)
    val_train_feat = torch.tensor(val_features[:val_track_split]).to(device)
    val_train_idx = torch.tensor(val_idx_vec[:val_track_split]).to(device)
    val_test_tags_np = val_tags[val_evt_split:]
    val_test_tags = torch.tensor(val_test_tags_np, dtype=torch.float32).to(device)
    val_test_feat = torch.tensor(val_features[val_track_split:]).to(device)
    val_test_idx = torch.tensor(val_idx_vec[val_track_split:]).to(device)
    val_train_borders = [
        (x[0, 0], x[-1, 1])
        for x in np.array_split(val_borders[:val_evt_split], len(val_borders[:val_evt_split]) // batch_size)
    ]
    val_test_borders = [
        (x[0, 0], x[-1, 1])
        for x in np.array_split(val_borders[val_evt_split:] - val_borders[val_evt_split][0], len(val_borders[val_evt_split:]) // batch_size)
    ]
    print(
        f"MC training shapes: {format_shapes(train_feat, train_tags, train_idx, train_borders)}",
        f"MC testing shapes: {format_shapes(test_feat, test_tags, test_idx, test_borders)}",
        f"Data training shapes: {format_shapes(val_train_feat, val_train_tags, val_train_idx, val_train_borders)}",
        f"Data testing shapes: {format_shapes(val_test_feat, val_test_tags, val_test_idx, val_test_borders)}",
        f"MC training tag frequencies: {format_tag_frequencies(train_tags)}",
        f"MC testing tag frequencies: {format_tag_frequencies(test_tags)}",
        f"Data training tag frequencies: {format_tag_frequencies(val_train_tags)}",
        f"Data testing tag frequencies: {format_tag_frequencies(val_test_tags)}",
        sep="\n"
    ) # log some general statistics about the data sources
    model = UDAModel().to(device)
    optimizer = torch.optim.Adam(model.parameters())
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, min_lr=1e-5, patience=5)
    all_train_loss = []
    all_test_loss = []
    all_test_acc = []
    mypreds = np.zeros((len(test_tags), 1))
    all_val_loss = []
    all_val_acc = []
    valpreds = np.zeros((len(val_test_tags), 1))
    all_train_domain_loss = []
    all_test_domain_loss = []
    # torch data loaders reshuffle the data in each epoch
    train_dl = DataLoader(
        TensorDataset(torch.tensor(train_borders, dtype=torch.int)),
        shuffle = True,
        batch_size = None
    )
    val_train_dl = DataLoader(
        TensorDataset(torch.tensor(val_train_borders, dtype=torch.int)),
        shuffle = True,
        batch_size = None
    )
    for epoch in range(n_epochs):
        model.train()
        trainloss = 0
        fullloss = 0
        # progress and alpha value for the Gradient Reversal Layer
        p = float(epoch) / n_epochs
        alpha = 2. / (1. + np.exp(-gamma * p)) - 1
        # ShuffleCycle lets the (possibly shorter) real-data loader repeat
        # so every MC batch is paired with a real-data batch
        for batch_idx, (batch_border, val_batch_border) in enumerate(zip(train_dl, ShuffleCycle(val_train_dl))):
            beg, end = batch_border[0].numpy() # unpack the borders from train_dl
            val_beg, val_end = val_batch_border[0].numpy()
            optimizer.zero_grad()
            data = train_feat[beg:end]
            idx = train_idx[beg:end] - train_idx[beg]
            e_beg, e_end = train_idx[[beg, end - 1]]
            # one past the last event is the boundary
            e_end += 1
            target = train_tags[e_beg:e_end]
            output, uda_output = model(data, idx, alpha)
            loss = nn.functional.binary_cross_entropy_with_logits(output, target)
            trainloss += loss.detach().cpu().numpy()
            # UDA: feed the real data into the model
            data = val_train_feat[val_beg:val_end]
            idx = val_train_idx[val_beg:val_end] - val_train_idx[val_beg]
            _, val_uda_output = model(data, idx, alpha)
            # UDA: add the domain loss
            loss += nn.functional.binary_cross_entropy_with_logits(
                uda_output,
                torch.zeros_like(uda_output) # expect zeros
            ) * dc_weight / 2
            loss += nn.functional.binary_cross_entropy_with_logits(
                val_uda_output,
                torch.ones_like(val_uda_output) # expect ones
            ) * dc_weight / 2
            fullloss += loss.detach().cpu().numpy()
            loss.backward()
            optimizer.step()
        # averaged trainloss of epoch
        all_train_loss.append(trainloss / (batch_idx + 1))
        all_train_domain_loss.append((fullloss - trainloss) / (batch_idx + 1) / dc_weight)
        model.eval()
        test_loss = 0 # validation loss on source domain (= MC) data
        domain_loss = 0 # validation loss of the domain classifier
        for batch_idx, (beg, end) in enumerate(test_borders):
            data = test_feat[beg:end]
            # indices for the index_add inside the forward()
            idx = test_idx[beg:end] - test_idx[beg]
            # minus to make the test_idx start at 0 since we are indexing into
            # the split off test_tags array
            e_beg, e_end = test_idx[[beg, end - 1]] - test_idx[0]
            # one past the last event is the boundary
            e_end += 1
            target = test_tags[e_beg:e_end]
            with torch.no_grad():
                output, uda_output = model(data, idx, alpha)
            mypreds[e_beg:e_end] = torch.sigmoid(output.detach()).cpu().numpy()
            test_loss += nn.functional.binary_cross_entropy_with_logits(output, target).detach().cpu().numpy()
            domain_loss += nn.functional.binary_cross_entropy_with_logits(
                uda_output,
                torch.zeros_like(uda_output) # expect zeros
            ).detach().cpu().numpy()
        test_acc = np.mean((mypreds > 0.5) == test_tags_np)
        all_test_loss.append(test_loss / (batch_idx + 1))
        all_test_acc.append(test_acc)
        # process the validation_files equivalently
        val_loss = 0 # validation loss on target domain (= real) data
        for val_batch_idx, (beg, end) in enumerate(val_test_borders):
            data = val_test_feat[beg:end]
            idx = val_test_idx[beg:end] - val_test_idx[beg]
            e_beg, e_end = val_test_idx[[beg, end - 1]] - val_test_idx[0]
            e_end += 1
            target = val_test_tags[e_beg:e_end]
            with torch.no_grad():
                output, uda_output = model(data, idx, alpha)
            valpreds[e_beg:e_end] = torch.sigmoid(output.detach()).cpu().numpy()
            val_loss += nn.functional.binary_cross_entropy_with_logits(output, target).detach().cpu().numpy()
            domain_loss += nn.functional.binary_cross_entropy_with_logits(
                uda_output,
                torch.ones_like(uda_output) # expect ones
            ).detach().cpu().numpy()
        val_acc = np.mean((valpreds > 0.5) == val_test_tags_np)
        all_val_loss.append(val_loss / (val_batch_idx + 1))
        all_val_acc.append(val_acc)
        all_test_domain_loss.append(domain_loss / (len(test_borders) + len(val_test_borders)))
        # NOTE: batch_idx here is left over from the MC test loop, so the
        # scheduler steps on the averaged MC test loss
        scheduler.step(test_loss / (batch_idx + 1))
        print(
            f"Epoch: {epoch}/{n_epochs} | MC loss {test_loss/(batch_idx+1):.5f} | MC AUC: {roc_auc_score(test_tags_np, mypreds):.5f} | MC ACC: {test_acc:.5f}",
            f"| data loss {val_loss/(val_batch_idx+1):.5f} | data AUC: {roc_auc_score(val_test_tags_np, valpreds):.5f} | data ACC: {val_acc:.5f}",
            end="\r",
        )
    print("Training complete")
    print(f"Minimum MC testing loss: {min(all_test_loss):.5f} in epoch: {np.argmin(all_test_loss)}")
    print(f"Maximum MC testing ACC: {max(all_test_acc):.5f} in epoch: {np.argmax(all_test_acc)}")
    print(f"Minimum data loss: {min(all_val_loss):.5f} in epoch: {np.argmin(all_val_loss)}")
    print(f"Maximum data ACC: {max(all_val_acc):.5f} in epoch: {np.argmax(all_val_acc)}")
    # done training so let's set it to eval
    model.eval()
    torch.save(model.state_dict(), f"{model_out_name}.pt")
    if make_epm_output:
        print("Writing output for EPM")
        try:
            write_epm_file(mypreds, test_tags_np, f"{model_out_name}_epm")
        except ImportError:
            print("Option make-epm-output requires uproot3 package to be available.\n Writing of EPM output skipped!")
    print("Making plots.")
    import matplotlib
    import matplotlib.pyplot as plt
    matplotlib.rcParams.update({"font.size": 22})
    plt.figure(figsize=(16, 9))
    plt.plot(all_train_loss, label="MC training loss")
    plt.plot(all_test_loss, label="MC validation loss")
    plt.plot(all_val_loss, label="data validation loss")
    plt.plot(all_train_domain_loss, label="domain training loss", linestyle="dashed")
    plt.plot(all_test_domain_loss, label="domain validation loss", linestyle="dashed")
    plt.legend()
    plt.xlabel("Epoch")
    plt.ylim(0.6, 0.8)
    plt.grid()
    plt.savefig("uda_Loss_vs_Epoch.png")
    plt.figure(figsize=(16, 9))
    plt.plot(all_test_acc, label="MC validation accuracy")
    plt.plot(all_val_acc, label="data validation accuracy")
    plt.legend()
    plt.xlabel("Epoch")
    plt.ylim(0.48, 0.64)
    plt.grid()
    plt.savefig("uda_Accuracy_vs_Epoch.png")
def restricted_float(x):
    """argparse type: parse ``x`` as a float in the half-open range (0.0, 1.0].

    Raises argparse.ArgumentTypeError for non-numeric input or for values
    outside the range.
    """
    try:
        value = float(x)
    except ValueError:
        raise argparse.ArgumentTypeError(f"{x} not a floating-point literal")
    if value <= 0.0 or value > 1.0:
        raise argparse.ArgumentTypeError(f"{value} not in range (0.0, 1.0]")
    return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train Model for Flavour Tagging.")
parser.add_argument("filenames", nargs="+", help="Files that contain training data. *.npz files expected)")
parser.add_argument_group("required named arguments").add_argument(
"-validate",
nargs="+",
help="Files that contain validation data of the target domain",
required=True
) # https://stackoverflow.com/a/24181138/11567260
parser.add_argument(
"-model-out-name",
default="uda_model",
help="File name to save weights into. Default is model.pt",
)
parser.add_argument(
"-scaler-out-name",
default=None,
help="File name to save scaler into. Default is MODELNAME_scaler.bin",
)
parser.add_argument("-epochs", dest="n_epochs", default=300, type=int, help="Batch size")
parser.add_argument(
"-train-frac",
default=0.75,
type=restricted_float,
help="Fraction of data to use for training",
)
parser.add_argument("-batch-size", default=1000, type=int, help="Batch size")
parser.add_argument("--make-epm-output", action="store_false", help="Write tagged validataion data into root file for EPM")
args = parser.parse_args()
files = [np.load(f) for f in args.filenames]
validation_files = [np.load(f) for f in args.validate]
if args.scaler_out_name == None:
args.scaler_out_name = args.model_out_name + "_scaler"
train_model(
files, validation_files, args.model_out_name, args.scaler_out_name, args.n_epochs, args.train_frac, args.batch_size, args.make_epm_output
)
| [
"numpy.load",
"uda_model.UDAModel",
"argparse.ArgumentParser",
"numpy.argmax",
"joblib.dump",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.exp",
"uproot3.newtree",
"torch.no_grad",
"argparse.ArgumentTypeError",
"matplotlib.rcParams.update",
"torch.optim.lr_scheduler.Red... | [((631, 682), 'numpy.where', 'np.where', (['(pred_tags > 0.5)', '(1 - pred_tags)', 'pred_tags'], {}), '(pred_tags > 0.5, 1 - pred_tags, pred_tags)\n', (639, 682), True, 'import numpy as np\n'), ((2200, 2227), 'torch.manual_seed', 'torch.manual_seed', (['(25031992)'], {}), '(25031992)\n', (2217, 2227), False, 'import torch\n'), ((2232, 2264), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(25031992)'], {}), '(25031992)\n', (2254, 2264), False, 'import torch\n'), ((2281, 2327), 'numpy.concatenate', 'np.concatenate', (["[f['features'] for f in files]"], {}), "([f['features'] for f in files])\n", (2295, 2327), True, 'import numpy as np\n'), ((2859, 2873), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (2871, 2873), False, 'from sklearn.preprocessing import RobustScaler\n'), ((2924, 2969), 'joblib.dump', 'joblib.dump', (['scaler', 'f"""{scaler_out_name}.bin"""'], {}), "(scaler, f'{scaler_out_name}.bin')\n", (2935, 2969), False, 'import joblib\n'), ((4144, 4201), 'numpy.concatenate', 'np.concatenate', (["[f['features'] for f in validation_files]"], {}), "([f['features'] for f in validation_files])\n", (4158, 4201), True, 'import numpy as np\n'), ((6881, 6977), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'factor': '(0.5)', 'min_lr': '(1e-05)', 'patience': '(5)'}), '(optimizer, factor=0.5, min_lr=\n 1e-05, patience=5)\n', (6923, 6977), False, 'import torch\n'), ((13427, 13472), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (13453, 13472), False, 'import matplotlib\n'), ((13478, 13505), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (13488, 13505), True, 'import matplotlib.pyplot as plt\n'), ((13510, 13560), 'matplotlib.pyplot.plot', 'plt.plot', (['all_train_loss'], {'label': '"""MC training loss"""'}), "(all_train_loss, 
label='MC training loss')\n", (13518, 13560), True, 'import matplotlib.pyplot as plt\n'), ((13565, 13616), 'matplotlib.pyplot.plot', 'plt.plot', (['all_test_loss'], {'label': '"""MC validation loss"""'}), "(all_test_loss, label='MC validation loss')\n", (13573, 13616), True, 'import matplotlib.pyplot as plt\n'), ((13621, 13673), 'matplotlib.pyplot.plot', 'plt.plot', (['all_val_loss'], {'label': '"""data validation loss"""'}), "(all_val_loss, label='data validation loss')\n", (13629, 13673), True, 'import matplotlib.pyplot as plt\n'), ((13678, 13764), 'matplotlib.pyplot.plot', 'plt.plot', (['all_train_domain_loss'], {'label': '"""domain training loss"""', 'linestyle': '"""dashed"""'}), "(all_train_domain_loss, label='domain training loss', linestyle=\n 'dashed')\n", (13686, 13764), True, 'import matplotlib.pyplot as plt\n'), ((13764, 13851), 'matplotlib.pyplot.plot', 'plt.plot', (['all_test_domain_loss'], {'label': '"""domain validation loss"""', 'linestyle': '"""dashed"""'}), "(all_test_domain_loss, label='domain validation loss', linestyle=\n 'dashed')\n", (13772, 13851), True, 'import matplotlib.pyplot as plt\n'), ((13851, 13863), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13861, 13863), True, 'import matplotlib.pyplot as plt\n'), ((13868, 13887), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (13878, 13887), True, 'import matplotlib.pyplot as plt\n'), ((13892, 13910), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.6)', '(0.8)'], {}), '(0.6, 0.8)\n', (13900, 13910), True, 'import matplotlib.pyplot as plt\n'), ((13915, 13925), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (13923, 13925), True, 'import matplotlib.pyplot as plt\n'), ((13930, 13966), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""uda_Loss_vs_Epoch.png"""'], {}), "('uda_Loss_vs_Epoch.png')\n", (13941, 13966), True, 'import matplotlib.pyplot as plt\n'), ((13972, 13999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), 
'(figsize=(16, 9))\n', (13982, 13999), True, 'import matplotlib.pyplot as plt\n'), ((14004, 14058), 'matplotlib.pyplot.plot', 'plt.plot', (['all_test_acc'], {'label': '"""MC validation accuracy"""'}), "(all_test_acc, label='MC validation accuracy')\n", (14012, 14058), True, 'import matplotlib.pyplot as plt\n'), ((14063, 14118), 'matplotlib.pyplot.plot', 'plt.plot', (['all_val_acc'], {'label': '"""data validation accuracy"""'}), "(all_val_acc, label='data validation accuracy')\n", (14071, 14118), True, 'import matplotlib.pyplot as plt\n'), ((14123, 14135), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14133, 14135), True, 'import matplotlib.pyplot as plt\n'), ((14140, 14159), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (14150, 14159), True, 'import matplotlib.pyplot as plt\n'), ((14164, 14184), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.48)', '(0.64)'], {}), '(0.48, 0.64)\n', (14172, 14184), True, 'import matplotlib.pyplot as plt\n'), ((14189, 14199), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (14197, 14199), True, 'import matplotlib.pyplot as plt\n'), ((14204, 14244), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""uda_Accuracy_vs_Epoch.png"""'], {}), "('uda_Accuracy_vs_Epoch.png')\n", (14215, 14244), True, 'import matplotlib.pyplot as plt\n'), ((14562, 14633), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Model for Flavour Tagging."""'}), "(description='Train Model for Flavour Tagging.')\n", (14585, 14633), False, 'import argparse\n'), ((693, 748), 'uproot3.recreate', 'uproot3.recreate', (['f"""{epm_fname}.root"""'], {'compression': 'None'}), "(f'{epm_fname}.root', compression=None)\n", (709, 748), False, 'import uproot3\n'), ((786, 861), 'uproot3.newtree', 'uproot3.newtree', (["{'B_TRUEID': np.int32, 'tag': np.int32, 'eta': np.float64}"], {}), "({'B_TRUEID': np.int32, 'tag': np.int32, 'eta': np.float64})\n", (801, 861), False, 'import uproot3\n'), ((2548, 
2617), 'numpy.concatenate', 'np.concatenate', (["(evt_borders, f['evt_borders'][1:] + evt_borders[-1])"], {}), "((evt_borders, f['evt_borders'][1:] + evt_borders[-1]))\n", (2562, 2617), True, 'import numpy as np\n'), ((4471, 4548), 'numpy.concatenate', 'np.concatenate', (["(val_evt_borders, f['evt_borders'][1:] + val_evt_borders[-1])"], {}), "((val_evt_borders, f['evt_borders'][1:] + val_evt_borders[-1]))\n", (4485, 4548), True, 'import numpy as np\n'), ((10770, 10810), 'numpy.mean', 'np.mean', (['((mypreds > 0.5) == test_tags_np)'], {}), '((mypreds > 0.5) == test_tags_np)\n', (10777, 10810), True, 'import numpy as np\n'), ((11851, 11896), 'numpy.mean', 'np.mean', (['((valpreds > 0.5) == val_test_tags_np)'], {}), '((valpreds > 0.5) == val_test_tags_np)\n', (11858, 11896), True, 'import numpy as np\n'), ((14446, 14504), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""{x} not in range (0.0, 1.0]"""'], {}), "(f'{x} not in range (0.0, 1.0]')\n", (14472, 14504), False, 'import argparse\n'), ((15831, 15841), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (15838, 15841), True, 'import numpy as np\n'), ((15891, 15901), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (15898, 15901), True, 'import numpy as np\n'), ((498, 530), 'numpy.where', 'np.where', (['(pred_tags > 0.5)', '(1)', '(-1)'], {}), '(pred_tags > 0.5, 1, -1)\n', (506, 530), True, 'import numpy as np\n'), ((2116, 2141), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2139, 2141), False, 'import torch\n'), ((2339, 2385), 'numpy.concatenate', 'np.concatenate', (["[f['B_TRUEID'] for f in files]"], {}), "([f['B_TRUEID'] for f in files])\n", (2353, 2385), True, 'import numpy as np\n'), ((2414, 2441), 'numpy.where', 'np.where', (['(tags == 521)', '(1)', '(0)'], {}), '(tags == 521, 1, 0)\n', (2422, 2441), True, 'import numpy as np\n'), ((3267, 3318), 'torch.tensor', 'torch.tensor', (['tags[:evt_split]'], {'dtype': 'torch.float32'}), '(tags[:evt_split], dtype=torch.float32)\n', 
(3279, 3318), False, 'import torch\n'), ((3347, 3383), 'torch.tensor', 'torch.tensor', (['features[:track_split]'], {}), '(features[:track_split])\n', (3359, 3383), False, 'import torch\n'), ((3411, 3446), 'torch.tensor', 'torch.tensor', (['idx_vec[:track_split]'], {}), '(idx_vec[:track_split])\n', (3423, 3446), False, 'import torch\n'), ((3511, 3558), 'torch.tensor', 'torch.tensor', (['test_tags_np'], {'dtype': 'torch.float32'}), '(test_tags_np, dtype=torch.float32)\n', (3523, 3558), False, 'import torch\n'), ((3586, 3622), 'torch.tensor', 'torch.tensor', (['features[track_split:]'], {}), '(features[track_split:])\n', (3598, 3622), False, 'import torch\n'), ((3649, 3684), 'torch.tensor', 'torch.tensor', (['idx_vec[track_split:]'], {}), '(idx_vec[track_split:])\n', (3661, 3684), False, 'import torch\n'), ((4217, 4270), 'numpy.concatenate', 'np.concatenate', (["[f['B_ID'] for f in validation_files]"], {}), "([f['B_ID'] for f in validation_files])\n", (4231, 4270), True, 'import numpy as np\n'), ((4303, 4334), 'numpy.where', 'np.where', (['(val_tags == 521)', '(1)', '(0)'], {}), '(val_tags == 521, 1, 0)\n', (4311, 4334), True, 'import numpy as np\n'), ((5074, 5133), 'torch.tensor', 'torch.tensor', (['val_tags[:val_evt_split]'], {'dtype': 'torch.float32'}), '(val_tags[:val_evt_split], dtype=torch.float32)\n', (5086, 5133), False, 'import torch\n'), ((5166, 5210), 'torch.tensor', 'torch.tensor', (['val_features[:val_track_split]'], {}), '(val_features[:val_track_split])\n', (5178, 5210), False, 'import torch\n'), ((5242, 5285), 'torch.tensor', 'torch.tensor', (['val_idx_vec[:val_track_split]'], {}), '(val_idx_vec[:val_track_split])\n', (5254, 5285), False, 'import torch\n'), ((5366, 5417), 'torch.tensor', 'torch.tensor', (['val_test_tags_np'], {'dtype': 'torch.float32'}), '(val_test_tags_np, dtype=torch.float32)\n', (5378, 5417), False, 'import torch\n'), ((5449, 5493), 'torch.tensor', 'torch.tensor', (['val_features[val_track_split:]'], {}), 
'(val_features[val_track_split:])\n', (5461, 5493), False, 'import torch\n'), ((5524, 5567), 'torch.tensor', 'torch.tensor', (['val_idx_vec[val_track_split:]'], {}), '(val_idx_vec[val_track_split:])\n', (5536, 5567), False, 'import torch\n'), ((6790, 6800), 'uda_model.UDAModel', 'UDAModel', ([], {}), '()\n', (6798, 6800), False, 'from uda_model import UDAModel\n'), ((7352, 7396), 'torch.tensor', 'torch.tensor', (['train_borders'], {'dtype': 'torch.int'}), '(train_borders, dtype=torch.int)\n', (7364, 7396), False, 'import torch\n'), ((7508, 7556), 'torch.tensor', 'torch.tensor', (['val_train_borders'], {'dtype': 'torch.int'}), '(val_train_borders, dtype=torch.int)\n', (7520, 7556), False, 'import torch\n'), ((8507, 8569), 'torch.nn.functional.binary_cross_entropy_with_logits', 'nn.functional.binary_cross_entropy_with_logits', (['output', 'target'], {}), '(output, target)\n', (8553, 8569), True, 'import torch.nn as nn\n'), ((14339, 14402), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""{x} not a floating-point literal"""'], {}), "(f'{x} not a floating-point literal')\n", (14365, 14402), False, 'import argparse\n'), ((10280, 10295), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10293, 10295), False, 'import torch\n'), ((11366, 11381), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11379, 11381), False, 'import torch\n'), ((12603, 12627), 'numpy.argmin', 'np.argmin', (['all_test_loss'], {}), '(all_test_loss)\n', (12612, 12627), True, 'import numpy as np\n'), ((12703, 12726), 'numpy.argmax', 'np.argmax', (['all_test_acc'], {}), '(all_test_acc)\n', (12712, 12726), True, 'import numpy as np\n'), ((12796, 12819), 'numpy.argmin', 'np.argmin', (['all_val_loss'], {}), '(all_val_loss)\n', (12805, 12819), True, 'import numpy as np\n'), ((12888, 12910), 'numpy.argmax', 'np.argmax', (['all_val_acc'], {}), '(all_val_acc)\n', (12897, 12910), True, 'import numpy as np\n'), ((7848, 7866), 'numpy.exp', 'np.exp', (['(-gamma * p)'], {}), '(-gamma * 
p)\n', (7854, 7866), True, 'import numpy as np\n'), ((12250, 12286), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['test_tags_np', 'mypreds'], {}), '(test_tags_np, mypreds)\n', (12263, 12286), False, 'from sklearn.metrics import roc_auc_score\n'), ((12391, 12432), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['val_test_tags_np', 'valpreds'], {}), '(val_test_tags_np, valpreds)\n', (12404, 12432), False, 'from sklearn.metrics import roc_auc_score\n'), ((9012, 9040), 'torch.zeros_like', 'torch.zeros_like', (['uda_output'], {}), '(uda_output)\n', (9028, 9040), False, 'import torch\n'), ((9202, 9233), 'torch.ones_like', 'torch.ones_like', (['val_uda_output'], {}), '(val_uda_output)\n', (9217, 9233), False, 'import torch\n'), ((1344, 1382), 'torch.unique', 'torch.unique', (['tags'], {'return_counts': '(True)'}), '(tags, return_counts=True)\n', (1356, 1382), False, 'import torch\n'), ((10464, 10526), 'torch.nn.functional.binary_cross_entropy_with_logits', 'nn.functional.binary_cross_entropy_with_logits', (['output', 'target'], {}), '(output, target)\n', (10510, 10526), True, 'import torch.nn as nn\n'), ((11549, 11611), 'torch.nn.functional.binary_cross_entropy_with_logits', 'nn.functional.binary_cross_entropy_with_logits', (['output', 'target'], {}), '(output, target)\n', (11595, 11611), True, 'import torch.nn as nn\n'), ((10669, 10697), 'torch.zeros_like', 'torch.zeros_like', (['uda_output'], {}), '(uda_output)\n', (10685, 10697), False, 'import torch\n'), ((11754, 11781), 'torch.ones_like', 'torch.ones_like', (['uda_output'], {}), '(uda_output)\n', (11769, 11781), False, 'import torch\n')] |
"""
The ComputationalBasisPOVMEffect class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import functools as _functools
import itertools as _itertools
import numpy as _np
from pygsti.modelmembers.povms.effect import POVMEffect as _POVMEffect
from pygsti.modelmembers import term as _term
from pygsti.evotypes import Evotype as _Evotype
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
from pygsti.baseobjs.polynomial import Polynomial as _Polynomial
try:
from pygsti.tools import fastcalc as _fastcalc
except ImportError:
_fastcalc = None
class ComputationalBasisPOVMEffect(_POVMEffect):
"""
A static POVM effect that is tensor product of 1-qubit Z-eigenstates.
This is called a "computational basis state" in many contexts.
Parameters
----------
zvals : iterable
A list or other iterable of integer 0 or 1 outcomes specifying
which computational basis element this object represents. The
length of `zvals` gives the total number of qubits.
basis : Basis or {'pp','gm','std'}, optional
The basis used to construct the Hilbert-Schmidt space representation
of this state as a super-ket.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
state_space : StateSpace, optional
The state space for this operation. If `None` a default state space
with the appropriate number of qubits is used.
"""
@classmethod
def from_state_vector(cls, vec, basis='pp', evotype="default", state_space=None):
"""
Create a new ComputationalBasisPOVMEffect from a dense vector.
Parameters
----------
vec : numpy.ndarray
A state vector specifying a computational basis state in the
standard basis. This vector has length 4^n for n qubits.
basis : Basis or {'pp','gm','std'}, optional
The basis of `vec` as a super-ket.
evotype : Evotype or str, optional
The evolution type of the resulting effect vector. The special
value `"default"` is equivalent to specifying the value of
`pygsti.evotypes.Evotype.default_evotype`.
state_space : StateSpace, optional
The state space for this operation. If `None` a default state space
with the appropriate number of qubits is used.
Returns
-------
ComputationalBasisPOVMEffect
"""
#if evotype in ('stabilizer', 'statevec'):
# nqubits = int(round(_np.log2(len(vec))))
# v0 = _np.array((1, 0), complex) # '0' qubit state as complex state vec
# v1 = _np.array((0, 1), complex) # '1' qubit state as complex state vec
#else:
nqubits = int(round(_np.log2(len(vec)) / 2))
v0 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, 1), 'd') # '0' qubit state as Pauli dmvec
v1 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, -1), 'd') # '1' qubit state as Pauli dmvec
v = (v0, v1)
for zvals in _itertools.product(*([(0, 1)] * nqubits)):
testvec = _functools.reduce(_np.kron, [v[i] for i in zvals])
if _np.allclose(testvec, vec.flat):
return cls(zvals, basis, evotype, state_space)
raise ValueError(("Given `vec` is not a z-basis product state - "
"cannot construct ComputationalBasisPOVMEffect"))
@classmethod
def from_pure_vector(cls, purevec, basis='pp', evotype="default", state_space=None):
"""
TODO: update docstring
Create a new StabilizerEffectVec from a pure-state vector.
Currently, purevec must be a single computational basis state (it
cannot be a superpostion of multiple of them).
Parameters
----------
purevec : numpy.ndarray
A complex-valued state vector specifying a pure state in the
standard computational basis. This vector has length 2^n for
n qubits.
basis : Basis or {'pp','gm','std'}, optional
The basis of `vec` as a super-ket.
evotype : Evotype or str, optional
The evolution type of the resulting effect vector. The special
value `"default"` is equivalent to specifying the value of
`pygsti.evotypes.Evotype.default_evotype`.
state_space : StateSpace, optional
The state space for this operation. If `None` a default state space
with the appropriate number of qubits is used.
Returns
-------
ComputationalBasisPOVMEffect
"""
nqubits = int(round(_np.log2(len(purevec))))
v = (_np.array([1, 0], 'd'), _np.array([0, 1], 'd')) # (v0,v1)
for zvals in _itertools.product(*([(0, 1)] * nqubits)):
testvec = _functools.reduce(_np.kron, [v[i] for i in zvals])
if _np.allclose(testvec, purevec.flat):
return cls(zvals, basis, evotype, state_space)
raise ValueError(("Given `purevec` must be a z-basis product state - "
"cannot construct StabilizerEffectVec"))
def __init__(self, zvals, basis='pp', evotype="default", state_space=None):
zvals = _np.ascontiguousarray(_np.array(zvals, _np.int64))
state_space = _statespace.default_space_for_num_qubits(len(zvals)) if (state_space is None) \
else _statespace.StateSpace.cast(state_space)
basis = _Basis.cast(basis, state_space.dim) # basis for Hilbert-Schmidt (superop) space
evotype = _Evotype.cast(evotype)
self._evotype = evotype # set this before call to _State.__init__ so self.to_dense() can work...
rep = evotype.create_computational_effect_rep(zvals, basis, state_space)
_POVMEffect.__init__(self, rep, evotype)
def to_dense(self, on_space='minimal', scratch=None):
"""
Return this POVM effect vector as a (dense) numpy array.
The memory in `scratch` maybe used when it is not-None.
Parameters
----------
on_space : {'minimal', 'Hilbert', 'HilbertSchmidt'}
The space that the returned dense operation acts upon. For unitary matrices and bra/ket vectors,
use `'Hilbert'`. For superoperator matrices and super-bra/super-ket vectors use `'HilbertSchmidt'`.
`'minimal'` means that `'Hilbert'` is used if possible given this operator's evolution type, and
otherwise `'HilbertSchmidt'` is used.
scratch : numpy.ndarray, optional
scratch space available for use.
Returns
-------
numpy.ndarray
"""
return self._rep.to_dense(on_space)
def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False):
"""
Get the `order`-th order Taylor-expansion terms of this POVM effect vector.
This function either constructs or returns a cached list of the terms at
the given order. Each term is "rank-1", meaning that it is a state
preparation followed by or POVM effect preceded by actions on a
density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
POVMEffect's parameters, where the polynomial's variable indices index the
*global* parameters of the POVMEffect's parent (usually a :class:`Model`)
, not the POVMEffect's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
max_polynomial_vars : int, optional
maximum number of variables the created polynomials can have.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
if order == 0: # only 0-th order term exists
coeff = _Polynomial({(): 1.0}, max_polynomial_vars)
terms = [_term.RankOnePolynomialEffectTerm.create_from(coeff, self, self,
self._evotype, self.state_space)]
if return_coeff_polys:
coeffs_as_compact_polys = coeff.compact(complex_coeff_tape=True)
return terms, coeffs_as_compact_polys
else:
return terms # Cache terms in FUTURE?
else:
if return_coeff_polys:
vtape = _np.empty(0, _np.int64)
ctape = _np.empty(0, complex)
return [], (vtape, ctape)
else:
return []
@property
def num_params(self):
"""
Get the number of independent parameters which specify this POVM effect vector.
Returns
-------
int
the number of independent parameters.
"""
return 0 # no parameters
def to_vector(self):
"""
Get the POVM effect vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
return _np.array([], 'd') # no parameters
def from_vector(self, v, close=False, dirty_value=True):
"""
Initialize the POVM effect vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of POVM effect vector parameters. Length
must == num_params()
close : bool, optional
Whether `v` is close to this POVM effect vector's current
set of parameters. Under some circumstances, when this
is true this call can be completed more quickly.
dirty_value : bool, optional
The value to set this object's "dirty flag" to before exiting this
call. This is passed as an argument so it can be updated *recursively*.
Leave this set to `True` unless you know what you're doing.
Returns
-------
None
"""
assert(len(v) == 0) # should be no parameters, and nothing to do
def __str__(self):
nQubits = len(self._rep.zvals)
s = "Computational Z-basis POVM effect vec for %d qubits w/z-values: %s" % (nQubits, str(self._rep.zvals))
return s
| [
"pygsti.modelmembers.povms.effect.POVMEffect.__init__",
"numpy.empty",
"numpy.allclose",
"pygsti.baseobjs.basis.Basis.cast",
"pygsti.baseobjs.polynomial.Polynomial",
"pygsti.evotypes.Evotype.cast",
"pygsti.baseobjs.statespace.StateSpace.cast",
"numpy.array",
"pygsti.modelmembers.term.RankOnePolynomi... | [((3858, 3899), 'itertools.product', '_itertools.product', (['*([(0, 1)] * nqubits)'], {}), '(*([(0, 1)] * nqubits))\n', (3876, 3899), True, 'import itertools as _itertools\n'), ((5581, 5622), 'itertools.product', '_itertools.product', (['*([(0, 1)] * nqubits)'], {}), '(*([(0, 1)] * nqubits))\n', (5599, 5622), True, 'import itertools as _itertools\n'), ((6283, 6318), 'pygsti.baseobjs.basis.Basis.cast', '_Basis.cast', (['basis', 'state_space.dim'], {}), '(basis, state_space.dim)\n', (6294, 6318), True, 'from pygsti.baseobjs.basis import Basis as _Basis\n'), ((6383, 6405), 'pygsti.evotypes.Evotype.cast', '_Evotype.cast', (['evotype'], {}), '(evotype)\n', (6396, 6405), True, 'from pygsti.evotypes import Evotype as _Evotype\n'), ((6601, 6641), 'pygsti.modelmembers.povms.effect.POVMEffect.__init__', '_POVMEffect.__init__', (['self', 'rep', 'evotype'], {}), '(self, rep, evotype)\n', (6621, 6641), True, 'from pygsti.modelmembers.povms.effect import POVMEffect as _POVMEffect\n'), ((10554, 10572), 'numpy.array', '_np.array', (['[]', '"""d"""'], {}), "([], 'd')\n", (10563, 10572), True, 'import numpy as _np\n'), ((3655, 3683), 'numpy.array', '_np.array', (['(1, 0, 0, 1)', '"""d"""'], {}), "((1, 0, 0, 1), 'd')\n", (3664, 3683), True, 'import numpy as _np\n'), ((3751, 3780), 'numpy.array', '_np.array', (['(1, 0, 0, -1)', '"""d"""'], {}), "((1, 0, 0, -1), 'd')\n", (3760, 3780), True, 'import numpy as _np\n'), ((3923, 3973), 'functools.reduce', '_functools.reduce', (['_np.kron', '[v[i] for i in zvals]'], {}), '(_np.kron, [v[i] for i in zvals])\n', (3940, 3973), True, 'import functools as _functools\n'), ((3989, 4020), 'numpy.allclose', '_np.allclose', (['testvec', 'vec.flat'], {}), '(testvec, vec.flat)\n', (4001, 4020), True, 'import numpy as _np\n'), ((5501, 5523), 'numpy.array', '_np.array', (['[1, 0]', '"""d"""'], {}), "([1, 0], 'd')\n", (5510, 5523), True, 'import numpy as _np\n'), ((5525, 5547), 'numpy.array', '_np.array', 
(['[0, 1]', '"""d"""'], {}), "([0, 1], 'd')\n", (5534, 5547), True, 'import numpy as _np\n'), ((5646, 5696), 'functools.reduce', '_functools.reduce', (['_np.kron', '[v[i] for i in zvals]'], {}), '(_np.kron, [v[i] for i in zvals])\n', (5663, 5696), True, 'import functools as _functools\n'), ((5712, 5747), 'numpy.allclose', '_np.allclose', (['testvec', 'purevec.flat'], {}), '(testvec, purevec.flat)\n', (5724, 5747), True, 'import numpy as _np\n'), ((6077, 6104), 'numpy.array', '_np.array', (['zvals', '_np.int64'], {}), '(zvals, _np.int64)\n', (6086, 6104), True, 'import numpy as _np\n'), ((6226, 6266), 'pygsti.baseobjs.statespace.StateSpace.cast', '_statespace.StateSpace.cast', (['state_space'], {}), '(state_space)\n', (6253, 6266), True, 'from pygsti.baseobjs import statespace as _statespace\n'), ((9314, 9357), 'pygsti.baseobjs.polynomial.Polynomial', '_Polynomial', (['{(): 1.0}', 'max_polynomial_vars'], {}), '({(): 1.0}, max_polynomial_vars)\n', (9325, 9357), True, 'from pygsti.baseobjs.polynomial import Polynomial as _Polynomial\n'), ((3641, 3652), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (3649, 3652), True, 'import numpy as _np\n'), ((3737, 3748), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (3745, 3748), True, 'import numpy as _np\n'), ((9379, 9481), 'pygsti.modelmembers.term.RankOnePolynomialEffectTerm.create_from', '_term.RankOnePolynomialEffectTerm.create_from', (['coeff', 'self', 'self', 'self._evotype', 'self.state_space'], {}), '(coeff, self, self, self.\n _evotype, self.state_space)\n', (9424, 9481), True, 'from pygsti.modelmembers import term as _term\n'), ((9862, 9885), 'numpy.empty', '_np.empty', (['(0)', '_np.int64'], {}), '(0, _np.int64)\n', (9871, 9885), True, 'import numpy as _np\n'), ((9910, 9931), 'numpy.empty', '_np.empty', (['(0)', 'complex'], {}), '(0, complex)\n', (9919, 9931), True, 'import numpy as _np\n')] |
import numpy as np
import torch.nn as nn
from .layer_ops import ModuleOperation
from .simple_model import SimpleModel, SimpleModelOperation
from src.utils import param_sizes, weight_vector
class ElbowModel(ModuleOperation):
    """Piecewise-linear ("elbow") path between two endpoint weight vectors
    (w_1, w_2) joined through a single trainable bend point w_3.

    While training, each forward pass evaluates a SimpleModel at a randomly
    sampled point on one of the two segments; at eval time w_3 itself is used.
    """

    def __init__(self, w_1=None, w_2=None, w_3=None):
        # Each missing vector defaults to a freshly initialized SimpleModel's
        # flattened weights.  Order of construction is preserved so any RNG
        # consumption matches the original implementation.
        if w_1 is None:
            w_1 = weight_vector(SimpleModel().parameters())
        if w_2 is None:
            w_2 = weight_vector(SimpleModel().parameters())
        if w_3 is None:
            w_3 = weight_vector(SimpleModel().parameters())
        self.w_1 = w_1
        self.w_2 = w_2
        # Only the bend point is trainable (see parameters()).
        self.w_3 = nn.Parameter(w_3)

    def sample(self):
        """Draw a random weight vector from one of the two segments."""
        alpha = np.random.random()
        beta = np.random.random()
        # Pick which endpoint's segment to sample from...
        endpoint = self.w_1 if beta > 0.5 else self.w_2
        # ...then interpolate between that endpoint and the bend point.
        return endpoint * (1 - alpha) + self.w_3 * alpha

    def run_from_weights(self, w, x):
        """Evaluate input `x` through a SimpleModel whose weights are `w`."""
        op = SimpleModelOperation(w).train(self.training)
        return op(x)

    def __call__(self, x):
        # Sample a segment point in training mode; use the bend point otherwise.
        w = self.sample() if self.training else self.w_3
        return self.run_from_weights(w, x)

    def to(self, *args, **kwargs):
        self.w_1 = self.w_1.to(*args, **kwargs)
        self.w_2 = self.w_2.to(*args, **kwargs)
        self.w_3 = nn.Parameter(self.w_3.to(*args, **kwargs))
        return self

    def parameters(self):
        return [self.w_3]
| [
"numpy.random.random"
] | [((601, 619), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (617, 619), True, 'import numpy as np\n'), ((635, 653), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (651, 653), True, 'import numpy as np\n')] |
"""
Title: Making new layers and models via subclassing
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2020/04/13
Description: Complete guide to writing `Layer` and `Model` objects from scratch.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
"""
## The `Layer` class: the combination of state (weights) and some computation
One of the central abstractions in Keras is the `Layer` class. A layer
encapsulates both a state (the layer's "weights") and a transformation from
inputs to outputs (a "call", the layer's forward pass).
Here's a densely-connected layer. It has a state: the variables `w` and `b`.
"""
class Linear(keras.layers.Layer):
    """Densely-connected layer: computes `inputs @ w + b`."""
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        # Weight matrix: randomly initialized, trainable.
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        # Bias vector: zero-initialized, trainable.
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )
    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
"""
You would use a layer by calling it on some tensor input(s), much like a Python
function.
"""
# Call the layer on a test input, just like a Python function.
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
"""
Note that the weights `w` and `b` are automatically tracked by the layer upon
being set as layer attributes:
"""
# Variables assigned as layer attributes are tracked in `layer.weights`.
assert linear_layer.weights == [linear_layer.w, linear_layer.b]
"""
Note you also have access to a quicker shortcut for adding weight to a layer:
the `add_weight()` method:
"""
class Linear(keras.layers.Layer):
    """Same dense layer, built with the `add_weight()` shortcut."""
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        self.w = self.add_weight(
            shape=(input_dim, units), initializer="random_normal", trainable=True
        )
        self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
# Same usage as before, now with add_weight-created variables.
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
"""
## Layers can have non-trainable weights
Besides trainable weights, you can add non-trainable weights to a layer as
well. Such weights are meant not to be taken into account during
backpropagation, when you are training the layer.
Here's how to add and use a non-trainable weight:
"""
class ComputeSum(keras.layers.Layer):
    """Stateful layer that accumulates a running sum of its inputs in a non-trainable weight."""
    def __init__(self, input_dim):
        super(ComputeSum, self).__init__()
        # trainable=False: tracked by the layer, but excluded from backprop.
        self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False)
    def call(self, inputs):
        # Add this batch's column-wise sum into the running total.
        self.total.assign_add(tf.reduce_sum(inputs, axis=0))
        return self.total
# Each call adds to the running total (the layer is stateful).
x = tf.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())
"""
It's part of `layer.weights`, but it gets categorized as a non-trainable weight:
"""
print("weights:", len(my_sum.weights))
print("non-trainable weights:", len(my_sum.non_trainable_weights))
# It's not included in the trainable weights:
print("trainable_weights:", my_sum.trainable_weights)
"""
## Best practice: deferring weight creation until the shape of the inputs is known
Our `Linear` layer above took an `input_dim` argument that was used to compute
the shape of the weights `w` and `b` in `__init__()`:
"""
class Linear(keras.layers.Layer):
    """Dense layer whose weight shapes are fixed at construction via `input_dim`."""
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        # Shapes must be known here -- `input_dim` is required up front.
        self.w = self.add_weight(
            shape=(input_dim, units), initializer="random_normal", trainable=True
        )
        self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
"""
In many cases, you may not know in advance the size of your inputs, and you
would like to lazily create weights when that value becomes known, some time
after instantiating the layer.
In the Keras API, we recommend creating layer weights in the `build(self,
inputs_shape)` method of your layer. Like this:
"""
class Linear(keras.layers.Layer):
    """Dense layer that creates its weights lazily in `build()`, once the input shape is known."""
    def __init__(self, units=32):
        super(Linear, self).__init__()
        self.units = units
    def build(self, input_shape):
        # Weight shapes are derived from the first input's last dimension.
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )
    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
"""
The `__call__()` method of your layer will automatically run `build()` the first time
it is called. You now have a layer that's lazy and thus easier to use:
"""
# At instantiation, we don't know on what inputs this is going to get called
linear_layer = Linear(32)
# The layer's weights are created dynamically the first time the layer is called
y = linear_layer(x)
"""
## Layers are recursively composable
If you assign a Layer instance as an attribute of another Layer, the outer layer
will start tracking the weights of the inner layer.
We recommend creating such sublayers in the `__init__()` method (since the
sublayers will typically have a build method, they will be built when the
outer layer gets built).
"""
# Let's assume we are reusing the Linear class
# with a `build` method that we defined above.
class MLPBlock(keras.layers.Layer):
    """Three-layer MLP built from nested `Linear` sublayers; their weights are tracked recursively."""
    def __init__(self):
        super(MLPBlock, self).__init__()
        # Sublayers created in __init__ get built when the outer layer is built.
        self.linear_1 = Linear(32)
        self.linear_2 = Linear(32)
        self.linear_3 = Linear(1)
    def call(self, inputs):
        x = self.linear_1(inputs)
        x = tf.nn.relu(x)
        x = self.linear_2(x)
        x = tf.nn.relu(x)
        return self.linear_3(x)
mlp = MLPBlock()
y = mlp(tf.ones(shape=(3, 64)))  # The first call to the `mlp` will create the weights
# 6 weights total: one (w, b) pair per nested Linear sublayer.
print("weights:", len(mlp.weights))
print("trainable weights:", len(mlp.trainable_weights))
"""
## The `add_loss()` method
When writing the `call()` method of a layer, you can create loss tensors that
you will want to use later, when writing your training loop. This is doable by
calling `self.add_loss(value)`:
"""
# A layer that creates an activity regularization loss
class ActivityRegularizationLayer(keras.layers.Layer):
    """Identity layer that registers `rate * sum(inputs)` as an activity-regularization loss."""
    def __init__(self, rate=1e-2):
        super(ActivityRegularizationLayer, self).__init__()
        self.rate = rate
    def call(self, inputs):
        # Registered losses are collected in `layer.losses` by the framework.
        self.add_loss(self.rate * tf.reduce_sum(inputs))
        return inputs
"""
These losses (including those created by any inner layer) can be retrieved via
`layer.losses`. This property is reset at the start of every `__call__()` to
the top-level layer, so that `layer.losses` always contains the loss values
created during the last forward pass.
"""
class OuterLayer(keras.layers.Layer):
    """Wrapper layer demonstrating that inner-layer losses propagate to the outer layer."""
    def __init__(self):
        super(OuterLayer, self).__init__()
        self.activity_reg = ActivityRegularizationLayer(1e-2)
    def call(self, inputs):
        return self.activity_reg(inputs)
layer = OuterLayer()
assert len(layer.losses) == 0  # No losses yet since the layer has never been called
# NOTE(review): tf.zeros(1, 1) passes 1 as the dtype argument (enum 1 == float32);
# tf.zeros((1, 1)) would be the clearer spelling -- confirm before changing.
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1  # We created one loss value
# `layer.losses` gets reset at the start of each __call__
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1  # This is the loss created during the call above
"""
In addition, the `losses` property also contains regularization losses created
for the weights of any inner layer:
"""
class OuterLayerWithKernelRegularizer(keras.layers.Layer):
    """Wrapper layer whose inner Dense carries an L2 kernel regularizer, surfacing weight losses."""
    def __init__(self):
        super(OuterLayerWithKernelRegularizer, self).__init__()
        # The kernel regularizer contributes to `layer.losses` automatically.
        self.dense = keras.layers.Dense(
            32, kernel_regularizer=tf.keras.regularizers.l2(1e-3)
        )
    def call(self, inputs):
        return self.dense(inputs)
layer = OuterLayerWithKernelRegularizer()
_ = layer(tf.zeros((1, 1)))
# This is `1e-3 * sum(layer.dense.kernel ** 2)`,
# created by the `kernel_regularizer` above.
print(layer.losses)
"""
These losses are meant to be taken into account when writing training loops,
like this:
```python
# Instantiate an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Iterate over the batches of a dataset.
for x_batch_train, y_batch_train in train_dataset:
with tf.GradientTape() as tape:
logits = layer(x_batch_train) # Logits for this minibatch
# Loss value for this minibatch
loss_value = loss_fn(y_batch_train, logits)
# Add extra losses created during this forward pass:
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
```
"""
"""
For a detailed guide about writing training loops, see the
[guide to writing a training loop from scratch](/guides/writing_a_training_loop_from_scratch/).
These losses also work seamlessly with `fit()` (they get automatically summed
and added to the main loss, if any):
"""
import numpy as np
inputs = keras.Input(shape=(3,))
outputs = ActivityRegularizationLayer()(inputs)
model = keras.Model(inputs, outputs)
# If there is a loss passed in `compile`, the regularization
# losses get added to it
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# It's also possible not to pass any loss in `compile`,
# since the model already has a loss to minimize, via the `add_loss`
# call during the forward pass!
model.compile(optimizer="adam")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
"""
## The `add_metric()` method
Similarly to `add_loss()`, layers also have an `add_metric()` method
for tracking the moving average of a quantity during training.
Consider the following layer: a "logistic endpoint" layer.
It takes as inputs predictions & targets, it computes a loss which it tracks
via `add_loss()`, and it computes an accuracy scalar, which it tracks via
`add_metric()`.
"""
class LogisticEndpoint(keras.layers.Layer):
    """Endpoint layer: tracks binary cross-entropy via add_loss() and accuracy via add_metric()."""
    def __init__(self, name=None):
        super(LogisticEndpoint, self).__init__(name=name)
        self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
        self.accuracy_fn = keras.metrics.BinaryAccuracy()
    def call(self, targets, logits, sample_weights=None):
        # Compute the training-time loss value and add it
        # to the layer using `self.add_loss()`.
        loss = self.loss_fn(targets, logits, sample_weights)
        self.add_loss(loss)
        # Log accuracy as a metric and add it
        # to the layer using `self.add_metric()`.
        acc = self.accuracy_fn(targets, logits, sample_weights)
        self.add_metric(acc, name="accuracy")
        # Return the inference-time prediction tensor (for `.predict()`).
        return tf.nn.softmax(logits)
"""
Metrics tracked in this way are accessible via `layer.metrics`:
"""
# Metrics registered via add_metric() show up in `layer.metrics`.
layer = LogisticEndpoint()
targets = tf.ones((2, 2))
logits = tf.ones((2, 2))
y = layer(targets, logits)
print("layer.metrics:", layer.metrics)
print("current accuracy value:", float(layer.metrics[0].result()))
"""
Just like for `add_loss()`, these metrics are tracked by `fit()`:
"""
# The endpoint layer's loss/metric are also tracked by fit().
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
predictions = LogisticEndpoint(name="predictions")(logits, targets)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam")
data = {
    "inputs": np.random.random((3, 3)),
    "targets": np.random.random((3, 10)),
}
model.fit(data)
"""
## You can optionally enable serialization on your layers
If you need your custom layers to be serializable as part of a
[Functional model](/guides/functional_api/), you can optionally implement a `get_config()`
method:
"""
class Linear(keras.layers.Layer):
    """Lazily-built dense layer that is serializable via `get_config()`."""
    def __init__(self, units=32):
        super(Linear, self).__init__()
        self.units = units
    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )
    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
    def get_config(self):
        # Everything needed to reconstruct this layer via from_config().
        return {"units": self.units}
# Now you can recreate the layer from its config:
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
"""
Note that the `__init__()` method of the base `Layer` class takes some keyword
arguments, in particular a `name` and a `dtype`. It's good practice to pass
these arguments to the parent class in `__init__()` and to include them in the
layer config:
"""
class Linear(keras.layers.Layer):
    """Serializable dense layer that forwards base-class kwargs (name, dtype, ...) and includes them in its config."""
    def __init__(self, units=32, **kwargs):
        # Pass `name`, `dtype`, etc. through to the base Layer.
        super(Linear, self).__init__(**kwargs)
        self.units = units
    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )
    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
    def get_config(self):
        # Merge base-class config (name, dtype, ...) with this layer's own state.
        config = super(Linear, self).get_config()
        config.update({"units": self.units})
        return config
# Round-trip the layer through its config again.
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
"""
If you need more flexibility when deserializing the layer from its config, you
can also override the `from_config()` class method. This is the base
implementation of `from_config()`:
```python
def from_config(cls, config):
return cls(**config)
```
To learn more about serialization and saving, see the complete
[guide to saving and serializing models](/guides/serialization_and_saving/).
"""
"""
## Privileged `training` argument in the `call()` method
Some layers, in particular the `BatchNormalization` layer and the `Dropout`
layer, have different behaviors during training and inference. For such
layers, it is standard practice to expose a `training` (boolean) argument in
the `call()` method.
By exposing this argument in `call()`, you enable the built-in training and
evaluation loops (e.g. `fit()`) to correctly use the layer in training and
inference.
"""
class CustomDropout(keras.layers.Layer):
    """Dropout layer exposing the privileged `training` argument: drops only in training mode."""
    def __init__(self, rate, **kwargs):
        super(CustomDropout, self).__init__(**kwargs)
        self.rate = rate
    def call(self, inputs, training=None):
        # `training=None` (the default) falls through to the identity branch.
        if training:
            return tf.nn.dropout(inputs, rate=self.rate)
        return inputs
"""
## Privileged `mask` argument in the `call()` method
The other privileged argument supported by `call()` is the `mask` argument.
You will find it in all Keras RNN layers. A mask is a boolean tensor (one
boolean value per timestep in the input) used to skip certain input timesteps
when processing timeseries data.
Keras will automatically pass the correct `mask` argument to `__call__()` for
layers that support it, when a mask is generated by a prior layer.
Mask-generating layers are the `Embedding`
layer configured with `mask_zero=True`, and the `Masking` layer.
To learn more about masking and how to write masking-enabled layers, please
check out the guide
["understanding padding and masking"](/guides/understanding_masking_and_padding/).
"""
"""
## The `Model` class
In general, you will use the `Layer` class to define inner computation blocks,
and will use the `Model` class to define the outer model -- the object you
will train.
For instance, in a ResNet50 model, you would have several ResNet blocks
subclassing `Layer`, and a single `Model` encompassing the entire ResNet50
network.
The `Model` class has the same API as `Layer`, with the following differences:
- It exposes built-in training, evaluation, and prediction loops
(`model.fit()`, `model.evaluate()`, `model.predict()`).
- It exposes the list of its inner layers, via the `model.layers` property.
- It exposes saving and serialization APIs (`save()`, `save_weights()`...)
Effectively, the `Layer` class corresponds to what we refer to in the
literature as a "layer" (as in "convolution layer" or "recurrent layer") or as
a "block" (as in "ResNet block" or "Inception block").
Meanwhile, the `Model` class corresponds to what is referred to in the
literature as a "model" (as in "deep learning model") or as a "network" (as in
"deep neural network").
So if you're wondering, "should I use the `Layer` class or the `Model` class?",
ask yourself: will I need to call `fit()` on it? Will I need to call `save()`
on it? If so, go with `Model`. If not (either because your class is just a block
in a bigger system, or because you are writing training & saving code yourself),
use `Layer`.
For instance, we could take our mini-resnet example above, and use it to build
a `Model` that we could train with `fit()`, and that we could save with
`save_weights()`:
"""
"""
```python
class ResNet(tf.keras.Model):
def __init__(self, num_classes=1000):
super(ResNet, self).__init__()
self.block_1 = ResNetBlock()
self.block_2 = ResNetBlock()
self.global_pool = layers.GlobalAveragePooling2D()
self.classifier = Dense(num_classes)
def call(self, inputs):
x = self.block_1(inputs)
x = self.block_2(x)
x = self.global_pool(x)
return self.classifier(x)
resnet = ResNet()
dataset = ...
resnet.fit(dataset, epochs=10)
resnet.save(filepath)
```
"""
"""
## Putting it all together: an end-to-end example
Here's what you've learned so far:
- A `Layer` encapsulates a state (created in `__init__()` or `build()`) and some
computation (defined in `call()`).
- Layers can be recursively nested to create new, bigger computation blocks.
- Layers can create and track losses (typically regularization losses) as well
as metrics, via `add_loss()` and `add_metric()`
- The outer container, the thing you want to train, is a `Model`. A `Model` is
just like a `Layer`, but with added training and serialization utilities.
Let's put all of these things together into an end-to-end example: we're going
to implement a Variational AutoEncoder (VAE). We'll train it on MNIST digits.
Our VAE will be a subclass of `Model`, built as a nested composition of layers
that subclass `Layer`. It will feature a regularization loss (KL divergence).
"""
from tensorflow.keras import layers
class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        # Reparameterization: z = mean + std * eps, with std = exp(0.5 * log_var).
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
    """Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
    def __init__(self, latent_dim=32, intermediate_dim=64, name="encoder", **kwargs):
        super(Encoder, self).__init__(name=name, **kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
        # Two heads: mean and log-variance of the latent Gaussian.
        self.dense_mean = layers.Dense(latent_dim)
        self.dense_log_var = layers.Dense(latent_dim)
        self.sampling = Sampling()
    def call(self, inputs):
        x = self.dense_proj(inputs)
        z_mean = self.dense_mean(x)
        z_log_var = self.dense_log_var(x)
        # Sample z via the reparameterized Sampling layer.
        z = self.sampling((z_mean, z_log_var))
        return z_mean, z_log_var, z
class Decoder(layers.Layer):
    """Converts z, the encoded digit vector, back into a readable digit."""
    def __init__(self, original_dim, intermediate_dim=64, name="decoder", **kwargs):
        super(Decoder, self).__init__(name=name, **kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
        # Sigmoid output keeps pixel reconstructions in [0, 1].
        self.dense_output = layers.Dense(original_dim, activation="sigmoid")
    def call(self, inputs):
        x = self.dense_proj(inputs)
        return self.dense_output(x)
class VariationalAutoEncoder(keras.Model):
    """Combines the encoder and decoder into an end-to-end model for training."""
    def __init__(
        self,
        original_dim,
        intermediate_dim=64,
        latent_dim=32,
        name="autoencoder",
        **kwargs
    ):
        super(VariationalAutoEncoder, self).__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim)
        self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)
    def call(self, inputs):
        z_mean, z_log_var, z = self.encoder(inputs)
        reconstructed = self.decoder(z)
        # Add KL divergence regularization loss.
        # KL(q(z|x) || N(0, I)) = -0.5 * mean(1 + log_var - mean^2 - exp(log_var))
        kl_loss = -0.5 * tf.reduce_mean(
            z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1
        )
        self.add_loss(kl_loss)
        return reconstructed
"""
Let's write a simple training loop on MNIST:
"""
original_dim = 784
vae = VariationalAutoEncoder(original_dim, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()
# MNIST images flattened to 784-dim vectors and scaled to [0, 1].
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
epochs = 2
# Iterate over epochs.
for epoch in range(epochs):
    print("Start of epoch %d" % (epoch,))
    # Iterate over the batches of the dataset.
    for step, x_batch_train in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            reconstructed = vae(x_batch_train)
            # Compute reconstruction loss
            loss = mse_loss_fn(x_batch_train, reconstructed)
            loss += sum(vae.losses)  # Add KLD regularization loss
        grads = tape.gradient(loss, vae.trainable_weights)
        optimizer.apply_gradients(zip(grads, vae.trainable_weights))
        loss_metric(loss)
        if step % 100 == 0:
            print("step %d: mean loss = %.4f" % (step, loss_metric.result()))
"""
Note that since the VAE is subclassing `Model`, it features built-in training
loops. So you could also have trained it like this:
"""
# Equivalent training using the built-in fit() loop (VAE subclasses Model).
vae = VariationalAutoEncoder(784, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=2, batch_size=64)
"""
## Beyond object-oriented development: the Functional API
Was this example too much object-oriented development for you? You can also
build models using the [Functional API](/guides/functional_api/). Importantly,
choosing one style or another does not prevent you from leveraging components
written in the other style: you can always mix-and-match.
For instance, the Functional API example below reuses the same `Sampling` layer
we defined in the example above:
"""
original_dim = 784
intermediate_dim = 64
latent_dim = 32
# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name="encoder_input")
x = layers.Dense(intermediate_dim, activation="relu")(original_inputs)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
# Reuses the subclassed Sampling layer inside a Functional graph.
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name="encoder")
# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name="z_sampling")
x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
outputs = layers.Dense(original_dim, activation="sigmoid")(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name="decoder")
# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name="vae")
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)
# Train.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
"""
For more information, make sure to read the [Functional API guide](/guides/functional_api/).
"""
| [
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.backend.random_normal",
"tensorflow.matmul",
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.keras.regularizers.l2",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.keras.... | [((1342, 1357), 'tensorflow.ones', 'tf.ones', (['(2, 2)'], {}), '((2, 2))\n', (1349, 1357), True, 'import tensorflow as tf\n'), ((2132, 2147), 'tensorflow.ones', 'tf.ones', (['(2, 2)'], {}), '((2, 2))\n', (2139, 2147), True, 'import tensorflow as tf\n'), ((2825, 2840), 'tensorflow.ones', 'tf.ones', (['(2, 2)'], {}), '((2, 2))\n', (2832, 2840), True, 'import tensorflow as tf\n'), ((9266, 9289), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (9277, 9289), False, 'from tensorflow import keras\n'), ((9346, 9374), 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (9357, 9374), False, 'from tensorflow import keras\n'), ((11174, 11189), 'tensorflow.ones', 'tf.ones', (['(2, 2)'], {}), '((2, 2))\n', (11181, 11189), True, 'import tensorflow as tf\n'), ((11199, 11214), 'tensorflow.ones', 'tf.ones', (['(2, 2)'], {}), '((2, 2))\n', (11206, 11214), True, 'import tensorflow as tf\n'), ((11434, 11472), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(3,)', 'name': '"""inputs"""'}), "(shape=(3,), name='inputs')\n", (11445, 11472), False, 'from tensorflow import keras\n'), ((11483, 11523), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(10,)', 'name': '"""targets"""'}), "(shape=(10,), name='targets')\n", (11494, 11523), False, 'from tensorflow import keras\n'), ((11641, 11699), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inputs, targets]', 'outputs': 'predictions'}), '(inputs=[inputs, targets], outputs=predictions)\n', (11652, 11699), False, 'from tensorflow import keras\n'), ((21502, 21547), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (21526, 21547), True, 'import tensorflow as tf\n'), ((21561, 21595), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (21593, 21595), True, 'import tensorflow as tf\n'), ((21611, 
21634), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (21632, 21634), True, 'import tensorflow as tf\n'), ((21654, 21689), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (21687, 21689), True, 'import tensorflow as tf\n'), ((21769, 21812), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['x_train'], {}), '(x_train)\n', (21803, 21812), True, 'import tensorflow as tf\n'), ((22805, 22850), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (22829, 22850), True, 'import tensorflow as tf\n'), ((23540, 23599), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(original_dim,)', 'name': '"""encoder_input"""'}), "(shape=(original_dim,), name='encoder_input')\n", (23554, 23599), True, 'import tensorflow as tf\n'), ((23827, 23892), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'original_inputs', 'outputs': 'z', 'name': '"""encoder"""'}), "(inputs=original_inputs, outputs=z, name='encoder')\n", (23841, 23892), True, 'import tensorflow as tf\n'), ((23934, 23988), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(latent_dim,)', 'name': '"""z_sampling"""'}), "(shape=(latent_dim,), name='z_sampling')\n", (23948, 23988), True, 'import tensorflow as tf\n'), ((24130, 24199), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'latent_inputs', 'outputs': 'outputs', 'name': '"""decoder"""'}), "(inputs=latent_inputs, outputs=outputs, name='decoder')\n", (24144, 24199), True, 'import tensorflow as tf\n'), ((24248, 24315), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'original_inputs', 'outputs': 'outputs', 'name': '"""vae"""'}), "(inputs=original_inputs, outputs=outputs, name='vae')\n", (24262, 24315), True, 'import tensorflow as tf\n'), ((24489, 24534), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 
'(0.001)'}), '(learning_rate=0.001)\n', (24513, 24534), True, 'import tensorflow as tf\n'), ((5943, 5965), 'tensorflow.ones', 'tf.ones', ([], {'shape': '(3, 64)'}), '(shape=(3, 64))\n', (5950, 5965), True, 'import tensorflow as tf\n'), ((7317, 7331), 'tensorflow.zeros', 'tf.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (7325, 7331), True, 'import tensorflow as tf\n'), ((7461, 7475), 'tensorflow.zeros', 'tf.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (7469, 7475), True, 'import tensorflow as tf\n'), ((8062, 8078), 'tensorflow.zeros', 'tf.zeros', (['(1, 1)'], {}), '((1, 1))\n', (8070, 8078), True, 'import tensorflow as tf\n'), ((9516, 9540), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (9532, 9540), True, 'import numpy as np\n'), ((9542, 9566), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (9558, 9566), True, 'import numpy as np\n'), ((9768, 9792), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (9784, 9792), True, 'import numpy as np\n'), ((9794, 9818), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (9810, 9818), True, 'import numpy as np\n'), ((11533, 11555), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), '(10)\n', (11551, 11555), False, 'from tensorflow import keras\n'), ((11756, 11780), 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), '((3, 3))\n', (11772, 11780), True, 'import numpy as np\n'), ((11797, 11822), 'numpy.random.random', 'np.random.random', (['(3, 10)'], {}), '((3, 10))\n', (11813, 11822), True, 'import numpy as np\n'), ((23604, 23653), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (23616, 23653), False, 'from tensorflow.keras import layers\n'), ((23680, 23719), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['latent_dim'], {'name': '"""z_mean"""'}), "(latent_dim, name='z_mean')\n", (23692, 23719), False, 
'from tensorflow.keras import layers\n'), ((23735, 23777), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['latent_dim'], {'name': '"""z_log_var"""'}), "(latent_dim, name='z_log_var')\n", (23747, 23777), False, 'from tensorflow.keras import layers\n'), ((23993, 24042), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (24005, 24042), False, 'from tensorflow.keras import layers\n'), ((24068, 24116), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['original_dim'], {'activation': '"""sigmoid"""'}), "(original_dim, activation='sigmoid')\n", (24080, 24116), False, 'from tensorflow.keras import layers\n'), ((820, 850), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (848, 850), True, 'import tensorflow as tf\n'), ((1013, 1035), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (1033, 1035), True, 'import tensorflow as tf\n'), ((5815, 5828), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (5825, 5828), True, 'import tensorflow as tf\n'), ((5870, 5883), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (5880, 5883), True, 'import tensorflow as tf\n'), ((10380, 10429), 'tensorflow.keras.losses.BinaryCrossentropy', 'keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (10411, 10429), False, 'from tensorflow import keras\n'), ((10457, 10487), 'tensorflow.keras.metrics.BinaryAccuracy', 'keras.metrics.BinaryAccuracy', ([], {}), '()\n', (10485, 10487), False, 'from tensorflow import keras\n'), ((11039, 11060), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (11052, 11060), True, 'import tensorflow as tf\n'), ((19147, 19197), 'tensorflow.keras.backend.random_normal', 'tf.keras.backend.random_normal', ([], {'shape': '(batch, dim)'}), '(shape=(batch, dim))\n', (19177, 19197), True, 'import tensorflow as tf\n'), ((19524, 19573), 
'tensorflow.keras.layers.Dense', 'layers.Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (19536, 19573), False, 'from tensorflow.keras import layers\n'), ((19600, 19624), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['latent_dim'], {}), '(latent_dim)\n', (19612, 19624), False, 'from tensorflow.keras import layers\n'), ((19654, 19678), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['latent_dim'], {}), '(latent_dim)\n', (19666, 19678), False, 'from tensorflow.keras import layers\n'), ((20218, 20267), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (20230, 20267), False, 'from tensorflow.keras import layers\n'), ((20296, 20344), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['original_dim'], {'activation': '"""sigmoid"""'}), "(original_dim, activation='sigmoid')\n", (20308, 20344), False, 'from tensorflow.keras import layers\n'), ((22879, 22913), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (22911, 22913), True, 'import tensorflow as tf\n'), ((24562, 24596), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (24594, 24596), True, 'import tensorflow as tf\n'), ((1202, 1227), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (1211, 1227), True, 'import tensorflow as tf\n'), ((2091, 2116), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (2100, 2116), True, 'import tensorflow as tf\n'), ((2762, 2791), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['inputs'], {'axis': '(0)'}), '(inputs, axis=0)\n', (2775, 2791), True, 'import tensorflow as tf\n'), ((3829, 3854), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (3838, 3854), True, 'import tensorflow as tf\n'), ((4679, 4704), 'tensorflow.matmul', 'tf.matmul', 
(['inputs', 'self.w'], {}), '(inputs, self.w)\n', (4688, 4704), True, 'import tensorflow as tf\n'), ((12570, 12595), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (12579, 12595), True, 'import tensorflow as tf\n'), ((13594, 13619), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (13603, 13619), True, 'import tensorflow as tf\n'), ((14998, 15035), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs'], {'rate': 'self.rate'}), '(inputs, rate=self.rate)\n', (15011, 15035), True, 'import tensorflow as tf\n'), ((19075, 19091), 'tensorflow.shape', 'tf.shape', (['z_mean'], {}), '(z_mean)\n', (19083, 19091), True, 'import tensorflow as tf\n'), ((19109, 19125), 'tensorflow.shape', 'tf.shape', (['z_mean'], {}), '(z_mean)\n', (19117, 19125), True, 'import tensorflow as tf\n'), ((22103, 22120), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (22118, 22120), True, 'import tensorflow as tf\n'), ((2662, 2684), 'tensorflow.zeros', 'tf.zeros', (['(input_dim,)'], {}), '((input_dim,))\n', (2670, 2684), True, 'import tensorflow as tf\n'), ((6634, 6655), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['inputs'], {}), '(inputs)\n', (6647, 6655), True, 'import tensorflow as tf\n'), ((7904, 7935), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (7928, 7935), True, 'import tensorflow as tf\n'), ((19222, 19245), 'tensorflow.exp', 'tf.exp', (['(0.5 * z_log_var)'], {}), '(0.5 * z_log_var)\n', (19228, 19245), True, 'import tensorflow as tf\n'), ((24422, 24439), 'tensorflow.exp', 'tf.exp', (['z_log_var'], {}), '(z_log_var)\n', (24428, 24439), True, 'import tensorflow as tf\n'), ((24402, 24419), 'tensorflow.square', 'tf.square', (['z_mean'], {}), '(z_mean)\n', (24411, 24419), True, 'import tensorflow as tf\n'), ((21271, 21288), 'tensorflow.exp', 'tf.exp', (['z_log_var'], {}), '(z_log_var)\n', (21277, 21288), True, 'import tensorflow as tf\n'), ((21251, 21268), 
'tensorflow.square', 'tf.square', (['z_mean'], {}), '(z_mean)\n', (21260, 21268), True, 'import tensorflow as tf\n')] |
import numpy as np
import os
import pandas as pd
from ..utils import (drop_tseconds_volume, read_ndata,
write_ndata,compute_FD,generate_mask,interpolate_masked_data)
from nipype.interfaces.base import (traits, TraitedSpec, BaseInterfaceInputSpec, File,
SimpleInterface )
from nipype import logging
from nipype.utils.filemanip import fname_presuffix
class _removeTRInputSpec(BaseInterfaceInputSpec):
    """Input specification for the ``removeTR`` interface.

    Declares the files and scalar parameters needed to drop the first
    ``time_todrop`` seconds of volumes from a BOLD run (cifti or nifti)
    and from its fmriprep confound matrix.
    """
    # BOLD time series to trim (cifti or nifti).
    bold_file = File(exists=True,mandatory=True, desc=" either bold or nifti ")
    # Brain mask; only needed when bold_file is a nifti image.
    mask_file = File(exists=False,mandatory=False, desc ="required for nifti")
    # Leading acquisition time to discard, in seconds.
    time_todrop = traits.Float(exists=True,mandatory=True, desc="time in seconds to drop")
    # Repetition time (presumably in seconds, used to convert time_todrop
    # to a volume count downstream -- desc text is ambiguous).
    TR = traits.Float(exists=True,mandatory=True, desc="repetition time in TR")
    # Confound columns already selected from the fmriprep confound matrix.
    fmriprep_conf = File(exists=True,mandatory=False,desc="confound selected from fmriprep confound matrix")
class _removeTROutputSpec(TraitedSpec):
    """Output specification for the ``removeTR`` interface."""
    # fmriprep confound matrix after removing the leading TRs.
    # BUG FIX: metadata key was misspelled "manadatory", so the mandatory
    # flag was silently never applied; corrected to "mandatory".
    fmrip_confdropTR = File(exists=True, mandatory=True,
                           desc="fmriprep confound after removing TRs,")
    # BOLD file (cifti or nifti) after removing the leading TRs.
    bold_file_TR = File(exists=True,mandatory=True, desc=" either bold or nifti modified")
class removeTR(SimpleInterface):
    """Drop the first ``time_todrop`` seconds from a BOLD run and its confounds.

    Reads the BOLD data (cifti or nifti), removes the leading volumes together
    with the matching rows of the fmriprep confound matrix, and writes both
    trimmed outputs to the current working directory.
    """
    input_spec = _removeTRInputSpec
    output_spec = _removeTROutputSpec

    def _run_interface(self, runtime):
        # Load the BOLD series as a 2D matrix and the selected confounds.
        bold_data = read_ndata(datafile=self.inputs.bold_file,
                               maskfile=self.inputs.mask_file)
        confounds = pd.read_csv(self.inputs.fmriprep_conf, header=None)

        # Remove the leading volumes / confound rows covering time_todrop seconds.
        trimmed_bold, trimmed_conf = drop_tseconds_volume(
            data_matrix=bold_data,
            confound=confounds,
            delets=self.inputs.time_todrop,
            TR=self.inputs.TR)

        # Derive output filenames in the working directory.
        self._results['bold_file_TR'] = fname_presuffix(
            self.inputs.bold_file,
            newpath=os.getcwd(),
            use_ext=True)
        self._results['fmrip_confdropTR'] = fname_presuffix(
            self.inputs.bold_file,
            suffix='fmriprep_dropTR.txt',
            newpath=os.getcwd(),
            use_ext=False)

        # Persist the trimmed BOLD series and confound matrix.
        write_ndata(data_matrix=trimmed_bold,
                    template=self.inputs.bold_file,
                    mask=self.inputs.mask_file,
                    filename=self._results['bold_file_TR'],
                    tr=self.inputs.TR)
        trimmed_conf.to_csv(self._results['fmrip_confdropTR'],
                            index=False, header=False)
        return runtime
class _censorscrubInputSpec(BaseInterfaceInputSpec):
    """Input specification for the ``censorscrub`` interface."""
    # Raw BOLD file, used to recover the motion parameters for FD computation.
    bold_file = File(exists=True, mandatory=True, desc=" raw bold or nifti real")
    # Cleaned BOLD data (cifti or nifti) that will actually be censored.
    in_file = File(exists=True, mandatory=True, desc=" bold or nifti")
    # Framewise-displacement threshold above which a volume is censored.
    fd_thresh = traits.Float(exists=True, mandatory=True, desc ="fd_threshold")
    # Brain mask; only needed for nifti input.
    mask_file = File(exists=False, mandatory=False, desc ="required for nifti")
    TR = traits.Float(exists=True, mandatory=True, desc="repetition time in TR")
    # Optional user-supplied confound file, censored alongside the data.
    custom_conf = traits.Either(
        traits.Undefined, File,
        desc="name of output file with field or true", exists=False, mandatory=False)
    fmriprep_conf = File(exists=True, mandatory=True,
                        desc=" confound selected from fmriprep confound matrix ")
    head_radius = traits.Float(exists=False, mandatory=False, default_value=50,
                              desc="head radius in mm ")
    filtertype = traits.Float(exists=False, mandatory=False)
    # Dummy time already dropped from the data; the matching FD samples are
    # removed before masking so volumes and FD values stay aligned.
    time_todrop = traits.Float(exists=False, mandatory=False, default_value=0, desc="time in seconds to drop")
    # BUG FIX: the original used ``exit=False`` (a typo for ``exists``) on the
    # two frequency traits; corrected so the metadata key matches the rest of
    # the spec.
    low_freq = traits.Float(exists=False, mandatory=False, desc=' low frequency band for nortch filterin breathe per min (bpm)')
    high_freq = traits.Float(exists=False, mandatory=False, desc=' high frequency for nortch filter in breathe per min (bpm)')
class _censorscrubOutputSpec(TraitedSpec):
    """Output specification for the ``censorscrub`` interface."""
    # BUG FIX: metadata key was misspelled "manadatory"; corrected to
    # "mandatory" so the flag is actually recognized.
    bold_censored = File(exists=True, mandatory=True,
                        desc=" fmriprep censored")
    fmriprepconf_censored = File(exists=True, mandatory=True,
                                desc=" fmriprep_conf censored")
    # Only produced when a custom confound file was supplied.
    customconf_censored = File(exists=False, mandatory=False, desc="custom conf censored")
    tmask = File(exists=True, mandatory=True, desc="temporal mask")
    fd_timeseries = File(exists=True, mandatory=True, desc="fd timeseries")
class censorscrub(SimpleInterface):
    r"""
    Generate a temporal mask flagging volumes whose framewise displacement (FD)
    exceeds ``fd_thresh``, and censor the BOLD data (and confounds) accordingly.
    .. testsetup::
    >>> from tempfile import TemporaryDirectory
    >>> tmpdir = TemporaryDirectory()
    >>> os.chdir(tmpdir.name)
    .. doctest::
    >>> cscrub = censorscrub()
    >>> cscrub.inputs.bold_file = cleanbold
    >>> cscrub.inputs.in_file = datafile
    >>> cscrub.inputs.TR = TR
    >>> cscrub.inputs.fd_thresh = fd_thresh
    >>> cscrub.inputs.fmriprep_conf = fmriprepconf
    >>> cscrub.inputs.mask_file = mask
    >>> cscrub.inputs.time_todrop = dummytime
    >>> cscrub.run()
    .. testcleanup::
    >>> tmpdir.cleanup()
    """
    input_spec = _censorscrubInputSpec
    output_spec = _censorscrubOutputSpec

    def _run_interface(self, runtime):
        from ..utils.confounds import (load_confound, load_motion)
        # Compute FD from the (optionally notch-filtered) motion parameters
        # of the raw BOLD run.
        conf_matrix = load_confound(datafile=self.inputs.bold_file)[0]
        motion_conf = load_motion(conf_matrix.copy(), TR=self.inputs.TR,
                                 filtertype=self.inputs.filtertype,
                                 freqband=[self.inputs.low_freq, self.inputs.high_freq])
        motion_df = pd.DataFrame(data=motion_conf.values,
                                 columns=["rot_x", "rot_y", "rot_z",
                                          "trans_x", "trans_y", "trans_z"])
        fd_timeseries = compute_FD(confound=motion_df,
                                   head_radius=self.inputs.head_radius)
        # Load the (already cleaned) BOLD data and the selected confounds.
        dataxx = read_ndata(datafile=self.inputs.in_file,
                            maskfile=self.inputs.mask_file)
        fmriprepx_conf = pd.read_csv(self.inputs.fmriprep_conf, header=None)
        if self.inputs.custom_conf:
            customx_conf = pd.read_csv(self.inputs.custom_conf, header=None)
        # If dummy time was already dropped from the data, drop the matching
        # leading FD samples so the mask lines up with the remaining volumes.
        # BUG FIX: the original used ``np.int`` (removed in NumPy 1.24);
        # replaced with the builtin ``int``. The two previously duplicated
        # censoring branches are also merged: with time_todrop == 0 the slice
        # ``fd_timeseries[0:]`` is the full series, so one code path suffices,
        # and ``fd_timeseries2`` is now always defined before it is saved.
        if self.inputs.time_todrop == 0:
            num_vol = 0
        else:
            num_vol = int(np.divide(self.inputs.time_todrop, self.inputs.TR))
        fd_timeseries2 = fd_timeseries[num_vol:]
        # Build the temporal mask (1 = censor) and drop flagged volumes/rows.
        tmask = generate_mask(fd_res=fd_timeseries2, fd_thresh=self.inputs.fd_thresh)
        if np.sum(tmask) > 0:
            datax_censored = dataxx[:, tmask == 0]
            fmriprepx_censored = fmriprepx_conf.drop(
                fmriprepx_conf.index[np.where(tmask == 1)])
            if self.inputs.custom_conf:
                customx_censored = customx_conf.drop(
                    customx_conf.index[np.where(tmask == 1)])
        else:
            # Nothing exceeded the threshold; pass everything through.
            datax_censored = dataxx
            fmriprepx_censored = fmriprepx_conf
            if self.inputs.custom_conf:
                customx_censored = customx_conf
        # Derive output filenames in the working directory.
        self._results['bold_censored'] = fname_presuffix(
            self.inputs.in_file,
            newpath=os.getcwd(),
            use_ext=True)
        self._results['fmriprepconf_censored'] = fname_presuffix(
            self.inputs.in_file,
            suffix='fmriprepconf_censored.csv', newpath=os.getcwd(),
            use_ext=False)
        self._results['customconf_censored'] = fname_presuffix(
            self.inputs.in_file,
            suffix='customconf_censored.txt', newpath=os.getcwd(),
            use_ext=False)
        self._results['tmask'] = fname_presuffix(
            self.inputs.in_file,
            suffix='temporalmask.tsv', newpath=os.getcwd(),
            use_ext=False)
        self._results['fd_timeseries'] = fname_presuffix(
            self.inputs.in_file,
            suffix='fd_timeseries.tsv', newpath=os.getcwd(),
            use_ext=False)
        # Persist the censored data, censored confounds, mask and FD series.
        write_ndata(data_matrix=datax_censored, template=self.inputs.in_file,
                    mask=self.inputs.mask_file,
                    filename=self._results['bold_censored'],
                    tr=self.inputs.TR)
        fmriprepx_censored.to_csv(self._results['fmriprepconf_censored'],
                                  index=False, header=False)
        np.savetxt(self._results['tmask'], tmask, fmt="%d", delimiter=',')
        np.savetxt(self._results['fd_timeseries'], fd_timeseries2,
                   fmt="%1.4f", delimiter=',')
        if self.inputs.custom_conf:
            customx_censored.to_csv(self._results['customconf_censored'],
                                    index=False, header=False)
        return runtime
## interpolation
class _interpolateInputSpec(BaseInterfaceInputSpec):
    """Input specification for the ``interpolate`` interface."""
    # Censored (or otherwise cleaned) BOLD data to interpolate over.
    in_file = File(exists=True,mandatory=True, desc=" censored or clean bold")
    # BOLD file used as the template when writing the interpolated output.
    bold_file = File(exists=True,mandatory=True, desc=" censored or clean bold")
    # Temporal mask from censorscrub (1 = censored volume, 0 = kept).
    tmask = File(exists=True,mandatory=True,desc="temporal mask")
    # Brain mask; only needed when the data is nifti.
    mask_file = File(exists=False,mandatory=False, desc ="required for nifti")
    TR = traits.Float(exists=True,mandatory=True, desc="repetition time in TR")
class _interpolateOutputSpec(TraitedSpec):
    """Output specification for the ``interpolate`` interface."""
    # BUG FIX: metadata key was misspelled "manadatory"; corrected to
    # "mandatory" so the flag is actually recognized.
    bold_interpolated = File(exists=True, mandatory=True,
                            desc=" fmriprep censored")
class interpolate(SimpleInterface):
    """Reconstruct censored volumes of a cleaned BOLD series by interpolation.

    The censored data are scattered back into a full-length matrix according
    to the temporal mask (zeros at censored positions), interpolated over the
    masked time points, and written out using the original BOLD file as a
    template.
    """
    input_spec = _interpolateInputSpec
    output_spec = _interpolateOutputSpec

    def _run_interface(self, runtime):
        censored = read_ndata(datafile=self.inputs.in_file,
                              maskfile=self.inputs.mask_file)
        tmask = np.loadtxt(self.inputs.tmask)

        # If volumes were removed, place the surviving columns back at their
        # original (uncensored) time points before interpolating.
        if censored.shape[1] == len(tmask):
            padded = censored
        else:
            padded = np.zeros([censored.shape[0], len(tmask)])
            padded[:, tmask == 0] = censored

        reconstructed = interpolate_masked_data(img_datax=padded,
                                                tmask=tmask,
                                                TR=self.inputs.TR)

        self._results['bold_interpolated'] = fname_presuffix(
            self.inputs.in_file,
            newpath=os.getcwd(),
            use_ext=True)
        write_ndata(data_matrix=reconstructed,
                    template=self.inputs.bold_file,
                    mask=self.inputs.mask_file,
                    tr=self.inputs.TR,
                    filename=self._results['bold_interpolated'])
        return runtime
| [
"pandas.DataFrame",
"numpy.divide",
"numpy.sum",
"pandas.read_csv",
"nipype.interfaces.base.traits.Float",
"os.getcwd",
"numpy.savetxt",
"nipype.interfaces.base.File",
"numpy.where",
"numpy.loadtxt",
"nipype.interfaces.base.traits.Either"
] | [((441, 505), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" either bold or nifti """'}), "(exists=True, mandatory=True, desc=' either bold or nifti ')\n", (445, 505), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((521, 583), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(False)', 'mandatory': '(False)', 'desc': '"""required for nifti"""'}), "(exists=False, mandatory=False, desc='required for nifti')\n", (525, 583), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((602, 675), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""time in seconds to drop"""'}), "(exists=True, mandatory=True, desc='time in seconds to drop')\n", (614, 675), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((684, 755), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""repetition time in TR"""'}), "(exists=True, mandatory=True, desc='repetition time in TR')\n", (696, 755), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((775, 870), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(False)', 'desc': '"""confound selected from fmriprep confound matrix"""'}), "(exists=True, mandatory=False, desc=\n 'confound selected from fmriprep confound matrix')\n", (779, 870), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((929, 1014), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'manadatory': '(True)', 'desc': '"""fmriprep confound after removing TRs,"""'}), "(exists=True, manadatory=True, desc='fmriprep confound after removing 
TRs,'\n )\n", (933, 1014), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((1068, 1140), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" either bold or nifti modified"""'}), "(exists=True, mandatory=True, desc=' either bold or nifti modified')\n", (1072, 1140), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((2652, 2717), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" raw bold or nifti real"""'}), "(exists=True, mandatory=True, desc=' raw bold or nifti real')\n", (2656, 2717), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((2730, 2786), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" bold or nifti"""'}), "(exists=True, mandatory=True, desc=' bold or nifti')\n", (2734, 2786), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((2802, 2864), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""fd_threshold"""'}), "(exists=True, mandatory=True, desc='fd_threshold')\n", (2814, 2864), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((2881, 2943), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(False)', 'mandatory': '(False)', 'desc': '"""required for nifti"""'}), "(exists=False, mandatory=False, desc='required for nifti')\n", (2885, 2943), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((2953, 3024), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""repetition time in TR"""'}), 
"(exists=True, mandatory=True, desc='repetition time in TR')\n", (2965, 3024), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((3042, 3162), 'nipype.interfaces.base.traits.Either', 'traits.Either', (['traits.Undefined', 'File'], {'desc': '"""name of output file with field or true"""', 'exists': '(False)', 'mandatory': '(False)'}), "(traits.Undefined, File, desc=\n 'name of output file with field or true', exists=False, mandatory=False)\n", (3055, 3162), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((3270, 3366), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" confound selected from fmriprep confound matrix """'}), "(exists=True, mandatory=True, desc=\n ' confound selected from fmriprep confound matrix ')\n", (3274, 3366), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((3406, 3500), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(False)', 'mandatory': '(False)', 'default_value': '(50)', 'desc': '"""head radius in mm """'}), "(exists=False, mandatory=False, default_value=50, desc=\n 'head radius in mm ')\n", (3418, 3500), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((3539, 3582), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(False)', 'mandatory': '(False)'}), '(exists=False, mandatory=False)\n', (3551, 3582), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((3600, 3697), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(False)', 'mandatory': '(False)', 'default_value': '(0)', 'desc': '"""time in seconds to drop"""'}), "(exists=False, mandatory=False, default_value=0, desc=\n 'time in seconds to 
drop')\n", (3612, 3697), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((3705, 3821), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exit': '(False)', 'mandatory': '(False)', 'desc': '""" low frequency band for nortch filterin breathe per min (bpm)"""'}), "(exit=False, mandatory=False, desc=\n ' low frequency band for nortch filterin breathe per min (bpm)')\n", (3717, 3821), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((3831, 3944), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exit': '(False)', 'mandatory': '(False)', 'desc': '""" high frequency for nortch filter in breathe per min (bpm)"""'}), "(exit=False, mandatory=False, desc=\n ' high frequency for nortch filter in breathe per min (bpm)')\n", (3843, 3944), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((4005, 4066), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'manadatory': '(True)', 'desc': '""" fmriprep censored"""'}), "(exists=True, manadatory=True, desc=' fmriprep censored')\n", (4009, 4066), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((4133, 4198), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" fmriprep_conf censored"""'}), "(exists=True, mandatory=True, desc=' fmriprep_conf censored')\n", (4137, 4198), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((4261, 4325), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(False)', 'mandatory': '(False)', 'desc': '"""custom conf censored"""'}), "(exists=False, mandatory=False, desc='custom conf censored')\n", (4265, 4325), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, 
File, SimpleInterface\n'), ((4337, 4392), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""temporal mask"""'}), "(exists=True, mandatory=True, desc='temporal mask')\n", (4341, 4392), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((4411, 4466), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""fd timeseries"""'}), "(exists=True, mandatory=True, desc='fd timeseries')\n", (4415, 4466), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((9657, 9722), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" censored or clean bold"""'}), "(exists=True, mandatory=True, desc=' censored or clean bold')\n", (9661, 9722), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((9738, 9803), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '""" censored or clean bold"""'}), "(exists=True, mandatory=True, desc=' censored or clean bold')\n", (9742, 9803), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((9815, 9870), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""temporal mask"""'}), "(exists=True, mandatory=True, desc='temporal mask')\n", (9819, 9870), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((9885, 9947), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(False)', 'mandatory': '(False)', 'desc': '"""required for nifti"""'}), "(exists=False, mandatory=False, desc='required for nifti')\n", (9889, 9947), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, 
SimpleInterface\n'), ((9957, 10028), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""repetition time in TR"""'}), "(exists=True, mandatory=True, desc='repetition time in TR')\n", (9969, 10028), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((10098, 10159), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'manadatory': '(True)', 'desc': '""" fmriprep censored"""'}), "(exists=True, manadatory=True, desc=' fmriprep censored')\n", (10102, 10159), False, 'from nipype.interfaces.base import traits, TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface\n'), ((1554, 1605), 'pandas.read_csv', 'pd.read_csv', (['self.inputs.fmriprep_conf'], {'header': 'None'}), '(self.inputs.fmriprep_conf, header=None)\n', (1565, 1605), True, 'import pandas as pd\n'), ((5838, 5949), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'motion_conf.values', 'columns': "['rot_x', 'rot_y', 'rot_z', 'trans_x', 'trans_y', 'trans_z']"}), "(data=motion_conf.values, columns=['rot_x', 'rot_y', 'rot_z',\n 'trans_x', 'trans_y', 'trans_z'])\n", (5850, 5949), True, 'import pandas as pd\n'), ((6193, 6244), 'pandas.read_csv', 'pd.read_csv', (['self.inputs.fmriprep_conf'], {'header': 'None'}), '(self.inputs.fmriprep_conf, header=None)\n', (6204, 6244), True, 'import pandas as pd\n'), ((9252, 9318), 'numpy.savetxt', 'np.savetxt', (["self._results['tmask']", 'tmask'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(self._results['tmask'], tmask, fmt='%d', delimiter=',')\n", (9262, 9318), True, 'import numpy as np\n'), ((9324, 9414), 'numpy.savetxt', 'np.savetxt', (["self._results['fd_timeseries']", 'fd_timeseries2'], {'fmt': '"""%1.4f"""', 'delimiter': '""","""'}), "(self._results['fd_timeseries'], fd_timeseries2, fmt='%1.4f',\n delimiter=',')\n", (9334, 9414), True, 'import numpy as np\n'), ((11042, 11071), 'numpy.loadtxt', 'np.loadtxt', 
(['self.inputs.tmask'], {}), '(self.inputs.tmask)\n', (11052, 11071), True, 'import numpy as np\n'), ((6324, 6373), 'pandas.read_csv', 'pd.read_csv', (['self.inputs.custom_conf'], {'header': 'None'}), '(self.inputs.custom_conf, header=None)\n', (6335, 6373), True, 'import pandas as pd\n'), ((2008, 2019), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2017, 2019), False, 'import os\n'), ((2206, 2217), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2215, 2217), False, 'import os\n'), ((6566, 6579), 'numpy.sum', 'np.sum', (['tmask'], {}), '(tmask)\n', (6572, 6579), True, 'import numpy as np\n'), ((7171, 7221), 'numpy.divide', 'np.divide', (['self.inputs.time_todrop', 'self.inputs.TR'], {}), '(self.inputs.time_todrop, self.inputs.TR)\n', (7180, 7221), True, 'import numpy as np\n'), ((7428, 7441), 'numpy.sum', 'np.sum', (['tmask'], {}), '(tmask)\n', (7434, 7441), True, 'import numpy as np\n'), ((8103, 8114), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8112, 8114), False, 'import os\n'), ((8309, 8320), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8318, 8320), False, 'import os\n'), ((8512, 8523), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8521, 8523), False, 'import os\n'), ((8694, 8705), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8703, 8705), False, 'import os\n'), ((8885, 8896), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8894, 8896), False, 'import os\n'), ((11500, 11511), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11509, 11511), False, 'import os\n'), ((6716, 6736), 'numpy.where', 'np.where', (['(tmask == 1)'], {}), '(tmask == 1)\n', (6724, 6736), True, 'import numpy as np\n'), ((7577, 7597), 'numpy.where', 'np.where', (['(tmask == 1)'], {}), '(tmask == 1)\n', (7585, 7597), True, 'import numpy as np\n'), ((6857, 6877), 'numpy.where', 'np.where', (['(tmask == 1)'], {}), '(tmask == 1)\n', (6865, 6877), True, 'import numpy as np\n'), ((7718, 7738), 'numpy.where', 'np.where', (['(tmask == 1)'], {}), '(tmask == 1)\n', (7726, 7738), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections.abc
import math
from typing import List, Sequence, Tuple
import cv2
import numpy as np
from megengine.data.transform import Transform
from megengine.data.transform.vision import functional as F
# Public API of this module: the transform classes re-exported to users.
__all__ = [
    "VisionTransform",
    "ToMode",
    "Compose",
    "TorchTransformCompose",
    "Pad",
    "Resize",
    "ShortestEdgeResize",
    "RandomResize",
    "RandomCrop",
    "RandomResizedCrop",
    "CenterCrop",
    "RandomHorizontalFlip",
    "RandomVerticalFlip",
    "Normalize",
    "GaussianNoise",
    "BrightnessTransform",
    "SaturationTransform",
    "ContrastTransform",
    "HueTransform",
    "ColorJitter",
    "Lighting",
]
class VisionTransform(Transform):
    r"""Base class of all transforms used in computer vision.
    Calling logic: apply_batch() -> apply() -> _apply_image() and other _apply_*()
    method. If you want to implement a self-defined transform method for image,
    rewrite _apply_image method in subclass.
    Args:
        order: input type order. Input is a tuple containing different structures,
            order is used to specify the order of structures. For example, if your input
            is (image, boxes) type, then the ``order`` should be ("image", "boxes").
            Current available strings and data type are describe below:
            * "image": input image, with shape of `(H, W, C)`.
            * "coords": coordinates, with shape of `(N, 2)`.
            * "boxes": bounding boxes, with shape of `(N, 4)`, "xyxy" format,
              the 1st "xy" represents top left point of a box,
              the 2nd "xy" represents right bottom point.
            * "mask": map used for segmentation, with shape of `(H, W, 1)`.
            * "keypoints": keypoints with shape of `(N, K, 3)`, N for number of instances,
              and K for number of keypoints in one instance. The first two dimensions
              of last axis is coordinate of keypoints and the 3rd dimension is
              the label of keypoints.
            * "polygons": a sequence containing numpy arrays, its length is the number of instances.
              Each numpy array represents polygon coordinate of one instance.
            * "category": categories for some data type. For example, "image_category"
              means category of the input image and "boxes_category" means categories of
              bounding boxes.
            * "info": information for images such as image shapes and image path.
        You can also customize your data types only if you implement the corresponding
        _apply_*() methods, otherwise ``NotImplementedError`` will be raised.
    """

    def __init__(self, order=None):
        super().__init__()
        if order is None:
            order = ("image",)
        elif not isinstance(order, collections.abc.Sequence):
            raise ValueError(
                "order should be a sequence, but got order={}".format(order)
            )
        # Validate every declared data type up front so misconfiguration
        # fails at construction time rather than mid-pipeline.
        for k in order:
            if k in ("batch",):
                raise ValueError("{} is invalid data type".format(k))
            elif k.endswith("category") or k.endswith("info"):
                # when the key is *category or info, we should do nothing
                # if the corresponding apply methods are not implemented.
                continue
            elif self._get_apply(k) is None:
                raise NotImplementedError("{} is unsupported data type".format(k))
        self.order = order

    def apply_batch(self, inputs: Sequence[Tuple]):
        r"""Apply transform on batch input data."""
        return tuple(self.apply(input) for input in inputs)

    def apply(self, input: Tuple):
        r"""Apply transform on single input data.

        Each positional entry of ``input`` is dispatched to the handler named
        by the corresponding entry of ``self.order``; entries beyond
        ``len(self.order)`` pass through unchanged.
        """
        if not isinstance(input, tuple):
            input = (input,)
        output = []
        for i in range(min(len(input), len(self.order))):
            apply_func = self._get_apply(self.order[i])
            if apply_func is None:
                # No handler for this type (e.g. "*category"/"info"): pass through.
                output.append(input[i])
            else:
                output.append(apply_func(input[i]))
        if len(input) > len(self.order):
            output.extend(input[len(self.order) :])
        # A single-element result is unwrapped to mirror the non-tuple input case.
        if len(output) == 1:
            output = output[0]
        else:
            output = tuple(output)
        return output

    def _get_apply(self, key):
        # Resolve the handler method "_apply_<key>" by name; None if absent.
        return getattr(self, "_apply_{}".format(key), None)

    def _get_image(self, input: Tuple):
        # Fetch the entry whose position in ``order`` is "image".
        if not isinstance(input, tuple):
            input = (input,)
        return input[self.order.index("image")]

    def _apply_image(self, image):
        raise NotImplementedError

    def _apply_coords(self, coords):
        raise NotImplementedError

    def _apply_boxes(self, boxes):
        # Transform each box through its four corner points, then take the
        # axis-aligned bounding box of the transformed corners.
        idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
        coords = np.asarray(boxes).reshape(-1, 4)[:, idxs].reshape(-1, 2)
        coords = self._apply_coords(coords).reshape((-1, 4, 2))
        minxy = coords.min(axis=1)
        maxxy = coords.max(axis=1)
        trans_boxes = np.concatenate((minxy, maxxy), axis=1)
        return trans_boxes

    def _apply_mask(self, mask):
        raise NotImplementedError

    def _apply_keypoints(self, keypoints):
        # Only the (x, y) part is transformed; the visibility/label column
        # is carried through untouched.
        coords, visibility = keypoints[..., :2], keypoints[..., 2:]
        trans_coords = [self._apply_coords(p) for p in coords]
        return np.concatenate((trans_coords, visibility), axis=-1)

    def _apply_polygons(self, polygons):
        # polygons: sequence of instances, each a sequence of coordinate arrays.
        return [[self._apply_coords(p) for p in instance] for instance in polygons]
class ToMode(VisionTransform):
    r"""Convert input data to a target channel layout.

    Most vision transforms operate on HWC images, while neural networks
    typically consume CHW tensors; this transform bridges the two.

    Args:
        mode: output mode of input. Default: "CHW"
        order: the same with :class:`VisionTransform`
    """

    def __init__(self, mode="CHW", *, order=None):
        super().__init__(order)
        assert mode in ["CHW"], "unsupported mode: {}".format(mode)
        self.mode = mode

    @staticmethod
    def _to_chw(data):
        # Move the trailing channel axis to the front and make the result
        # contiguous in memory.
        return np.ascontiguousarray(np.rollaxis(data, 2))

    def _apply_image(self, image):
        return self._to_chw(image) if self.mode == "CHW" else image

    def _apply_coords(self, coords):
        # Coordinates carry no channel axis; nothing to rearrange.
        return coords

    def _apply_mask(self, mask):
        return self._to_chw(mask) if self.mode == "CHW" else mask
class Compose(VisionTransform):
    r"""Composes several transformations together.
    Args:
        transforms: list of :class:`VisionTransform` to compose.
        batch_compose: whether keep the same transform order in batch data when shuffle.
        shuffle_indices: indices used for random shuffle, start at 1.
        order: the same with :class:`VisionTransform`
    .. seealso:: Refer to :mod:`~.data.transform` module for vision transform APIs.
    Examples:
        >>> import megengine.data.transform as T
        >>> T.Compose([  # doctest: +SKIP
        ...     T.RandomHorizontalFlip(),  # 1st
        ...     T.RandomVerticalFlip(),  # 2nd
        ...     T.CenterCrop(100),  # 3rd
        ...     T.ToMode("CHW"),  # 4th
        ...     ],
        ...     shuffle_indices=[(1, 2, 3)]
        ... )
    In this case, ``shuffle_indices`` is given so each input data will be transformed
    out of order:
    .. math::
        \begin{array}{cc}
        [{\color{red}1 \quad 2 \quad 3} \quad 4] & [{\color{red}1 \quad 3 \quad 2} \quad 4] \\
        [{\color{red}2 \quad 1 \quad 3} \quad 4] & [{\color{red}2 \quad 3 \quad 1} \quad 4] \\
        [{\color{red}3 \quad 1 \quad 2} \quad 4] & [{\color{red}3 \quad 2 \quad 1} \quad 4]
        \end{array}
    In another case, if ``[(1, 3), (2, 4)]`` is given, then the 1st and 3rd transformation
    will be random shuffled, the 2nd and 4th transformation will also be shuffled:
    .. math::
        \begin{array}{cc}
        [{\color{red}1} \quad {\color{blue}2} \quad {\color{red}3} \quad {\color{blue}4}] &
        [{\color{red}1} \quad {\color{blue}4} \quad {\color{red}3} \quad {\color{blue}2}] \\
        [{\color{red}3} \quad {\color{blue}2} \quad {\color{red}1} \quad {\color{blue}4}] &
        [{\color{red}3} \quad {\color{blue}4} \quad {\color{red}1} \quad {\color{blue}2}]
        \end{array}
    Different colors represent different groups that need to be internally shuffled.
    .. warning::
        Different samples within each batch will also use random transformation orders,
        unless ``batch_compose`` is set to ``True``.
    """

    def __init__(
        self,
        transforms: List[VisionTransform] = None,
        batch_compose: bool = False,
        shuffle_indices: List[Tuple] = None,
        *,
        order=None
    ):
        super().__init__(order)
        # FIX: the original default was a mutable ``[]`` shared by every
        # Compose instance (classic mutable-default-argument pitfall);
        # use a None sentinel instead. Passing no ``transforms`` still
        # yields an empty pipeline, so callers are unaffected.
        self.transforms = [] if transforms is None else transforms
        self._set_order()
        if batch_compose and shuffle_indices is not None:
            raise ValueError(
                "Do not support shuffle when apply transforms along the whole batch"
            )
        self.batch_compose = batch_compose
        if shuffle_indices is not None:
            # User-facing indices start at 1; convert to 0-based positions.
            shuffle_indices = [tuple(x - 1 for x in idx) for idx in shuffle_indices]
        self.shuffle_indices = shuffle_indices

    def _set_order(self):
        # Propagate this Compose's data order to every child transform,
        # recursing into nested Compose instances.
        for t in self.transforms:
            t.order = self.order
            if isinstance(t, Compose):
                t._set_order()

    def apply_batch(self, inputs: Sequence[Tuple]):
        r"""Apply the composed transforms on batch input data."""
        if self.batch_compose:
            # Apply each transform across the whole batch so every sample
            # sees the identical (unshuffled) transform order.
            for t in self.transforms:
                inputs = t.apply_batch(inputs)
            return inputs
        else:
            return super().apply_batch(inputs)

    def apply(self, input: Tuple):
        r"""Apply the composed transforms on single input data."""
        for t in self._shuffle():
            input = t.apply(input)
        return input

    def _shuffle(self):
        # Return the transform list with each shuffle group randomly
        # permuted; without shuffle_indices the original order is kept.
        if self.shuffle_indices is not None:
            source_idx = list(range(len(self.transforms)))
            for idx in self.shuffle_indices:
                shuffled = np.random.permutation(idx).tolist()
                for src, dst in zip(idx, shuffled):
                    source_idx[src] = dst
            return [self.transforms[i] for i in source_idx]
        else:
            return self.transforms
class TorchTransformCompose(VisionTransform):
    r"""Compose class used for transforms in torchvision, only support PIL image,
    some transforms with tensor in torchvision are not supported,
    such as Normalize and ToTensor in torchvision.
    Args:
        transforms: the same with ``Compose``.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, transforms, *, order=None):
        super().__init__(order)
        self.transforms = transforms

    def _apply_image(self, image):
        # Convert the ndarray (assumed BGR, either CHW or HWC) to an RGB
        # PIL image, run the torchvision transforms, then convert back.
        from PIL import Image

        try:
            import accimage
        except ImportError:
            # accimage is an optional PIL-compatible backend; fall back to PIL.
            accimage = None

        if image.shape[0] == 3:  # CHW
            # Reverse the channel axis: BGR -> RGB.
            image = np.ascontiguousarray(image[[2, 1, 0]])
        elif image.shape[2] == 3:  # HWC
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image.astype(np.uint8))

        for t in self.transforms:
            image = t(image)

        # If the pipeline left us with a PIL/accimage image, convert it
        # back to a uint8 ndarray before undoing the channel swap.
        if isinstance(image, Image.Image) or (
            accimage is not None and isinstance(image, accimage.Image)
        ):
            image = np.array(image, dtype=np.uint8)
        if image.shape[0] == 3:  # CHW
            image = np.ascontiguousarray(image[[2, 1, 0]])
        elif image.shape[2] == 3:  # HWC
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        return image
class Pad(VisionTransform):
    r"""Pad the input data.

    Args:
        size: padding size of input image, it could be integer or sequence.
            If it is an integer, the input image will be padded in four directions.
            If it is a sequence containing two integers, the bottom and right side
            of image will be padded.
            If it is a sequence containing four integers, the top, bottom, left, right
            side of image will be padded with given size.
        value: padding value of image, could be a sequence of int or float.
            if it is float value, the dtype of image will be casted to float32 also.
        mask_value: padding value of segmentation map.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, size=0, value=0, mask_value=0, *, order=None):
        super().__init__(order)
        self.size = self._normalize_size(size)
        self.value = value
        if not isinstance(mask_value, int):
            raise ValueError(
                "mask_value should be a positive integer, "
                "but got mask_value={}".format(mask_value)
            )
        self.mask_value = mask_value

    @staticmethod
    def _normalize_size(size):
        # Accept an int, a (bottom, right) pair, or a full
        # (top, bottom, left, right) 4-sequence.
        if isinstance(size, int):
            return (size, size, size, size)
        if isinstance(size, collections.abc.Sequence):
            if len(size) == 2:
                return (0, size[0], 0, size[1])
            if len(size) == 4:
                return size
        raise ValueError(
            "size should be a list/tuple which contains "
            "(top, down, left, right) four pad sizes."
        )

    def _apply_image(self, image):
        return F.pad(image, self.size, self.value)

    def _apply_coords(self, coords):
        # Coordinates shift by the left (size[2]) and top (size[0]) pads.
        coords[:, 0] += self.size[2]
        coords[:, 1] += self.size[0]
        return coords

    def _apply_mask(self, mask):
        return F.pad(mask, self.size, self.mask_value)
class Resize(VisionTransform):
    r"""Resize the input data.

    Args:
        output_size: target size of image, with (height, width) shape.
        interpolation: interpolation method. All methods are listed below:
            * cv2.INTER_NEAREST – a nearest-neighbor interpolation.
            * cv2.INTER_LINEAR – a bilinear interpolation (used by default).
            * cv2.INTER_AREA – resampling using pixel area relation.
            * cv2.INTER_CUBIC – a bicubic interpolation over 4×4 pixel neighborhood.
            * cv2.INTER_LANCZOS4 – a Lanczos interpolation over 8×8 pixel neighborhood.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, output_size, interpolation=cv2.INTER_LINEAR, *, order=None):
        super().__init__(order)
        self.output_size = output_size
        self.interpolation = interpolation

    def apply(self, input: Tuple):
        # Compute source/target shapes once so image, coords and mask all
        # use the same scaling for this sample.
        self._shape_info = self._get_shape(self._get_image(input))
        return super().apply(input)

    def _apply_image(self, image):
        h, w, th, tw = self._shape_info
        if (h, w) == (th, tw):
            return image
        return F.resize(image, (th, tw), self.interpolation)

    def _apply_coords(self, coords):
        h, w, th, tw = self._shape_info
        if (h, w) != (th, tw):
            coords[:, 0] = coords[:, 0] * (tw / w)
            coords[:, 1] = coords[:, 1] * (th / h)
        return coords

    def _apply_mask(self, mask):
        h, w, th, tw = self._shape_info
        if (h, w) == (th, tw):
            return mask
        # Nearest-neighbor keeps segmentation labels discrete.
        return F.resize(mask, (th, tw), cv2.INTER_NEAREST)

    def _get_shape(self, image):
        # Returns (src_h, src_w, dst_h, dst_w).
        h, w, _ = image.shape
        if not isinstance(self.output_size, int):
            # Explicit (height, width) target.
            return (h, w, *self.output_size)
        short = self.output_size
        if min(h, w) == short:
            return h, w, h, w
        # Scale the shorter edge to ``short`` while keeping the aspect ratio.
        if h < w:
            return h, w, short, int(short * w / h)
        return h, w, int(short * h / w), short
class ShortestEdgeResize(VisionTransform):
    r"""Resize the input data so its shortest edge matches a sampled target
    length, optionally shrinking further so the longest edge does not
    exceed ``max_size``.
    """

    def __init__(
        self,
        min_size,
        max_size,
        sample_style="range",
        interpolation=cv2.INTER_LINEAR,
        *,
        order=None
    ):
        super().__init__(order)
        if sample_style not in ("range", "choice"):
            raise NotImplementedError(
                "{} is unsupported sample style".format(sample_style)
            )
        self.sample_style = sample_style
        # A bare int means a fixed shortest-edge length.
        self.min_size = (min_size, min_size) if isinstance(min_size, int) else min_size
        self.max_size = max_size
        self.interpolation = interpolation

    def apply(self, input: Tuple):
        # Sample the target shape once per call so image, coords and mask
        # stay geometrically consistent.
        self._shape_info = self._get_shape(self._get_image(input))
        return super().apply(input)

    def _apply_image(self, image):
        h, w, th, tw = self._shape_info
        if (h, w) == (th, tw):
            return image
        return F.resize(image, (th, tw), self.interpolation)

    def _apply_coords(self, coords):
        h, w, th, tw = self._shape_info
        if (h, w) != (th, tw):
            coords[:, 0] = coords[:, 0] * (tw / w)
            coords[:, 1] = coords[:, 1] * (th / h)
        return coords

    def _apply_mask(self, mask):
        h, w, th, tw = self._shape_info
        if (h, w) == (th, tw):
            return mask
        # Nearest-neighbor keeps segmentation labels discrete.
        return F.resize(mask, (th, tw), cv2.INTER_NEAREST)

    def _get_shape(self, image):
        h, w, _ = image.shape
        # Draw the target shortest-edge length according to the sample style.
        if self.sample_style == "range":
            size = np.random.randint(self.min_size[0], self.min_size[1] + 1)
        else:
            size = np.random.choice(self.min_size)
        scale = size / min(h, w)
        th, tw = (size, scale * w) if h < w else (scale * h, size)
        # Shrink further if the longest edge would exceed max_size.
        longest = max(th, tw)
        if longest > self.max_size:
            scale = self.max_size / longest
            th = th * scale
            tw = tw * scale
        return h, w, int(round(th)), int(round(tw))
class RandomResize(VisionTransform):
    r"""Resize the input data by a randomly sampled scale factor.

    Args:
        scale_range: range of scaling.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, scale_range, interpolation=cv2.INTER_LINEAR, *, order=None):
        super().__init__(order)
        self.scale_range = scale_range
        self.interpolation = interpolation

    def apply(self, input: Tuple):
        # Sample the scale once per call so every component is resized alike.
        self._shape_info = self._get_shape(self._get_image(input))
        return super().apply(input)

    def _apply_image(self, image):
        h, w, th, tw = self._shape_info
        if (h, w) == (th, tw):
            return image
        return F.resize(image, (th, tw), self.interpolation)

    def _apply_coords(self, coords):
        h, w, th, tw = self._shape_info
        if (h, w) != (th, tw):
            coords[:, 0] = coords[:, 0] * (tw / w)
            coords[:, 1] = coords[:, 1] * (th / h)
        return coords

    def _apply_mask(self, mask):
        h, w, th, tw = self._shape_info
        if (h, w) == (th, tw):
            return mask
        # Nearest-neighbor keeps segmentation labels discrete.
        return F.resize(mask, (th, tw), cv2.INTER_NEAREST)

    def _get_shape(self, image):
        h, w, _ = image.shape
        scale = np.random.uniform(*self.scale_range)
        return h, w, int(round(h * scale)), int(round(w * scale))
class RandomCrop(VisionTransform):
    r"""Crop the input data randomly. Before applying the crop transform,
    pad the image first. If target size is still bigger than the size of
    padded image, pad the image size to target size.

    Args:
        output_size: target size of output image, with (height, width) shape.
        padding_size: the same with `size` in ``Pad``.
        padding_value: the same with `value` in ``Pad``.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(
        self,
        output_size,
        # FIX: the default was a mutable list ``[0, 0, 0]`` shared by every
        # instance (mutable-default-argument pitfall); an immutable tuple
        # has the same meaning for padding and cannot be aliased/mutated.
        padding_value=(0, 0, 0),
        padding_size=0,
        padding_maskvalue=0,
        *,
        order=None
    ):
        super().__init__(order)
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            self.output_size = output_size
        self.pad = Pad(padding_size, padding_value, order=self.order)
        self.padding_value = padding_value
        self.padding_maskvalue = padding_maskvalue

    def apply(self, input):
        input = self.pad.apply(input)
        self._h, self._w, _ = self._get_image(input).shape
        self._th, self._tw = self.output_size
        # Pick the top-left corner of the crop window uniformly at random;
        # max(0, ...) keeps randint's upper bound valid when the crop is
        # larger than the (padded) image.
        self._x = np.random.randint(0, max(0, self._w - self._tw) + 1)
        self._y = np.random.randint(0, max(0, self._h - self._th) + 1)
        return super().apply(input)

    def _apply_image(self, image):
        # Grow the image when the crop window exceeds the padded image.
        if self._th > self._h:
            image = F.pad(image, (self._th - self._h, 0), self.padding_value)
        if self._tw > self._w:
            image = F.pad(image, (0, self._tw - self._w), self.padding_value)
        return image[self._y : self._y + self._th, self._x : self._x + self._tw]

    def _apply_coords(self, coords):
        # Shift coordinates into the crop window's frame.
        coords[:, 0] -= self._x
        coords[:, 1] -= self._y
        return coords

    def _apply_mask(self, mask):
        if self._th > self._h:
            mask = F.pad(mask, (self._th - self._h, 0), self.padding_maskvalue)
        if self._tw > self._w:
            mask = F.pad(mask, (0, self._tw - self._w), self.padding_maskvalue)
        return mask[self._y : self._y + self._th, self._x : self._x + self._tw]
class RandomResizedCrop(VisionTransform):
    r"""Crop the input data to random size and aspect ratio.
    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
    aspect ratio (default: of 3/4 to 1.33) of the original aspect ratio is made.
    After applying crop transform, the input data will be resized to given size.
    Args:
        output_size: target size of output image, with (height, width) shape.
        scale_range: range of size of the origin size cropped. Default: (0.08, 1.0)
        ratio_range: range of aspect ratio of the origin aspect ratio cropped. Default: (0.75, 1.33)
        order: the same with :class:`VisionTransform`.
    """

    def __init__(
        self,
        output_size,
        scale_range=(0.08, 1.0),
        ratio_range=(3.0 / 4, 4.0 / 3),
        interpolation=cv2.INTER_LINEAR,
        *,
        order=None
    ):
        super().__init__(order)
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            self.output_size = output_size
        assert (
            scale_range[0] <= scale_range[1]
        ), "scale_range should be of kind (min, max)"
        assert (
            ratio_range[0] <= ratio_range[1]
        ), "ratio_range should be of kind (min, max)"
        self.scale_range = scale_range
        self.ratio_range = ratio_range
        self.interpolation = interpolation

    def apply(self, input: Tuple):
        # Sample the crop window once per call so image, coords and mask
        # all use the same region.
        self._coord_info = self._get_coord(self._get_image(input))
        return super().apply(input)

    def _apply_image(self, image):
        x, y, w, h = self._coord_info
        cropped_img = image[y : y + h, x : x + w]
        return F.resize(cropped_img, self.output_size, self.interpolation)

    def _apply_coords(self, coords):
        # Shift into the crop frame, then scale to the output size.
        x, y, w, h = self._coord_info
        coords[:, 0] = (coords[:, 0] - x) * self.output_size[1] / w
        coords[:, 1] = (coords[:, 1] - y) * self.output_size[0] / h
        return coords

    def _apply_mask(self, mask):
        x, y, w, h = self._coord_info
        cropped_mask = mask[y : y + h, x : x + w]
        # Nearest-neighbor keeps segmentation labels discrete.
        return F.resize(cropped_mask, self.output_size, cv2.INTER_NEAREST)

    def _get_coord(self, image, attempts=10):
        """Sample an (x, y, w, h) crop window.

        Tries up to ``attempts`` random (area, aspect-ratio) draws; if none
        fits inside the image, falls back to a central crop clamped to the
        allowed ratio range.
        """
        height, width, _ = image.shape
        area = height * width

        for _ in range(attempts):
            target_area = np.random.uniform(*self.scale_range) * area
            # Sample the aspect ratio log-uniformly so e.g. 3/4 and 4/3
            # are equally likely.
            log_ratio = tuple(math.log(x) for x in self.ratio_range)
            aspect_ratio = math.exp(np.random.uniform(*log_ratio))

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if 0 < w <= width and 0 < h <= height:
                x = np.random.randint(0, width - w + 1)
                y = np.random.randint(0, height - h + 1)
                return x, y, w, h

        # Fallback to central crop
        in_ratio = float(width) / float(height)
        if in_ratio < min(self.ratio_range):
            # Image is too tall: use full width, clamp height to the ratio.
            w = width
            h = int(round(w / min(self.ratio_range)))
        elif in_ratio > max(self.ratio_range):
            # Image is too wide: use full height, clamp width to the ratio.
            h = height
            w = int(round(h * max(self.ratio_range)))
        else:  # whole image
            w = width
            h = height
        x = (width - w) // 2
        y = (height - h) // 2
        return x, y, w, h
class CenterCrop(VisionTransform):
    r"""Crop the given input data at the center.

    Args:
        output_size: target size of output image, with (height, width) shape.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, output_size, *, order=None):
        super().__init__(order)
        self.output_size = (
            (output_size, output_size) if isinstance(output_size, int) else output_size
        )

    def apply(self, input: Tuple):
        # Locate the crop window once so image, coords and mask agree.
        self._coord_info = self._get_coord(self._get_image(input))
        return super().apply(input)

    def _apply_image(self, image):
        left, top = self._coord_info
        crop_h, crop_w = self.output_size
        return image[top : top + crop_h, left : left + crop_w]

    def _apply_coords(self, coords):
        left, top = self._coord_info
        coords[:, 0] -= left
        coords[:, 1] -= top
        return coords

    def _apply_mask(self, mask):
        left, top = self._coord_info
        crop_h, crop_w = self.output_size
        return mask[top : top + crop_h, left : left + crop_w]

    def _get_coord(self, image):
        # Center the crop window inside the image; the crop must fit.
        crop_h, crop_w = self.output_size
        h, w, _ = image.shape
        assert crop_h <= h and crop_w <= w, "output size is bigger than image size"
        return int(round((w - crop_w) / 2.0)), int(round((h - crop_h) / 2.0))
class RandomHorizontalFlip(VisionTransform):
    r"""Horizontally flip the input data randomly with a given probability.

    Args:
        prob: probability of the input data being flipped. Default: 0.5
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, prob: float = 0.5, *, order=None):
        super().__init__(order)
        self.prob = prob

    def apply(self, input: Tuple):
        # Decide once per sample so image, coords and mask stay consistent.
        self._flipped = np.random.random() < self.prob
        self._w = self._get_image(input).shape[1]
        return super().apply(input)

    def _apply_image(self, image):
        return F.flip(image, flipCode=1) if self._flipped else image

    def _apply_coords(self, coords):
        if self._flipped:
            # Mirror x coordinates about the vertical center line.
            coords[:, 0] = self._w - coords[:, 0]
        return coords

    def _apply_mask(self, mask):
        return F.flip(mask, flipCode=1) if self._flipped else mask
class RandomVerticalFlip(VisionTransform):
    r"""Vertically flip the input data randomly with a given probability.

    Args:
        prob: probability of the input data being flipped. Default: 0.5
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, prob: float = 0.5, *, order=None):
        super().__init__(order)
        self.prob = prob

    def apply(self, input: Tuple):
        # Decide once per sample so image, coords and mask stay consistent.
        self._flipped = np.random.random() < self.prob
        self._h = self._get_image(input).shape[0]
        return super().apply(input)

    def _apply_image(self, image):
        return F.flip(image, flipCode=0) if self._flipped else image

    def _apply_coords(self, coords):
        if self._flipped:
            # Mirror y coordinates about the horizontal center line.
            coords[:, 1] = self._h - coords[:, 1]
        return coords

    def _apply_mask(self, mask):
        return F.flip(mask, flipCode=0) if self._flipped else mask
class Normalize(VisionTransform):
    r"""Normalize the input data with mean and standard deviation.

    Given mean ``(M1,...,Mn)`` and std ``(S1,..,Sn)`` for ``n`` channels,
    each channel of the input data is transformed as
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``.

    Args:
        mean: sequence of means for each channel.
        std: sequence of standard deviations for each channel.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, mean=0.0, std=1.0, *, order=None):
        super().__init__(order)
        # Stored as float32 arrays so the arithmetic broadcasts per channel.
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)

    def _apply_image(self, image):
        centered = image - self.mean
        return centered / self.std

    def _apply_coords(self, coords):
        # Normalization touches pixel values only, never geometry.
        return coords

    def _apply_mask(self, mask):
        return mask
class GaussianNoise(VisionTransform):
    r"""Add random gaussian noise to the input data.

    Gaussian noise is generated with given mean and std.

    Args:
        mean: Gaussian mean used to generate noise.
        std: Gaussian standard deviation used to generate noise.
        order: the same with :class:`VisionTransform`
    """

    def __init__(self, mean=0.0, std=1.0, *, order=None):
        super().__init__(order)
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)

    def _apply_image(self, image):
        original_dtype = image.dtype
        # Sample per-pixel noise and scale it to the 0-255 intensity range.
        noise = np.random.normal(self.mean, self.std, image.shape) * 255
        noisy = image + noise.astype(np.float32)
        # Clamp into the valid range and restore the input dtype.
        return np.clip(noisy, 0, 255).astype(original_dtype)

    def _apply_coords(self, coords):
        # Noise does not move geometry.
        return coords

    def _apply_mask(self, mask):
        return mask
class BrightnessTransform(VisionTransform):
    r"""Adjust brightness of the input data.

    Args:
        value: how much to adjust the brightness. Can be any
            non negative number. 0 gives the original image.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, value, *, order=None):
        super().__init__(order)
        if value < 0:
            raise ValueError("brightness value should be non-negative")
        self.value = value

    def _apply_image(self, image):
        if self.value == 0:
            # Zero jitter: return the input untouched.
            return image
        dtype = image.dtype
        # Sample a brightness factor in [max(0, 1 - value), 1 + value].
        alpha = np.random.uniform(max(0, 1 - self.value), 1 + self.value)
        scaled = image.astype(np.float32) * alpha
        return scaled.clip(0, 255).astype(dtype)

    def _apply_coords(self, coords):
        return coords

    def _apply_mask(self, mask):
        return mask
class ContrastTransform(VisionTransform):
    r"""Adjust contrast of the input data.

    Args:
        value: how much to adjust the contrast. Can be any
            non negative number. 0 gives the original image.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, value, *, order=None):
        super().__init__(order)
        if value < 0:
            raise ValueError("contrast value should be non-negative")
        self.value = value

    def _apply_image(self, image):
        if self.value == 0:
            # Zero jitter: return the input untouched.
            return image
        dtype = image.dtype
        image = image.astype(np.float32)
        # Sample a contrast factor in [max(0, 1 - value), 1 + value] and
        # blend the image with its global mean gray intensity.
        alpha = np.random.uniform(max(0, 1 - self.value), 1 + self.value)
        blended = image * alpha + F.to_gray(image).mean() * (1 - alpha)
        return blended.clip(0, 255).astype(dtype)

    def _apply_coords(self, coords):
        return coords

    def _apply_mask(self, mask):
        return mask
class SaturationTransform(VisionTransform):
    r"""Adjust saturation of the input data.

    Args:
        value: how much to adjust the saturation. Can be any
            non negative number. 0 gives the original image.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, value, *, order=None):
        super().__init__(order)
        if value < 0:
            raise ValueError("saturation value should be non-negative")
        self.value = value

    def _apply_image(self, image):
        if self.value == 0:
            # Zero jitter: return the input untouched.
            return image
        dtype = image.dtype
        image = image.astype(np.float32)
        # Sample a saturation factor in [max(0, 1 - value), 1 + value] and
        # blend the image with its per-pixel grayscale version.
        alpha = np.random.uniform(max(0, 1 - self.value), 1 + self.value)
        blended = image * alpha + F.to_gray(image) * (1 - alpha)
        return blended.clip(0, 255).astype(dtype)

    def _apply_coords(self, coords):
        return coords

    def _apply_mask(self, mask):
        return mask
class HueTransform(VisionTransform):
    r"""Adjust hue of the input data.
    Args:
        value: how much to adjust the hue. Can be any number
            between 0 and 0.5, 0 gives the original image.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, value, *, order=None):
        super().__init__(order)
        if value < 0 or value > 0.5:
            raise ValueError("hue value should be in [0.0, 0.5]")
        self.value = value

    def _apply_image(self, image):
        """Shift the hue channel by a random offset drawn from [-value, value]."""
        if self.value == 0:
            return image
        dtype = image.dtype
        image = image.astype(np.uint8)
        # *_FULL conversions map hue to the full 0-255 uint8 range
        # (OpenCV's plain COLOR_BGR2HSV uses 0-179).
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)
        h, s, v = cv2.split(hsv_image)
        alpha = np.random.uniform(-self.value, self.value)
        h = h.astype(np.uint8)
        # uint8 addition take cares of rotation across boundaries:
        # the deliberate wraparound implements the circular hue shift,
        # so the overflow warning is silenced on purpose.
        with np.errstate(over="ignore"):
            h += np.uint8(alpha * 255)
        hsv_image = cv2.merge([h, s, v])
        return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR_FULL).astype(dtype)

    def _apply_coords(self, coords):
        return coords

    def _apply_mask(self, mask):
        return mask
class ColorJitter(VisionTransform):
    r"""Randomly change the brightness, contrast, saturation and hue of an image.

    Args:
        brightness: how much to jitter brightness.
            Chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non negative numbers.
        contrast: how much to jitter contrast.
            Chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
            or the given [min, max]. Should be non negative numbers.
        saturation: how much to jitter saturation.
            Chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non negative numbers.
        hue: how much to jitter hue.
            Chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
        order: the same with :class:`VisionTransform`.
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, *, order=None):
        super().__init__(order)
        # Instantiate only the jitters whose strength is non-zero, in the
        # canonical brightness/contrast/saturation/hue order.
        specs = [
            (brightness, BrightnessTransform),
            (contrast, ContrastTransform),
            (saturation, SaturationTransform),
            (hue, HueTransform),
        ]
        transforms = [cls(value) for value, cls in specs if value != 0]
        # One shuffle group covering all enabled jitters: they are applied
        # in a fresh random order for every sample.
        self.transforms = Compose(
            transforms,
            shuffle_indices=[tuple(range(1, len(transforms) + 1))],
            order=order,
        )

    def apply(self, input):
        return self.transforms.apply(input)
class Lighting(VisionTransform):
    r"""Apply AlexNet-Style "lighting" augmentation to input data.

    Input images are assumed to have 'RGB' channel order.
    The degree of color jittering is randomly sampled via a normal distribution,
    with standard deviation given by the scale parameter.
    """

    def __init__(self, scale, *, order=None):
        super().__init__(order)
        if scale < 0:
            raise ValueError("lighting scale should be non-negative")
        self.scale = scale
        # Principal-component eigenvectors; the first dimension is reversed
        # so rows follow BGR channel order (see original comment).
        self.eigvec = np.array(
            [
                [-0.5836, -0.6948, 0.4203],
                [-0.5808, -0.0045, -0.8140],
                [-0.5675, 0.7192, 0.4009],
            ]
        )  # reverse the first dimension for BGR
        self.eigval = np.array([0.2175, 0.0188, 0.0045])

    def _apply_image(self, image):
        # Zero scale disables the augmentation entirely.
        if self.scale == 0:
            return image
        dtype = image.dtype
        jitter = np.random.normal(scale=self.scale * 255, size=3)
        shifted = image.astype(np.float32) + self.eigvec.dot(jitter * self.eigval)
        return shifted.clip(0, 255).astype(dtype)

    def _apply_coords(self, coords):
        # Lighting jitter does not move pixels, so coordinates are unchanged.
        return coords

    def _apply_mask(self, mask):
        # Lighting jitter does not move pixels, so masks are unchanged.
        return mask
| [
"megengine.data.transform.vision.functional.pad",
"megengine.data.transform.vision.functional.resize",
"numpy.clip",
"numpy.random.randint",
"numpy.random.normal",
"cv2.cvtColor",
"cv2.split",
"numpy.rollaxis",
"numpy.random.choice",
"math.log",
"numpy.uint8",
"math.sqrt",
"numpy.asarray",
... | [((5400, 5438), 'numpy.concatenate', 'np.concatenate', (['(minxy, maxxy)'], {'axis': '(1)'}), '((minxy, maxxy), axis=1)\n', (5414, 5438), True, 'import numpy as np\n'), ((5724, 5775), 'numpy.concatenate', 'np.concatenate', (['(trans_coords, visibility)'], {'axis': '(-1)'}), '((trans_coords, visibility), axis=-1)\n', (5738, 5775), True, 'import numpy as np\n'), ((13693, 13728), 'megengine.data.transform.vision.functional.pad', 'F.pad', (['image', 'self.size', 'self.value'], {}), '(image, self.size, self.value)\n', (13698, 13728), True, 'from megengine.data.transform.vision import functional as F\n'), ((13912, 13951), 'megengine.data.transform.vision.functional.pad', 'F.pad', (['mask', 'self.size', 'self.mask_value'], {}), '(mask, self.size, self.mask_value)\n', (13917, 13951), True, 'from megengine.data.transform.vision import functional as F\n'), ((15110, 15155), 'megengine.data.transform.vision.functional.resize', 'F.resize', (['image', '(th, tw)', 'self.interpolation'], {}), '(image, (th, tw), self.interpolation)\n', (15118, 15155), True, 'from megengine.data.transform.vision import functional as F\n'), ((15553, 15596), 'megengine.data.transform.vision.functional.resize', 'F.resize', (['mask', '(th, tw)', 'cv2.INTER_NEAREST'], {}), '(mask, (th, tw), cv2.INTER_NEAREST)\n', (15561, 15596), True, 'from megengine.data.transform.vision import functional as F\n'), ((17106, 17151), 'megengine.data.transform.vision.functional.resize', 'F.resize', (['image', '(th, tw)', 'self.interpolation'], {}), '(image, (th, tw), self.interpolation)\n', (17114, 17151), True, 'from megengine.data.transform.vision import functional as F\n'), ((17549, 17592), 'megengine.data.transform.vision.functional.resize', 'F.resize', (['mask', '(th, tw)', 'cv2.INTER_NEAREST'], {}), '(mask, (th, tw), cv2.INTER_NEAREST)\n', (17557, 17592), True, 'from megengine.data.transform.vision import functional as F\n'), ((18886, 18931), 'megengine.data.transform.vision.functional.resize', 'F.resize', 
(['image', '(th, tw)', 'self.interpolation'], {}), '(image, (th, tw), self.interpolation)\n', (18894, 18931), True, 'from megengine.data.transform.vision import functional as F\n'), ((19329, 19372), 'megengine.data.transform.vision.functional.resize', 'F.resize', (['mask', '(th, tw)', 'cv2.INTER_NEAREST'], {}), '(mask, (th, tw), cv2.INTER_NEAREST)\n', (19337, 19372), True, 'from megengine.data.transform.vision import functional as F\n'), ((19453, 19489), 'numpy.random.uniform', 'np.random.uniform', (['*self.scale_range'], {}), '(*self.scale_range)\n', (19470, 19489), True, 'import numpy as np\n'), ((23476, 23535), 'megengine.data.transform.vision.functional.resize', 'F.resize', (['cropped_img', 'self.output_size', 'self.interpolation'], {}), '(cropped_img, self.output_size, self.interpolation)\n', (23484, 23535), True, 'from megengine.data.transform.vision import functional as F\n'), ((23907, 23966), 'megengine.data.transform.vision.functional.resize', 'F.resize', (['cropped_mask', 'self.output_size', 'cv2.INTER_NEAREST'], {}), '(cropped_mask, self.output_size, cv2.INTER_NEAREST)\n', (23915, 23966), True, 'from megengine.data.transform.vision import functional as F\n'), ((28981, 29013), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (28989, 29013), True, 'import numpy as np\n'), ((29033, 29064), 'numpy.array', 'np.array', (['std'], {'dtype': 'np.float32'}), '(std, dtype=np.float32)\n', (29041, 29064), True, 'import numpy as np\n'), ((29712, 29744), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (29720, 29744), True, 'import numpy as np\n'), ((29764, 29795), 'numpy.array', 'np.array', (['std'], {'dtype': 'np.float32'}), '(std, dtype=np.float32)\n', (29772, 29795), True, 'import numpy as np\n'), ((33606, 33649), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV_FULL'], {}), '(image, cv2.COLOR_BGR2HSV_FULL)\n', (33618, 33649), False, 'import cv2\n'), ((33668, 
33688), 'cv2.split', 'cv2.split', (['hsv_image'], {}), '(hsv_image)\n', (33677, 33688), False, 'import cv2\n'), ((33706, 33748), 'numpy.random.uniform', 'np.random.uniform', (['(-self.value)', 'self.value'], {}), '(-self.value, self.value)\n', (33723, 33748), True, 'import numpy as np\n'), ((33946, 33966), 'cv2.merge', 'cv2.merge', (['[h, s, v]'], {}), '([h, s, v])\n', (33955, 33966), False, 'import cv2\n'), ((36350, 36447), 'numpy.array', 'np.array', (['[[-0.5836, -0.6948, 0.4203], [-0.5808, -0.0045, -0.814], [-0.5675, 0.7192, \n 0.4009]]'], {}), '([[-0.5836, -0.6948, 0.4203], [-0.5808, -0.0045, -0.814], [-0.5675,\n 0.7192, 0.4009]])\n', (36358, 36447), True, 'import numpy as np\n'), ((36591, 36625), 'numpy.array', 'np.array', (['[0.2175, 0.0188, 0.0045]'], {}), '([0.2175, 0.0188, 0.0045])\n', (36599, 36625), True, 'import numpy as np\n'), ((36801, 36849), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(self.scale * 255)', 'size': '(3)'}), '(scale=self.scale * 255, size=3)\n', (36817, 36849), True, 'import numpy as np\n'), ((11365, 11403), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image[[2, 1, 0]]'], {}), '(image[[2, 1, 0]])\n', (11385, 11403), True, 'import numpy as np\n'), ((11774, 11805), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (11782, 11805), True, 'import numpy as np\n'), ((11865, 11903), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image[[2, 1, 0]]'], {}), '(image[[2, 1, 0]])\n', (11885, 11903), True, 'import numpy as np\n'), ((17717, 17774), 'numpy.random.randint', 'np.random.randint', (['self.min_size[0]', '(self.min_size[1] + 1)'], {}), '(self.min_size[0], self.min_size[1] + 1)\n', (17734, 17774), True, 'import numpy as np\n'), ((17808, 17839), 'numpy.random.choice', 'np.random.choice', (['self.min_size'], {}), '(self.min_size)\n', (17824, 17839), True, 'import numpy as np\n'), ((21055, 21112), 'megengine.data.transform.vision.functional.pad', 'F.pad', (['image', 
'(self._th - self._h, 0)', 'self.padding_value'], {}), '(image, (self._th - self._h, 0), self.padding_value)\n', (21060, 21112), True, 'from megengine.data.transform.vision import functional as F\n'), ((21164, 21221), 'megengine.data.transform.vision.functional.pad', 'F.pad', (['image', '(0, self._tw - self._w)', 'self.padding_value'], {}), '(image, (0, self._tw - self._w), self.padding_value)\n', (21169, 21221), True, 'from megengine.data.transform.vision import functional as F\n'), ((21511, 21571), 'megengine.data.transform.vision.functional.pad', 'F.pad', (['mask', '(self._th - self._h, 0)', 'self.padding_maskvalue'], {}), '(mask, (self._th - self._h, 0), self.padding_maskvalue)\n', (21516, 21571), True, 'from megengine.data.transform.vision import functional as F\n'), ((21622, 21682), 'megengine.data.transform.vision.functional.pad', 'F.pad', (['mask', '(0, self._tw - self._w)', 'self.padding_maskvalue'], {}), '(mask, (0, self._tw - self._w), self.padding_maskvalue)\n', (21627, 21682), True, 'from megengine.data.transform.vision import functional as F\n'), ((26916, 26934), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (26932, 26934), True, 'import numpy as np\n'), ((27114, 27139), 'megengine.data.transform.vision.functional.flip', 'F.flip', (['image'], {'flipCode': '(1)'}), '(image, flipCode=1)\n', (27120, 27139), True, 'from megengine.data.transform.vision import functional as F\n'), ((27376, 27400), 'megengine.data.transform.vision.functional.flip', 'F.flip', (['mask'], {'flipCode': '(1)'}), '(mask, flipCode=1)\n', (27382, 27400), True, 'from megengine.data.transform.vision import functional as F\n'), ((27859, 27877), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (27875, 27877), True, 'import numpy as np\n'), ((28057, 28082), 'megengine.data.transform.vision.functional.flip', 'F.flip', (['image'], {'flipCode': '(0)'}), '(image, flipCode=0)\n', (28063, 28082), True, 'from megengine.data.transform.vision import functional as F\n'), 
((28319, 28343), 'megengine.data.transform.vision.functional.flip', 'F.flip', (['mask'], {'flipCode': '(0)'}), '(mask, flipCode=0)\n', (28325, 28343), True, 'from megengine.data.transform.vision import functional as F\n'), ((29876, 29926), 'numpy.random.normal', 'np.random.normal', (['self.mean', 'self.std', 'image.shape'], {}), '(self.mean, self.std, image.shape)\n', (29892, 29926), True, 'import numpy as np\n'), ((33859, 33885), 'numpy.errstate', 'np.errstate', ([], {'over': '"""ignore"""'}), "(over='ignore')\n", (33870, 33885), True, 'import numpy as np\n'), ((33904, 33925), 'numpy.uint8', 'np.uint8', (['(alpha * 255)'], {}), '(alpha * 255)\n', (33912, 33925), True, 'import numpy as np\n'), ((5117, 5159), 'numpy.array', 'np.array', (['[(0, 1), (2, 1), (0, 3), (2, 3)]'], {}), '([(0, 1), (2, 1), (0, 3), (2, 3)])\n', (5125, 5159), True, 'import numpy as np\n'), ((6502, 6523), 'numpy.rollaxis', 'np.rollaxis', (['image', '(2)'], {}), '(image, 2)\n', (6513, 6523), True, 'import numpy as np\n'), ((6711, 6731), 'numpy.rollaxis', 'np.rollaxis', (['mask', '(2)'], {}), '(mask, 2)\n', (6722, 6731), True, 'import numpy as np\n'), ((11465, 11503), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (11477, 11503), False, 'import cv2\n'), ((11965, 12003), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (11977, 12003), False, 'import cv2\n'), ((24144, 24180), 'numpy.random.uniform', 'np.random.uniform', (['*self.scale_range'], {}), '(*self.scale_range)\n', (24161, 24180), True, 'import numpy as np\n'), ((24293, 24322), 'numpy.random.uniform', 'np.random.uniform', (['*log_ratio'], {}), '(*log_ratio)\n', (24310, 24322), True, 'import numpy as np\n'), ((24529, 24564), 'numpy.random.randint', 'np.random.randint', (['(0)', '(width - w + 1)'], {}), '(0, width - w + 1)\n', (24546, 24564), True, 'import numpy as np\n'), ((24585, 24621), 'numpy.random.randint', 'np.random.randint', 
(['(0)', '(height - h + 1)'], {}), '(0, height - h + 1)\n', (24602, 24621), True, 'import numpy as np\n'), ((29997, 30019), 'numpy.clip', 'np.clip', (['image', '(0)', '(255)'], {}), '(image, 0, 255)\n', (30004, 30019), True, 'import numpy as np\n'), ((32756, 32772), 'megengine.data.transform.vision.functional.to_gray', 'F.to_gray', (['image'], {}), '(image)\n', (32765, 32772), True, 'from megengine.data.transform.vision import functional as F\n'), ((33982, 34029), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv_image', 'cv2.COLOR_HSV2BGR_FULL'], {}), '(hsv_image, cv2.COLOR_HSV2BGR_FULL)\n', (33994, 34029), False, 'import cv2\n'), ((24218, 24229), 'math.log', 'math.log', (['x'], {}), '(x)\n', (24226, 24229), False, 'import math\n'), ((24351, 24388), 'math.sqrt', 'math.sqrt', (['(target_area * aspect_ratio)'], {}), '(target_area * aspect_ratio)\n', (24360, 24388), False, 'import math\n'), ((24417, 24454), 'math.sqrt', 'math.sqrt', (['(target_area / aspect_ratio)'], {}), '(target_area / aspect_ratio)\n', (24426, 24454), False, 'import math\n'), ((10413, 10439), 'numpy.random.permutation', 'np.random.permutation', (['idx'], {}), '(idx)\n', (10434, 10439), True, 'import numpy as np\n'), ((31804, 31820), 'megengine.data.transform.vision.functional.to_gray', 'F.to_gray', (['image'], {}), '(image)\n', (31813, 31820), True, 'from megengine.data.transform.vision import functional as F\n'), ((5187, 5204), 'numpy.asarray', 'np.asarray', (['boxes'], {}), '(boxes)\n', (5197, 5204), True, 'import numpy as np\n')] |
import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
class TrainingConfig:
    """Hyper-parameter container for Trainer.

    Class attributes hold the defaults; any keyword passed to the
    constructor becomes an instance attribute and shadows the default
    of the same name.
    """
    lr = 3e-4
    betas = (0.9, 0.995)
    weight_decay = 5e-4
    num_workers = 0
    max_epochs = 10
    batch_size = 64
    ckpt_path = None  # Specify a model path here. Ex: "./Model.pt"
    shuffle = True
    pin_memory = True
    verbose = True

    def __init__(self, **kwargs):
        # Promote every keyword to an instance attribute, overriding the
        # class-level default of the same name.
        for name in kwargs:
            setattr(self, name, kwargs[name])
class Trainer:
    """Minimal supervised training loop with optional per-epoch evaluation.

    The wrapped ``model`` must provide:
      * ``configure_optimizers(config)`` returning a torch optimizer, and
      * ``forward(images, targets)`` returning ``(logits, loss)``.

    Per-epoch metric curves are accumulated in ``train_losses``,
    ``train_accuracies``, ``test_losses`` and ``test_accuracies``.
    When ``config.ckpt_path`` is set, the model is checkpointed whenever
    the test loss improves.
    """

    def __init__(self, model, train_dataset, test_dataset, config):
        """
        Args:
            model: torch module (optionally wrapped in DataParallel).
            train_dataset: dataset yielding ``(images, targets)`` pairs.
            test_dataset: evaluation dataset, or ``None`` to skip evaluation.
            config: hyper-parameter object (see ``TrainingConfig``).
        """
        self.model = model
        self.train_dataset = train_dataset
        self.test_dataset = test_dataset
        self.config = config
        # Per-epoch metric history, filled in by train().
        self.train_losses = []
        self.train_accuracies = []
        self.test_losses = []
        self.test_accuracies = []
        self.device = "cpu"
        if torch.cuda.is_available():
            self.device = torch.cuda.current_device()
            self.model = self.model.to(self.device)

    def save_checkpoint(self):
        """Serialize the underlying model's state dict to ``config.ckpt_path``."""
        # Unwrap DataParallel so checkpoint keys are not prefixed with "module.".
        raw_model = self.model.module if hasattr(self.model, "module") else self.model
        torch.save(raw_model.state_dict(), self.config.ckpt_path)
        print("Model Saved!")

    def train(self):
        """Run ``config.max_epochs`` epochs of training (and evaluation)."""
        model, config = self.model, self.config
        raw_model = model.module if hasattr(model, "module") else model
        optimizer = raw_model.configure_optimizers(config)

        def run_epoch(split, epoch):
            """One full pass over the train or test set; returns mean test loss."""
            is_train = split == "train"
            # Important: since the model uses dropout, eval mode must be set
            # for the test split (train(False) == eval()).
            model.train(is_train)
            data = self.train_dataset if is_train else self.test_dataset
            loader = DataLoader(data, batch_size=config.batch_size,
                                shuffle=config.shuffle,
                                pin_memory=config.pin_memory,
                                num_workers=config.num_workers)
            losses = []
            accuracies = []
            correct = 0
            num_samples = 0
            pbar = tqdm(enumerate(loader), total=len(loader)) if is_train and config.verbose else enumerate(loader)
            for it, (images, targets) in pbar:
                images = images.to(self.device)
                targets = targets.to(self.device)
                num_samples += targets.size(0)
                with torch.set_grad_enabled(is_train):
                    # forward the model; mean() collapses per-replica losses
                    # when the model is wrapped in DataParallel.
                    logits, loss = model(images, targets)
                    loss = loss.mean()
                    losses.append(loss.item())
                with torch.no_grad():
                    # softmax gives prob distribution. Find the index of max prob
                    predictions = torch.argmax(logits, dim=1)
                    correct += predictions.eq(targets).sum().item()
                accuracies.append(correct / num_samples)
                if is_train:
                    model.zero_grad()
                    loss.backward()
                    optimizer.step()
                    if config.verbose:
                        pbar.set_description(f"Epoch:{epoch+1} iteration:{it+1} | loss:{np.mean(losses)} accuracy:{np.mean(accuracies)} lr:{config.lr}")
            if is_train:
                # BUGFIX: these appends previously ran for the test split as
                # well, polluting the training curves with evaluation numbers.
                self.train_losses.append(np.mean(losses))
                self.train_accuracies.append(np.mean(accuracies))
                return None
            test_loss = np.mean(losses)
            if config.verbose:
                print(f"\nEpoch:{epoch+1} | Test Loss:{test_loss} Test Accuracy:{correct/num_samples}\n")
            self.test_losses.append(test_loss)
            self.test_accuracies.append(correct / num_samples)
            return test_loss

        best_loss = float('inf')
        test_loss = float('inf')
        for epoch in range(config.max_epochs):
            run_epoch('train', epoch)
            if self.test_dataset is not None:
                test_loss = run_epoch("test", epoch)
            # Checkpoint only when the evaluation loss improves.
            good_model = self.test_dataset is not None and test_loss < best_loss
            if config.ckpt_path is not None and good_model:
                best_loss = test_loss
self.save_checkpoint() | [
"torch.utils.data.DataLoader",
"torch.argmax",
"numpy.mean",
"torch.cuda.is_available",
"torch.set_grad_enabled",
"torch.cuda.current_device",
"torch.no_grad"
] | [((907, 932), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (930, 932), False, 'import torch\n'), ((961, 988), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (986, 988), False, 'import torch\n'), ((1846, 1982), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'config.batch_size', 'shuffle': 'config.shuffle', 'pin_memory': 'config.pin_memory', 'num_workers': 'config.num_workers'}), '(data, batch_size=config.batch_size, shuffle=config.shuffle,\n pin_memory=config.pin_memory, num_workers=config.num_workers)\n', (1856, 1982), False, 'from torch.utils.data import DataLoader\n'), ((3705, 3720), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3712, 3720), True, 'import numpy as np\n'), ((2563, 2595), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['is_train'], {}), '(is_train)\n', (2585, 2595), False, 'import torch\n'), ((2826, 2841), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2839, 2841), False, 'import torch\n'), ((2878, 2905), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (2890, 2905), False, 'import torch\n'), ((3544, 3559), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3551, 3559), True, 'import numpy as np\n'), ((3611, 3630), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (3618, 3630), True, 'import numpy as np\n'), ((3411, 3426), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3418, 3426), True, 'import numpy as np\n'), ((3438, 3457), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (3445, 3457), True, 'import numpy as np\n')] |
import json
import re
import argparse
from difflib import SequenceMatcher
from pprint import pprint
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from terminaltables import AsciiTable
from transformers import AutoTokenizer
import stanza
import udon2
from udon2.kernels import ConvPartialTreeKernel
def exists_in_distractors(distractors, dataset):
    """Return True if any question in ``dataset`` has a distractor choice whose
    text (or attached comment, when present) appears in ``distractors``."""
    for item in dataset["data"]:
        for choice in item["choices"]:
            extra = choice["extra"]
            comment = extra.get("comment") if extra else None
            if choice["type"] != "Distractor":
                continue
            if choice["text"] in distractors or (comment and comment in distractors):
                return True
    return False
def all_exist_in_distractors(distractors, dataset):
    """Return True iff every entry of ``distractors`` matches the text or
    comment of some distractor choice anywhere in ``dataset``.

    An empty ``distractors`` list vacuously returns True.
    """
    found = [False for _ in distractors]
    for item in dataset["data"]:
        for choice in item["choices"]:
            extra = choice["extra"]
            comment = extra.get("comment") if extra else None
            if choice["type"] != "Distractor":
                continue
            for idx, candidate in enumerate(distractors):
                if choice["text"] == candidate or (comment and comment == candidate):
                    found[idx] = True
    return all(found)
def exists_in_context(distractors, dataset):
    """Return True if any string in ``distractors`` occurs as a substring of
    a context.

    ``dataset`` is either a raw context string, or a dataset dict whose
    ``"data"`` items each carry a ``"context"`` field.
    """
    # Idiom cleanup: isinstance() instead of type() ==, and the `in`
    # operator instead of str.find(...) != -1 — behaviorally equivalent.
    if isinstance(dataset, str):
        return any(d in dataset for d in distractors)
    return any(d in x["context"] for x in dataset["data"] for d in distractors)
def all_exist_in_context(distractors, dataset):
    """Return True iff every string in ``distractors`` occurs in the context
    string, or — for a dataset dict — in at least one item's context.

    An empty ``distractors`` list vacuously returns True.
    """
    # Idiom cleanup: isinstance() instead of type() ==, and the `in`
    # operator instead of str.find(...) != -1 — behaviorally equivalent.
    if isinstance(dataset, str):
        return all(d in dataset for d in distractors)
    data = dataset["data"]
    return all(any(d in x["context"] for x in data) for d in distractors)
def is_same_context(ctx, dataset, overlap=False):
    """Check whether ``ctx`` already appears among the contexts in ``dataset``.

    With ``overlap=False`` an exact context match is required; with
    ``overlap=True`` it suffices that the longest common substring with any
    stored context covers more than a quarter of ``ctx``.
    """
    entries = dataset["data"]
    if not overlap:
        return any(entry["context"] == ctx for entry in entries)
    n = len(ctx)
    threshold = n / 4
    for entry in entries:
        stored = entry["context"]
        match = SequenceMatcher(None, stored, ctx).find_longest_match(0, len(stored), 0, n)
        if match.size > threshold:
            return True
    return False
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', type=str, required=True, help="Report file to process")
    parser.add_argument('-t', '--training-data', type=str, default="", help="Training data file")
    args = parser.parse_args()
    # Swedish BERT tokenizer; used later to locate each generated distractor's
    # token offset inside its context.
    tok = AutoTokenizer.from_pretrained("KB/bert-base-swedish-cased")
    training_data = json.load(open(args.training_data)) if args.training_data else None
    # Strips [SEP]/[A]-style special tokens (and stray single quotes) from report lines.
    SPECIAL_TOKENS_REGEX = r"(\[SEP\]|\[[A-Z]\]|')"
    # Swedish dependency-parsing pipeline feeding the tree kernels below.
    sv = stanza.Pipeline(lang="sv", processors='tokenize,lemma,pos,depparse')
    # Convolution partial tree kernels over GRCT representations,
    # without and with morphological features respectively.
    so_kernel = ConvPartialTreeKernel("GRCT", includeForm=False)
    so_feats_kernel = ConvPartialTreeKernel("GRCT", includeForm=False, includeFeats=True)
examples = []
report = {
"total": 0,
"correct_in_distractors": [],
"any_same_distractors": [],
"all_same_distractors": [],
"avg_length_difference": defaultdict(list),
"any_different_capitalization": [],
"any_start_with_same_word": [],
"all_start_with_same_word": [],
"subseq_repetitive_words": [],
"empty_distractors": [],
"any_exists_in_context": [],
"any_exists_in_context_and_training_ctx": [],
"any_exists_in_context_and_training_dis": [],
"any_exists_in_training_distractors": [],
"any_exists_in_training_context": [],
"all_exist_in_context": [],
"all_exist_in_context_and_training_ctx": [],
"all_exist_in_context_and_training_dis": [],
"all_exist_in_training_distractors": [],
"all_exist_in_training_context": [],
"is_context_in_training_data": [],
"context_overlaps_with_training_data": [],
"any_predicted_gold_distractors": [],
"ca_norm_tree_kernel": [],
"ca_feats_norm_tree_kernel": [],
"start_with_same_pos": 0,
"start_with_same_pos_morph": 0,
"tp": 0,
"p": 0
}
inside_example, current_id = False, -1
gen_context_position = []
    # Parse the report file. Each example consists of a "[CLS] ..." context
    # line followed by two list-literal lines: first the generated
    # distractors, then the gold distractors.
    with open(args.file) as f:
        for line in f:
            line = line.strip()
            if line.startswith("[CLS]"):
                inside_example = True
                correct = re.sub(SPECIAL_TOKENS_REGEX, "", line.split("[SEP]")[2])
                examples.append({
                    "text": line,
                    "correct": correct.strip(),
                    "gold": None,
                    "gen": None
                })
                current_id += 1
            elif inside_example:
                # First list line after the context is "gen", second is "gold".
                if examples[-1]["gen"]:
                    examples[-1]["gold"] = [
                        re.sub(SPECIAL_TOKENS_REGEX, "", x).strip()
                        for x in line[1:-1].split("', '")
                    ]
                else:
                    examples[-1]["gen"] = [
                        re.sub(SPECIAL_TOKENS_REGEX, "", x).strip()
                        for x in line[1:-1].split("', '")
                    ]
                gen, gold = examples[-1]["gen"], examples[-1]["gold"]
                correct = examples[-1]["correct"]
                context = examples[-1]["text"]
                if gen and gold:
                    # Both lists parsed: compute all statistics for this example.
                    set_gen, set_gold = set(gen), set(gold)
                    # Dependency tree of the correct answer, and its kernel norms.
                    ca = udon2.Importer.from_stanza(sv(correct).to_dict())[0]
                    ca_norm = np.sqrt(so_kernel(ca, ca))
                    ca_feats_norm = np.sqrt(so_feats_kernel(ca, ca))
                    for g in set_gen:
                        if g:
                            # Normalized tree-kernel similarity between the
                            # correct answer and each generated distractor.
                            gd = udon2.Importer.from_stanza(sv(g).to_dict())[0]
                            report["ca_norm_tree_kernel"].append(
                                so_kernel(ca, gd) / (ca_norm * np.sqrt(so_kernel(gd, gd))))
                            report["ca_feats_norm_tree_kernel"].append(
                                so_feats_kernel(ca, gd) / (ca_feats_norm * np.sqrt(so_feats_kernel(gd, gd))))
                        report["tp"] += g in set_gold
                    report["p"] += len(set_gold)
                    if correct in set_gen:
                        report["correct_in_distractors"].append(current_id)
                    if len(set_gen) != len(gen):
                        report["any_same_distractors"].append(current_id)
                    if len(set_gen) == 1:
                        report["all_same_distractors"].append(current_id)
                    if set_gen & set_gold:
                        report["any_predicted_gold_distractors"].append(current_id)
                    correct_capitalized = correct[0].isupper()
                    if any([correct_capitalized != x[0].isupper() for x in gen if x]):
                        report["any_different_capitalization"].append(current_id)
                    # this assumes a whitespace tokenization
                    cwords = correct.split()
                    dwords = [x.split() for x in gen]
                    Nc, Nd = len(cwords), [len(x) for x in dwords]
                    diff = [abs(Nc - Ndd) for Ndd in Nd]
                    report["avg_length_difference"][sum(diff) / len(diff)].append(current_id)
                    if any([x == 0 for x in Nd]):
                        report["empty_distractors"].append(current_id)
                    same_first_word = [cwords[0] == x[0] for x in dwords if x]
                    all_same_first_word = all(same_first_word)
                    if any(same_first_word) and not all_same_first_word:
                        report["any_start_with_same_word"].append(current_id)
                    if all_same_first_word:
                        report["all_start_with_same_word"].append(current_id)
                    if any([any([y == z for y, z in zip(x[:-1], x[1:])]) for x in dwords]):
                        report["subseq_repetitive_words"].append(current_id)
                    inside_example = False
                    report["total"] += 1
                    # NOTE(review): is_same_context dereferences dataset["data"],
                    # so this raises if the script is run without -t — confirm
                    # -t is effectively mandatory.
                    if is_same_context(context, training_data):
                        report["is_context_in_training_data"].append(current_id)
                    # if is_same_context(context, training_data, overlap=True):
                    #     report["context_overlaps_with_training_data"].append(current_id)
                    if training_data:
                        gen_in_train_ctx = exists_in_context(gen, training_data)
                        gen_in_train_dis = exists_in_distractors(gen, training_data)
                        if gen_in_train_ctx:
                            all_gen_in_train_ctx = all_exist_in_context(gen, training_data)
                        else:
                            all_gen_in_train_ctx = False
                        if gen_in_train_dis:
                            all_gen_in_train_dis = all_exist_in_distractors(gen, training_data)
                        else:
                            all_gen_in_train_dis = False
                    else:
                        # NOTE(review): all_gen_in_train_ctx / all_gen_in_train_dis
                        # are left unassigned on this branch; the nested checks
                        # below would raise NameError if reached without
                        # training data — verify intended usage.
                        gen_in_train_ctx, gen_in_train_dis = False, False
                    if exists_in_context(gen, context):
                        report["any_exists_in_context"].append(current_id)
                        if all_exist_in_context(gen, context):
                            report["all_exist_in_context"].append(current_id)
                            if all_gen_in_train_ctx:
                                report["all_exist_in_context_and_training_ctx"].append(current_id)
                            if all_gen_in_train_dis:
                                report["all_exist_in_context_and_training_dis"].append(current_id)
                        if gen_in_train_ctx:
                            report["any_exists_in_context_and_training_ctx"].append(current_id)
                        if gen_in_train_dis:
                            report["any_exists_in_context_and_training_dis"].append(current_id)
                    if gen_in_train_ctx:
                        report["any_exists_in_training_context"].append(current_id)
                        if all_gen_in_train_ctx:
                            report["all_exist_in_training_context"].append(current_id)
                    if gen_in_train_dis:
                        report["any_exists_in_training_distractors"].append(current_id)
                        if all_gen_in_train_dis:
                            report["all_exist_in_training_distractors"].append(current_id)
                    # Record the BERT-token offset of each generated distractor
                    # that literally occurs inside its context.
                    for gdis in gen:
                        ddp = context.find(gdis)
                        if ddp > -1:
                            gen_context_position.append(len(tok.tokenize(context[:ddp])))
                else:
                    inside_example = False
# pprint(report)
print(len(report["ca_norm_tree_kernel"]))
mode = stats.mode(report["ca_norm_tree_kernel"])
feats_mode = stats.mode(report["ca_feats_norm_tree_kernel"])
table_data = [
["Metric", "Value"],
["Total", report["total"]],
["Any of the generated distractors matches with a gold one", "{}%".format(
round(len(report["any_predicted_gold_distractors"]) * 100 / report["total"], 2))],
["The correct answer is among generated distractors", "{}%".format(
round(len(report["correct_in_distractors"]) * 100 / report["total"], 2))],
["Any (but not all) generated distractors are the same", "{}%".format(
round(len(report["any_same_distractors"]) * 100 / report["total"], 2))],
["All generated distractors are the same", "{}%".format(
round(len(report["all_same_distractors"]) * 100 / report["total"], 2))],
["Any distractor is capitalized differently from the correct answer", "{}%".format(
round(len(report["any_different_capitalization"]) * 100 / report["total"], 2))],
["Any distractor contains repetitive words", "{}%".format(
round(len(report["subseq_repetitive_words"]) * 100 / report["total"], 2))],
["Any distractor is an empty string", "{}%".format(
round(len(report["empty_distractors"]) * 100 / report["total"], 2))],
["(A) Any distractor is in its own context", "{}%".format(
round(len(report["any_exists_in_context"]) * 100 / report["total"], 2))],
["(B) Any distractor is in any context from training data", "{}%".format(
round(len(report["any_exists_in_training_context"]) * 100 / report["total"], 2))],
["(C) Any distractor is a distractor from training data", "{}%".format(
round(len(report["any_exists_in_training_distractors"]) * 100 / report["total"], 2))],
["(A) and (B)", "{}%".format(
round(len(report["any_exists_in_context_and_training_ctx"]) * 100 / report["total"], 2))],
["(A) and (C)", "{}%".format(
round(len(report["any_exists_in_context_and_training_dis"]) * 100 / report["total"], 2))],
["(A1) All distractors are in their own context", "{}%".format(
round(len(report["all_exist_in_context"]) * 100 / report["total"], 2))],
["(B1) All distractors are in any context from training data", "{}%".format(
round(len(report["all_exist_in_training_context"]) * 100 / report["total"], 2))],
["(C1) All distractors are distractors from training data", "{}%".format(
round(len(report["all_exist_in_training_distractors"]) * 100 / report["total"], 2))],
["(A1) and (B1)", "{}%".format(
round(len(report["all_exist_in_context_and_training_ctx"]) * 100 / report["total"], 2))],
["(A1) and (C1)", "{}%".format(
round(len(report["all_exist_in_context_and_training_dis"]) * 100 / report["total"], 2))],
["Normalized conv. kernel (SO)", "{} +/- {}".format(
round(np.mean(report["ca_norm_tree_kernel"]), 2),
round(np.std(report["ca_norm_tree_kernel"]), 2))],
["Median normalized conv. kernel (SO)", "{}".format(
round(np.median(report["ca_norm_tree_kernel"]), 2))],
["Mode normalized conv. kernel (SO)", "{} ({}%)".format(
round(mode[0][0], 2), round(mode[1][0] * 100 / len(report["ca_norm_tree_kernel"]), 2))],
["Normalized conv. kernel (SO, feats)", "{} +/- {}".format(
round(np.mean(report["ca_feats_norm_tree_kernel"]), 2),
round(np.std(report["ca_feats_norm_tree_kernel"]), 2))],
["Median normalized conv. kernel (SO, feats)", "{}".format(
round(np.median(report["ca_feats_norm_tree_kernel"]), 2))],
["Mode normalized conv. kernel (SO, feats)", "{} ({}%)".format(
round(feats_mode[0][0], 2), round(feats_mode[1][0] * 100 / len(report["ca_feats_norm_tree_kernel"]), 2))],
["Distractor recall", "{}%".format(round(report["tp"] * 100 / report["p"], 2))]
# ["A context exists in training data", "{}%".format(
# round(len(report["is_context_in_training_data"]) * 100 / report["total"], 2))]
# ["A context overlaps with training data (> 25\% overlap)", "{}%".format(
# round(len(report["context_overlaps_with_training_data"]) * 100 / report["total"], 2))]
]
t = AsciiTable(table_data)
print(t.table)
plt.hist(gen_context_position, bins=range(min(gen_context_position), max(gen_context_position)))
plt.show() | [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"scipy.stats.mode",
"numpy.std",
"numpy.median",
"terminaltables.AsciiTable",
"udon2.kernels.ConvPartialTreeKernel",
"difflib.SequenceMatcher",
"collections.defaultdict",
"transformers.AutoTokenizer.from_pretrained",
"numpy.mean",
"stanza.Pi... | [((2553, 2578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2576, 2578), False, 'import argparse\n'), ((2815, 2874), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""KB/bert-base-swedish-cased"""'], {}), "('KB/bert-base-swedish-cased')\n", (2844, 2874), False, 'from transformers import AutoTokenizer\n'), ((3027, 3095), 'stanza.Pipeline', 'stanza.Pipeline', ([], {'lang': '"""sv"""', 'processors': '"""tokenize,lemma,pos,depparse"""'}), "(lang='sv', processors='tokenize,lemma,pos,depparse')\n", (3042, 3095), False, 'import stanza\n'), ((3112, 3160), 'udon2.kernels.ConvPartialTreeKernel', 'ConvPartialTreeKernel', (['"""GRCT"""'], {'includeForm': '(False)'}), "('GRCT', includeForm=False)\n", (3133, 3160), False, 'from udon2.kernels import ConvPartialTreeKernel\n'), ((3183, 3250), 'udon2.kernels.ConvPartialTreeKernel', 'ConvPartialTreeKernel', (['"""GRCT"""'], {'includeForm': '(False)', 'includeFeats': '(True)'}), "('GRCT', includeForm=False, includeFeats=True)\n", (3204, 3250), False, 'from udon2.kernels import ConvPartialTreeKernel\n'), ((11339, 11380), 'scipy.stats.mode', 'stats.mode', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (11349, 11380), False, 'from scipy import stats\n'), ((11398, 11445), 'scipy.stats.mode', 'stats.mode', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (11408, 11445), False, 'from scipy import stats\n'), ((15716, 15738), 'terminaltables.AsciiTable', 'AsciiTable', (['table_data'], {}), '(table_data)\n', (15726, 15738), False, 'from terminaltables import AsciiTable\n'), ((15864, 15874), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15872, 15874), True, 'import matplotlib.pyplot as plt\n'), ((3448, 3465), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3459, 3465), False, 'from collections import defaultdict\n'), ((2212, 2252), 'difflib.SequenceMatcher', 
'SequenceMatcher', (['None', "x['context']", 'ctx'], {}), "(None, x['context'], ctx)\n", (2227, 2252), False, 'from difflib import SequenceMatcher\n'), ((14338, 14376), 'numpy.mean', 'np.mean', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (14345, 14376), True, 'import numpy as np\n'), ((14400, 14437), 'numpy.std', 'np.std', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (14406, 14437), True, 'import numpy as np\n'), ((14524, 14564), 'numpy.median', 'np.median', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (14533, 14564), True, 'import numpy as np\n'), ((14824, 14868), 'numpy.mean', 'np.mean', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (14831, 14868), True, 'import numpy as np\n'), ((14892, 14935), 'numpy.std', 'np.std', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (14898, 14935), True, 'import numpy as np\n'), ((15029, 15075), 'numpy.median', 'np.median', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (15038, 15075), True, 'import numpy as np\n'), ((5166, 5201), 're.sub', 're.sub', (['SPECIAL_TOKENS_REGEX', '""""""', 'x'], {}), "(SPECIAL_TOKENS_REGEX, '', x)\n", (5172, 5201), False, 'import re\n'), ((5380, 5415), 're.sub', 're.sub', (['SPECIAL_TOKENS_REGEX', '""""""', 'x'], {}), "(SPECIAL_TOKENS_REGEX, '', x)\n", (5386, 5415), False, 'import re\n')] |
'''
From https://github.com/tsc2017/Frechet-Inception-Distance
Code derived from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
Usage:
Call get_fid(images1, images2)
Args:
images1, images2: Numpy arrays with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary.
dtype of the images is recommended to be np.uint8 to save CPU memory.
Returns:
Frechet Inception Distance between the two image distributions.
'''
import tensorflow as tf
import os, sys
import functools
import numpy as np
import time
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
tfgan = tf.contrib.gan
session = tf.InteractiveSession()
# A smaller BATCH_SIZE reduces GPU memory usage, but at the cost of a slight slowdown
BATCH_SIZE = 64
# Run images through Inception.
inception_images = tf.placeholder(tf.float32, [BATCH_SIZE, 3, None, None])
activations1 = tf.placeholder(tf.float32, [None, None], name = 'activations1')
activations2 = tf.placeholder(tf.float32, [None, None], name = 'activations2')
fcd = tfgan.eval.frechet_classifier_distance_from_activations(activations1, activations2)
def inception_activations(images = inception_images, num_splits = 1):
images = tf.transpose(images, [0, 2, 3, 1])
size = 299
images = tf.image.resize_bilinear(images, [size, size])
generated_images_list = array_ops.split(images, num_or_size_splits = num_splits)
activations = functional_ops.map_fn(
fn = functools.partial(tfgan.eval.run_inception, output_tensor = 'pool_3:0'),
elems = array_ops.stack(generated_images_list),
parallel_iterations = 1,
back_prop = False,
swap_memory = True,
name = 'RunClassifier')
activations = array_ops.concat(array_ops.unstack(activations), 0)
return activations
activations =inception_activations()
def get_inception_activations(inps):
n_batches = inps.shape[0]//BATCH_SIZE
act = np.zeros([n_batches * BATCH_SIZE, 2048], dtype = np.float32)
for i in range(n_batches):
inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
act[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] = activations.eval(feed_dict = {inception_images: inp})
return act
def activations2distance(act1, act2):
return fcd.eval(feed_dict = {activations1: act1, activations2: act2})
def get_fid(images1, images2):
assert(type(images1) == np.ndarray)
assert(len(images1.shape) == 4)
assert(images1.shape[1] == 3)
assert(np.min(images1[0]) >= 0 and np.max(images1[0]) > 10), 'Image values should be in the range [0, 255]'
assert(type(images2) == np.ndarray)
assert(len(images2.shape) == 4)
assert(images2.shape[1] == 3)
assert(np.min(images2[0]) >= 0 and np.max(images2[0]) > 10), 'Image values should be in the range [0, 255]'
assert(images1.shape == images2.shape), 'The two numpy arrays must have the same shape'
print('Calculating FID with %i images from each distribution' % (images1.shape[0]))
start_time = time.time()
act1 = get_inception_activations(images1)
act2 = get_inception_activations(images2)
fid = activations2distance(act1, act2)
print('FID calculation time: %f s' % (time.time() - start_time))
return fid
| [
"functools.partial",
"numpy.zeros",
"tensorflow.transpose",
"time.time",
"tensorflow.placeholder",
"tensorflow.python.ops.array_ops.split",
"numpy.min",
"numpy.max",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.InteractiveSession",
"tensorflow.python.ops.array_ops.stack",
"tensorflow... | [((780, 803), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (801, 803), True, 'import tensorflow as tf\n'), ((959, 1014), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[BATCH_SIZE, 3, None, None]'], {}), '(tf.float32, [BATCH_SIZE, 3, None, None])\n', (973, 1014), True, 'import tensorflow as tf\n'), ((1030, 1091), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {'name': '"""activations1"""'}), "(tf.float32, [None, None], name='activations1')\n", (1044, 1091), True, 'import tensorflow as tf\n'), ((1109, 1170), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {'name': '"""activations2"""'}), "(tf.float32, [None, None], name='activations2')\n", (1123, 1170), True, 'import tensorflow as tf\n'), ((1347, 1381), 'tensorflow.transpose', 'tf.transpose', (['images', '[0, 2, 3, 1]'], {}), '(images, [0, 2, 3, 1])\n', (1359, 1381), True, 'import tensorflow as tf\n'), ((1410, 1456), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['images', '[size, size]'], {}), '(images, [size, size])\n', (1434, 1456), True, 'import tensorflow as tf\n'), ((1485, 1539), 'tensorflow.python.ops.array_ops.split', 'array_ops.split', (['images'], {'num_or_size_splits': 'num_splits'}), '(images, num_or_size_splits=num_splits)\n', (1500, 1539), False, 'from tensorflow.python.ops import array_ops\n'), ((2066, 2124), 'numpy.zeros', 'np.zeros', (['[n_batches * BATCH_SIZE, 2048]'], {'dtype': 'np.float32'}), '([n_batches * BATCH_SIZE, 2048], dtype=np.float32)\n', (2074, 2124), True, 'import numpy as np\n'), ((3144, 3155), 'time.time', 'time.time', ([], {}), '()\n', (3153, 3155), False, 'import time\n'), ((1880, 1910), 'tensorflow.python.ops.array_ops.unstack', 'array_ops.unstack', (['activations'], {}), '(activations)\n', (1897, 1910), False, 'from tensorflow.python.ops import array_ops\n'), ((1596, 1665), 'functools.partial', 'functools.partial', 
(['tfgan.eval.run_inception'], {'output_tensor': '"""pool_3:0"""'}), "(tfgan.eval.run_inception, output_tensor='pool_3:0')\n", (1613, 1665), False, 'import functools\n'), ((1685, 1723), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['generated_images_list'], {}), '(generated_images_list)\n', (1700, 1723), False, 'from tensorflow.python.ops import array_ops\n'), ((2624, 2642), 'numpy.min', 'np.min', (['images1[0]'], {}), '(images1[0])\n', (2630, 2642), True, 'import numpy as np\n'), ((2652, 2670), 'numpy.max', 'np.max', (['images1[0]'], {}), '(images1[0])\n', (2658, 2670), True, 'import numpy as np\n'), ((2846, 2864), 'numpy.min', 'np.min', (['images2[0]'], {}), '(images2[0])\n', (2852, 2864), True, 'import numpy as np\n'), ((2874, 2892), 'numpy.max', 'np.max', (['images2[0]'], {}), '(images2[0])\n', (2880, 2892), True, 'import numpy as np\n'), ((3333, 3344), 'time.time', 'time.time', ([], {}), '()\n', (3342, 3344), False, 'import time\n')] |
#!/usr/bin/python3.6
import os
import re
import sys
import yaml
from glob import glob
from collections import OrderedDict
from typing import List
import numpy as np
import pandas as pd
from tqdm import tqdm
from metrics import F_score
from debug import dprint
IN_KERNEL = os.environ.get('KAGGLE_WORKING_DIR') is not None
INPUT_PATH = '../input/imet-2019-fgvc6/' if IN_KERNEL else '../input/'
NUM_ATTEMPTS = 100
NUM_FOLDS = 5
NUM_CLASSES = 1103
if __name__ == '__main__':
if len(sys.argv) < 4:
print(f'usage: {sys.argv[0]} <ensemble_name> predict1.npy ...')
sys.exit()
ensemble_name, predicts = sys.argv[1], sys.argv[2:]
level1_filenames: List[List[str]] = []
level1_train_predicts: List[List[np.array]] = []
# load labels
fold_num = np.load('folds.npy')
train_df = pd.read_csv(INPUT_PATH + 'train.csv')
def parse_labels(s: str) -> np.array:
res = np.zeros(NUM_CLASSES)
res[list(map(int, s.split()))] = 1
return res - 0.5 # we use zero threshold instead of 0.5
all_labels = np.vstack(list(map(parse_labels, train_df.attribute_ids)))
dprint(fold_num.shape)
dprint(all_labels.shape)
# build a list of models, for every model build a list of predicts
for predict in predicts:
assert 'level1_train_' in predict
m = re.match(r'(.*)_f(\d)_e\d+.*\.npy', predict)
assert m
model_path = m.group(1)
level1_fnames, level1_train = [], []
for fold in range(NUM_FOLDS):
filenames = glob(f'{model_path}_f{fold}_*.npy')
assert len(filenames) == 1 # the model must be unique in this fold
filename = filenames[0]
print('found', filename)
level1_fnames.append(filename)
level1_train.append(np.load(filename))
level1_filenames.append(level1_fnames)
level1_train_predicts.append(level1_train)
# search for the best blend weights
best_weights = np.ones(len(level1_train_predicts))
best_score = 0.0
for _ in tqdm(range(NUM_ATTEMPTS)):
# print('-' * 50)
weights = np.random.rand(len(level1_train_predicts))
weights /= sum(weights)
all_predicts = np.zeros_like(all_labels)
for lvl1_predicts, w in zip(level1_train_predicts, weights):
model_predict = np.zeros_like(all_labels)
for fold, lvl1_pred in enumerate(lvl1_predicts):
predict = lvl1_pred * w
model_predict[fold_num == fold] = predict
all_predicts += model_predict
score = F_score(all_predicts, all_labels, beta=2, threshold=0)
if score > best_score:
best_score, best_weights = score, weights
print('best_score', best_score, 'weights', weights)
# generate an ensemble description file
ensemble = []
for model, weight in zip(level1_filenames, best_weights):
model_filenames = [os.path.basename(f) for f in model]
ensemble.append({'predicts': model_filenames, 'weight': weight.item()})
filename = f'{ensemble_name}_val_{best_score:.04f}.yml'
print('saving weights to', filename)
with open(filename, 'w') as f:
yaml.dump(ensemble, f)
| [
"numpy.load",
"numpy.zeros_like",
"metrics.F_score",
"pandas.read_csv",
"os.path.basename",
"yaml.dump",
"numpy.zeros",
"re.match",
"os.environ.get",
"debug.dprint",
"glob.glob",
"sys.exit"
] | [((277, 313), 'os.environ.get', 'os.environ.get', (['"""KAGGLE_WORKING_DIR"""'], {}), "('KAGGLE_WORKING_DIR')\n", (291, 313), False, 'import os\n'), ((781, 801), 'numpy.load', 'np.load', (['"""folds.npy"""'], {}), "('folds.npy')\n", (788, 801), True, 'import numpy as np\n'), ((817, 854), 'pandas.read_csv', 'pd.read_csv', (["(INPUT_PATH + 'train.csv')"], {}), "(INPUT_PATH + 'train.csv')\n", (828, 854), True, 'import pandas as pd\n'), ((1125, 1147), 'debug.dprint', 'dprint', (['fold_num.shape'], {}), '(fold_num.shape)\n', (1131, 1147), False, 'from debug import dprint\n'), ((1152, 1176), 'debug.dprint', 'dprint', (['all_labels.shape'], {}), '(all_labels.shape)\n', (1158, 1176), False, 'from debug import dprint\n'), ((583, 593), 'sys.exit', 'sys.exit', ([], {}), '()\n', (591, 593), False, 'import sys\n'), ((912, 933), 'numpy.zeros', 'np.zeros', (['NUM_CLASSES'], {}), '(NUM_CLASSES)\n', (920, 933), True, 'import numpy as np\n'), ((1332, 1378), 're.match', 're.match', (['"""(.*)_f(\\\\d)_e\\\\d+.*\\\\.npy"""', 'predict'], {}), "('(.*)_f(\\\\d)_e\\\\d+.*\\\\.npy', predict)\n", (1340, 1378), False, 'import re\n'), ((2216, 2241), 'numpy.zeros_like', 'np.zeros_like', (['all_labels'], {}), '(all_labels)\n', (2229, 2241), True, 'import numpy as np\n'), ((2586, 2640), 'metrics.F_score', 'F_score', (['all_predicts', 'all_labels'], {'beta': '(2)', 'threshold': '(0)'}), '(all_predicts, all_labels, beta=2, threshold=0)\n', (2593, 2640), False, 'from metrics import F_score\n'), ((3206, 3228), 'yaml.dump', 'yaml.dump', (['ensemble', 'f'], {}), '(ensemble, f)\n', (3215, 3228), False, 'import yaml\n'), ((1534, 1569), 'glob.glob', 'glob', (['f"""{model_path}_f{fold}_*.npy"""'], {}), "(f'{model_path}_f{fold}_*.npy')\n", (1538, 1569), False, 'from glob import glob\n'), ((2340, 2365), 'numpy.zeros_like', 'np.zeros_like', (['all_labels'], {}), '(all_labels)\n', (2353, 2365), True, 'import numpy as np\n'), ((2944, 2963), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', 
(2960, 2963), False, 'import os\n'), ((1798, 1815), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1805, 1815), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import, division
import numpy as np
from distutils.version import LooseVersion
def allbadtonan(function):
"""
Wrapper of numpy's nansum etc.: for <=1.8, just return the function's
results. For >=1.9, any axes with all-nan values will have all-nan outputs
in the collapsed version
"""
def f(data, axis=None, keepdims=None):
if keepdims is None:
result = function(data, axis=axis)
else:
result = function(data, axis=axis, keepdims=keepdims)
if LooseVersion(np.__version__) >= LooseVersion('1.9.0') and hasattr(result, '__len__'):
if axis is None:
if np.all(np.isnan(data)):
return np.nan
else:
return result
if keepdims is None:
nans = np.all(np.isnan(data), axis=axis)
else:
nans = np.all(np.isnan(data), axis=axis, keepdims=keepdims)
result[nans] = np.nan
return result
return f
| [
"distutils.version.LooseVersion",
"numpy.isnan"
] | [((565, 593), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (577, 593), False, 'from distutils.version import LooseVersion\n'), ((597, 618), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.9.0"""'], {}), "('1.9.0')\n", (609, 618), False, 'from distutils.version import LooseVersion\n'), ((706, 720), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (714, 720), True, 'import numpy as np\n'), ((876, 890), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (884, 890), True, 'import numpy as np\n'), ((951, 965), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (959, 965), True, 'import numpy as np\n')] |
import numpy as np
def calc_mass(m, l_rod):
M = np.array([[m, 0., 0.], [0., m, 0.], [0., 0., (1. / 12.) * m * l_rod * l_rod]])
return M
def calc_rot(q):
theta = q[2][0]
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s, 0.), (s, c, 0.), (0., 0., 1.)))
return R
# Location of the rev joint - 1st end
def calc_rD1(q, l_rod):
R = calc_rot(q)
rD = np.array([q[0][0], q[1][0], 0.]) + np.matmul(R, np.array([-l_rod / 2., 0., 0.]))
return rD
def calc_A(q, l):
rG1 = np.array([q[0][0], q[1][0], 0.])
rOrg = calc_rD1(q, l)
rOrgG1 = rOrg - rG1
A = np.array([[0., 1., rOrgG1[0]]])
return A
def calc_Qg(m_p, g):
Qg = m_p*g
return Qg
def calcDrivingForce(q, l, f):
fVec = np.array([[f],[0.],[(f*np.sin(q[2][0]))*(l/2.0)]])
return fVec
def step_sim(f, q, qd, mass, l, h):
# print(f)
g = np.array([[0.], [-9.81], [0.]])
m = calc_mass(mass, l)
W = np.zeros((4, 4))
b = np.zeros((4, 1))
Qg = calc_Qg(mass, g)
Org = np.array([0., 0., 0.])
# Compliance
# C = 1.e-8 * np.identity(6)
# start the step calculations
rO = calc_rD1(q, l)
phi = -(rO - Org) / h
A = calc_A(q, l)
W[0:3, 0:3] = m
W[0:3, 3:4] = A.transpose()
W[3:4, 0:3] = A
# W[9:15, 9:15] = C
fVec = calcDrivingForce(q,l,f)
b[0:3, ] = h * Qg + np.matmul(m, qd) + h * fVec
b[3,] = np.array([phi[1]])
X = np.linalg.solve(W, b)
qd = X[0:3, ]
qp = q
q = qp + h * qd
# print(q[2][0])
if q[2][0] > 2. * np.pi:
q[2][0] = q[2][0] - 2.*np.pi
if q[2][0] < -2. * np.pi:
q[2][0] = q[2][0] + 2. * np.pi
return q, qd
| [
"numpy.zeros",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.matmul",
"numpy.linalg.solve"
] | [((53, 141), 'numpy.array', 'np.array', (['[[m, 0.0, 0.0], [0.0, m, 0.0], [0.0, 0.0, 1.0 / 12.0 * m * l_rod * l_rod]]'], {}), '([[m, 0.0, 0.0], [0.0, m, 0.0], [0.0, 0.0, 1.0 / 12.0 * m * l_rod *\n l_rod]])\n', (61, 141), True, 'import numpy as np\n'), ((232, 286), 'numpy.array', 'np.array', (['((c, -s, 0.0), (s, c, 0.0), (0.0, 0.0, 1.0))'], {}), '(((c, -s, 0.0), (s, c, 0.0), (0.0, 0.0, 1.0)))\n', (240, 286), True, 'import numpy as np\n'), ((511, 544), 'numpy.array', 'np.array', (['[q[0][0], q[1][0], 0.0]'], {}), '([q[0][0], q[1][0], 0.0])\n', (519, 544), True, 'import numpy as np\n'), ((604, 637), 'numpy.array', 'np.array', (['[[0.0, 1.0, rOrgG1[0]]]'], {}), '([[0.0, 1.0, rOrgG1[0]]])\n', (612, 637), True, 'import numpy as np\n'), ((871, 904), 'numpy.array', 'np.array', (['[[0.0], [-9.81], [0.0]]'], {}), '([[0.0], [-9.81], [0.0]])\n', (879, 904), True, 'import numpy as np\n'), ((938, 954), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (946, 954), True, 'import numpy as np\n'), ((963, 979), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (971, 979), True, 'import numpy as np\n'), ((1017, 1042), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1025, 1042), True, 'import numpy as np\n'), ((1394, 1412), 'numpy.array', 'np.array', (['[phi[1]]'], {}), '([phi[1]])\n', (1402, 1412), True, 'import numpy as np\n'), ((1422, 1443), 'numpy.linalg.solve', 'np.linalg.solve', (['W', 'b'], {}), '(W, b)\n', (1437, 1443), True, 'import numpy as np\n'), ((195, 208), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (201, 208), True, 'import numpy as np\n'), ((210, 223), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (216, 223), True, 'import numpy as np\n'), ((387, 420), 'numpy.array', 'np.array', (['[q[0][0], q[1][0], 0.0]'], {}), '([q[0][0], q[1][0], 0.0])\n', (395, 420), True, 'import numpy as np\n'), ((435, 469), 'numpy.array', 'np.array', (['[-l_rod / 2.0, 0.0, 0.0]'], {}), '([-l_rod / 2.0, 0.0, 0.0])\n', (443, 
469), True, 'import numpy as np\n'), ((1354, 1370), 'numpy.matmul', 'np.matmul', (['m', 'qd'], {}), '(m, qd)\n', (1363, 1370), True, 'import numpy as np\n'), ((766, 781), 'numpy.sin', 'np.sin', (['q[2][0]'], {}), '(q[2][0])\n', (772, 781), True, 'import numpy as np\n')] |
import numpy as np
import astropy.units as u
import classes
dist = classes.Distribution('planets3_bottomup/')
ms = np.round(np.logspace(np.log10(0.1), np.log10(13*u.Mjup.to('Mearth')),100),3)
aas = np.round(np.logspace(np.log10(0.1), np.log10(10),100), 3)[::-1]
aas2 = np.round(np.logspace(np.log10(0.04), np.log10(10),100), 3)[::-1]
spliced_aas = np.append(aas[:-51], aas2[43:-13])
np.round(np.log10(ms),3)
dist.fit(ms=ms, aas=aas)
| [
"numpy.append",
"numpy.log10",
"astropy.units.Mjup.to",
"classes.Distribution"
] | [((68, 110), 'classes.Distribution', 'classes.Distribution', (['"""planets3_bottomup/"""'], {}), "('planets3_bottomup/')\n", (88, 110), False, 'import classes\n'), ((349, 383), 'numpy.append', 'np.append', (['aas[:-51]', 'aas2[43:-13]'], {}), '(aas[:-51], aas2[43:-13])\n', (358, 383), True, 'import numpy as np\n'), ((393, 405), 'numpy.log10', 'np.log10', (['ms'], {}), '(ms)\n', (401, 405), True, 'import numpy as np\n'), ((137, 150), 'numpy.log10', 'np.log10', (['(0.1)'], {}), '(0.1)\n', (145, 150), True, 'import numpy as np\n'), ((220, 233), 'numpy.log10', 'np.log10', (['(0.1)'], {}), '(0.1)\n', (228, 233), True, 'import numpy as np\n'), ((235, 247), 'numpy.log10', 'np.log10', (['(10)'], {}), '(10)\n', (243, 247), True, 'import numpy as np\n'), ((291, 305), 'numpy.log10', 'np.log10', (['(0.04)'], {}), '(0.04)\n', (299, 305), True, 'import numpy as np\n'), ((307, 319), 'numpy.log10', 'np.log10', (['(10)'], {}), '(10)\n', (315, 319), True, 'import numpy as np\n'), ((164, 183), 'astropy.units.Mjup.to', 'u.Mjup.to', (['"""Mearth"""'], {}), "('Mearth')\n", (173, 183), True, 'import astropy.units as u\n')] |
r"""Postprocessing Laplace equation.
A basic postprocessing step in finite element analysis is evaluating linear
forms over the solution. For the Poisson equation, the integral
of the solution (normalized by the area) is the 'Boussinesq k-factor'; for
the square it's roughly 0.03514, for the circle 1/Pi/8 = 0.03979. Linear forms
are easily evaluated in skfem using the 1-D arrays assembled using the
@LinearForm decorator. In :ref:`poisson`, the linear form required for simple
integration happens to be the same one used on the right-hand side of the
differential equation, so it's already to hand.
Another is interpolation; i.e. evaluation of the solution at a
specified point which isn't necessarily a node of the mesh. For this
problem, the maximum of the solution (normalized by the area) is the
'Boussinesq k'-factor'; by symmetry, this occurs for squares (k' =
0.07363) and circles (k' = 1/Pi/4) at the centre and so can be
evaluated by interpolation.
"""
from pathlib import Path
from skfem import *
from skfem.models.poisson import laplace, unit_load
from skfem.io.json import from_file
import numpy as np
m = MeshTri.init_circle(4)
basis = InteriorBasis(m, ElementTriP2())
A = asm(laplace, basis)
b = asm(unit_load, basis)
x = solve(*condense(A, b, D=basis.find_dofs()))
area = sum(b)
k = b @ x / area**2
k1, = basis.probes(np.zeros((2, 1)))(x) / area
if __name__ == '__main__':
from skfem.visuals.matplotlib import plot, show
print('area = {:.4f} (exact = {:.4f})'.format(area, np.pi))
print('k = {:.5f} (exact = 1/8/pi = {:.5f})'.format(k, 1/np.pi/8))
print("k' = {:.5f} (exact = 1/4/pi = {:.5f})".format(k1, 1/np.pi/4))
plot(basis, x)
show()
| [
"skfem.visuals.matplotlib.show",
"skfem.visuals.matplotlib.plot",
"numpy.zeros"
] | [((1669, 1683), 'skfem.visuals.matplotlib.plot', 'plot', (['basis', 'x'], {}), '(basis, x)\n', (1673, 1683), False, 'from skfem.visuals.matplotlib import plot, show\n'), ((1688, 1694), 'skfem.visuals.matplotlib.show', 'show', ([], {}), '()\n', (1692, 1694), False, 'from skfem.visuals.matplotlib import plot, show\n'), ((1347, 1363), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1355, 1363), True, 'import numpy as np\n')] |
"""
This benchmark compares the performance of different models in
predicting tissue based on gene expression
"""
import argparse
import os
import numpy as np
import pandas as pd
import sklearn.metrics
import yaml
from sklearn.preprocessing import MinMaxScaler
from saged import utils, datasets, models
AVAILABLE_TISSUES = ['Blood', 'Breast', 'Stem Cell', 'Cervix', 'Brain', 'Kidney',
'Umbilical Cord', 'Lung', 'Epithelium', 'Prostate', 'Liver',
'Heart', 'Skin', 'Colon', 'Bone Marrow', 'Muscle', 'Tonsil', 'Blood Vessel',
'Spinal Cord', 'Testis', 'Placenta', 'Bladder', 'Adipose Tisse', 'Ovary',
'Melanoma', 'Adrenal Gland', 'Bone', 'Pancreas', 'Penis',
'Universal reference', 'Spleen', 'Brain reference', 'Large Intestine',
'Esophagus', 'Small Intestine', 'Embryonic kidney', 'Thymus', 'Stomach',
'Endometrium', 'Glioblastoma', 'Gall bladder', 'Lymph Nodes', 'Airway',
'Appendix', 'Thyroid', 'Retina', 'Bowel tissue', 'Foreskin', 'Sperm', 'Foot',
'Cerebellum', 'Cerebral cortex', 'Salivary Gland', 'Duodenum'
]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dataset_config',
help='The yaml formatted dataset configuration file. For more information '
'about this file read the comments in the example_dataset.yml file')
parser.add_argument('supervised_config',
help='The yaml formatted model configuration file. For more information '
'about this file read the comments in the example_model.yml file')
parser.add_argument('out_file',
help='The file to save the results to')
parser.add_argument('--tissue1',
help='The first tissue to be predicted from the data',
default='Blood', choices=AVAILABLE_TISSUES)
parser.add_argument('--tissue2',
help='The second tissue to be predicted from the data',
default='Breast', choices=AVAILABLE_TISSUES)
parser.add_argument('--neptune_config',
help='A yaml formatted file containing init information for '
'neptune logging')
parser.add_argument('--seed',
help='The random seed to be used in splitting data',
type=int,
default=42)
parser.add_argument('--num_splits',
help='The number of splits to use in cross-validation',
type=int,
default=5)
parser.add_argument('--batch_correction_method',
help='The method to use to correct for batch effects',
default=None)
parser.add_argument('--all_tissue', help='Predict all common tissues in the dataset',
default=False, action='store_true')
parser.add_argument('--biobert', help='Add biobert embeddings as features the model can use',
default=False, action='store_true')
args = parser.parse_args()
with open(args.dataset_config) as in_file:
dataset_config = yaml.safe_load(in_file)
expression_df, sample_to_label, sample_to_study = utils.load_recount_data(args.dataset_config)
if args.biobert:
embeddings = utils.load_biobert_embeddings(args.dataset_config)
# These indices are correct, the expression dataframe is genes x samples currently
placeholder_array = np.ones((embeddings.shape[1], expression_df.shape[1]))
with open(dataset_config['metadata_path'], 'r') as in_file:
header = in_file.readline()
header = header.replace('"', '')
header = header.strip().split('\t')
# Add one to the indices to account for the index column in metadata not present in the
# header
sample_index = header.index('external_id') + 1
for line_number, metadata_line in enumerate(in_file):
line = metadata_line.strip().split('\t')
sample = line[sample_index]
sample = sample.replace('"', '')
# Not all samples with metadata are in compendium
if sample not in expression_df.columns:
continue
index_in_df = expression_df.columns.get_loc(sample)
placeholder_array[:, index_in_df] = embeddings[line_number, :]
# 0-1 normalize embeddings to match scale of expression
scaler = MinMaxScaler()
placeholder_array = scaler.fit_transform(placeholder_array.T).T
embedding_df = pd.DataFrame(placeholder_array, columns=expression_df.columns)
expression_df = pd.concat([expression_df, embedding_df], axis='rows')
all_data = datasets.RefineBioMixedDataset(expression_df, sample_to_label, sample_to_study)
labeled_data = all_data.get_labeled()
labels_to_keep = None
if args.all_tissue:
# Keep all labels with at least ten studies in the dataset
labels_to_keep = ['Blood', 'Breast', 'Stem Cell', 'Cervix', 'Brain', 'Kidney',
'Umbilical Cord', 'Lung', 'Epithelium', 'Prostate', 'Liver',
'Heart', 'Skin', 'Colon', 'Bone Marrow', 'Muscle', 'Tonsil',
'Blood Vessel', 'Spinal Cord', 'Testis', 'Placenta'
]
else:
labels_to_keep = [args.tissue1, args.tissue2]
labeled_data.subset_samples_to_labels(labels_to_keep)
# Correct for batch effects
if args.batch_correction_method is not None:
labeled_data = all_data.get_labeled()
labeled_data.subset_samples_to_labels(labels_to_keep)
labeled_data = datasets.correct_batch_effects(labeled_data, args.batch_correction_method)
labeled_data.recode()
label_encoder = labeled_data.get_label_encoder()
# Get fivefold cross-validation splits
print('CV splitting')
labeled_splits = labeled_data.get_cv_splits(num_splits=args.num_splits, seed=args.seed)
# Train the model on each fold
accuracies = []
balanced_accuracies = []
f1_scores = []
supervised_train_studies = []
supervised_train_sample_names = []
supervised_val_sample_names = []
supervised_train_sample_counts = []
subset_percents = []
for i in range(len(labeled_splits)):
for subset_number in range(1, 11, 1):
# The new neptune version doesn't have a create_experiment function so you have to
# reinitialize per-model
neptune_run = None
# Parse config file
if args.neptune_config is not None:
with open(args.neptune_config) as neptune_file:
neptune_config = yaml.safe_load(neptune_file)
neptune_run = utils.initialize_neptune(neptune_config)
subset_percent = subset_number * .1
train_list = labeled_splits[:i] + labeled_splits[i+1:]
# Extract the train and test datasets
LabeledDatasetClass = type(labeled_data)
train_data = LabeledDatasetClass.from_list(train_list)
val_data = labeled_splits[i]
# This isn't strictly necessary since we're checking whether both classes are present,
# but it's safer
train_data.set_label_encoder(label_encoder)
val_data.set_label_encoder(label_encoder)
if not args.all_tissue:
train_data = utils.subset_to_equal_ratio(train_data, val_data, args.tissue1,
args.tissue2, args.seed)
# Now that the ratio is correct, actually subset the samples
train_data = train_data.subset_samples(subset_percent,
args.seed)
# Skip entries where there is only data for one class
if len(train_data.get_classes()) <= 1 or len(val_data.get_classes()) <= 1:
continue
if args.neptune_config is not None:
neptune_run['samples'] = len(train_data.get_samples())
neptune_run['studies'] = len(train_data.get_studies())
print('Samples: {}'.format(len(train_data.get_samples())))
print('Studies: {}'.format(len(train_data.get_studies())))
print('Val data: {}'.format(len(val_data)))
input_size = len(train_data.get_features())
output_size = len(train_data.get_classes())
print('Classes: {}'.format(output_size))
with open(args.supervised_config) as supervised_file:
supervised_config = yaml.safe_load(supervised_file)
supervised_config['input_size'] = input_size
supervised_config['output_size'] = output_size
if 'save_path' in supervised_config:
# Append script-specific information to model save file
save_path = supervised_config['save_path']
# Remove extension
save_path = os.path.splitext(save_path)[0]
if args.all_tissue and args.biobert:
extra_info = 'all_tissue_biobert'
elif args.all_tissue:
extra_info = 'all_tissue'
elif args.biobert:
extra_info = 'biobert'
else:
extra_info = '{}-{}'.format(args.tissue1, args.tissue2)
extra_info = '{}_{}_{}'.format(extra_info, i, args.seed)
save_path = os.path.join(save_path + '_predict_{}.pt'.format(extra_info))
supervised_config['save_path'] = save_path
supervised_model_type = supervised_config.pop('name')
SupervisedClass = getattr(models, supervised_model_type)
supervised_model = SupervisedClass(**supervised_config)
supervised_model.fit(train_data, neptune_run)
predictions, true_labels = supervised_model.evaluate(val_data)
supervised_model.free_memory()
accuracy = sklearn.metrics.accuracy_score(true_labels, predictions)
positive_label_encoding = train_data.get_label_encoding(args.tissue1)
balanced_acc = sklearn.metrics.balanced_accuracy_score(true_labels, predictions)
if args.all_tissue:
f1_score = 'NA'
else:
f1_score = sklearn.metrics.f1_score(true_labels, predictions,
pos_label=positive_label_encoding,
average='binary')
accuracies.append(accuracy)
balanced_accuracies.append(balanced_acc)
f1_scores.append(f1_score)
supervised_train_studies.append(','.join(train_data.get_studies()))
supervised_train_sample_names.append(','.join(train_data.get_samples()))
supervised_val_sample_names.append(','.join(val_data.get_samples()))
supervised_train_sample_counts.append(len(train_data))
subset_percents.append(subset_percent)
train_data.reset_filters()
val_data.reset_filters()
with open(args.out_file, 'w') as out_file:
# Write header
out_file.write('accuracy\tbalanced_accuracy\tf1_score\ttrain studies\ttrain samples\t'
'val samples\ttrain sample count\tfraction of data used\n')
result_iterator = zip(accuracies,
balanced_accuracies,
f1_scores,
supervised_train_studies,
supervised_train_sample_names,
supervised_val_sample_names,
supervised_train_sample_counts,
subset_percents
)
for stats in result_iterator:
stat_strings = [str(item) for item in stats]
out_str = '\t'.join(stat_strings)
out_file.write(f'{out_str}\n')
| [
"saged.utils.load_recount_data",
"pandas.DataFrame",
"argparse.ArgumentParser",
"saged.utils.initialize_neptune",
"sklearn.preprocessing.MinMaxScaler",
"saged.datasets.correct_batch_effects",
"numpy.ones",
"saged.datasets.RefineBioMixedDataset",
"saged.utils.subset_to_equal_ratio",
"yaml.safe_load... | [((1267, 1292), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1290, 1292), False, 'import argparse\n'), ((3451, 3495), 'saged.utils.load_recount_data', 'utils.load_recount_data', (['args.dataset_config'], {}), '(args.dataset_config)\n', (3474, 3495), False, 'from saged import utils, datasets, models\n'), ((5032, 5111), 'saged.datasets.RefineBioMixedDataset', 'datasets.RefineBioMixedDataset', (['expression_df', 'sample_to_label', 'sample_to_study'], {}), '(expression_df, sample_to_label, sample_to_study)\n', (5062, 5111), False, 'from saged import utils, datasets, models\n'), ((3373, 3396), 'yaml.safe_load', 'yaml.safe_load', (['in_file'], {}), '(in_file)\n', (3387, 3396), False, 'import yaml\n'), ((3538, 3588), 'saged.utils.load_biobert_embeddings', 'utils.load_biobert_embeddings', (['args.dataset_config'], {}), '(args.dataset_config)\n', (3567, 3588), False, 'from saged import utils, datasets, models\n'), ((3709, 3763), 'numpy.ones', 'np.ones', (['(embeddings.shape[1], expression_df.shape[1])'], {}), '((embeddings.shape[1], expression_df.shape[1]))\n', (3716, 3763), True, 'import numpy as np\n'), ((5976, 6050), 'saged.datasets.correct_batch_effects', 'datasets.correct_batch_effects', (['labeled_data', 'args.batch_correction_method'], {}), '(labeled_data, args.batch_correction_method)\n', (6006, 6050), False, 'from saged import utils, datasets, models\n'), ((4752, 4766), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4764, 4766), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4871, 4933), 'pandas.DataFrame', 'pd.DataFrame', (['placeholder_array'], {'columns': 'expression_df.columns'}), '(placeholder_array, columns=expression_df.columns)\n', (4883, 4933), True, 'import pandas as pd\n'), ((4962, 5015), 'pandas.concat', 'pd.concat', (['[expression_df, embedding_df]'], {'axis': '"""rows"""'}), "([expression_df, embedding_df], axis='rows')\n", (4971, 5015), True, 'import pandas as 
pd\n'), ((7741, 7834), 'saged.utils.subset_to_equal_ratio', 'utils.subset_to_equal_ratio', (['train_data', 'val_data', 'args.tissue1', 'args.tissue2', 'args.seed'], {}), '(train_data, val_data, args.tissue1, args.\n tissue2, args.seed)\n', (7768, 7834), False, 'from saged import utils, datasets, models\n'), ((8927, 8958), 'yaml.safe_load', 'yaml.safe_load', (['supervised_file'], {}), '(supervised_file)\n', (8941, 8958), False, 'import yaml\n'), ((7003, 7031), 'yaml.safe_load', 'yaml.safe_load', (['neptune_file'], {}), '(neptune_file)\n', (7017, 7031), False, 'import yaml\n'), ((7066, 7106), 'saged.utils.initialize_neptune', 'utils.initialize_neptune', (['neptune_config'], {}), '(neptune_config)\n', (7090, 7106), False, 'from saged import utils, datasets, models\n'), ((9346, 9373), 'os.path.splitext', 'os.path.splitext', (['save_path'], {}), '(save_path)\n', (9362, 9373), False, 'import os\n')] |
import numpy as np
import aesara
import aesara.tensor as tt
class Mlp:
    """A small two-layer perceptron expressed as a symbolic Aesara graph.

    Builds a sigmoid hidden layer followed by a softmax output layer and
    exposes the symbolic input/output variables through ``self.inputs``
    and ``self.outputs``.
    """

    def __init__(self, nfeatures=100, noutputs=10, nhiddens=50, rng=None):
        # Normalize ``rng``: None -> seed 0; an int seeds a RandomState.
        if rng is None:
            rng = 0
        if isinstance(rng, int):
            rng = np.random.RandomState(rng)
        self.rng = rng
        self.nfeatures = nfeatures
        self.noutputs = noutputs
        self.nhiddens = nhiddens

        features = tt.dmatrix("x")
        # Hidden layer: sigmoid(x . W_h + b_h)
        hidden_w = aesara.shared(self.rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
        hidden_b = aesara.shared(np.zeros(nhiddens), borrow=True)
        hidden = tt.nnet.sigmoid(tt.dot(features, hidden_w) + hidden_b)
        # Output layer: softmax(h . W_y + b_y)
        out_w = aesara.shared(self.rng.normal(0, 1, (nhiddens, noutputs)))
        out_b = aesara.shared(np.zeros(noutputs), borrow=True)
        out = tt.nnet.softmax(tt.dot(hidden, out_w) + out_b)
        self.inputs = [features]
        self.outputs = [out]
class OfgNested:
    """Builds a graph in which one OpFromGraph is nested inside another."""

    def __init__(self):
        x, y, z = tt.scalars("xyz")
        # Inner op computes x * y; outer op wraps inner(x, y) + z.
        inner = aesara.OpFromGraph([x, y], [x * y])
        outer = aesara.OpFromGraph([x, y, z], [inner(x, y) + z])
        self.inputs = [x, y, z]
        self.outputs = [outer(x, y, z) + z]
class Ofg:
    """Graph that applies a single OpFromGraph twice with permuted arguments."""

    def __init__(self):
        x, y, z = tt.scalars("xyz")
        op = aesara.OpFromGraph([x, y, z], [tt.nnet.sigmoid((x + y + z) ** 2)])
        self.inputs = [x, y, z]
        # Two applications of the same op, with arguments swapped.
        self.outputs = [op(x, y, z) + op(z, y, x)]
class OfgSimple:
    """Graph with a single OpFromGraph application."""

    def __init__(self):
        x, y, z = tt.scalars("xyz")
        body = tt.nnet.sigmoid((x + y + z) ** 2)
        op = aesara.OpFromGraph([x, y, z], [body])
        self.inputs = [x, y, z]
        self.outputs = [op(x, y, z)]
| [
"aesara.tensor.dmatrix",
"aesara.tensor.nnet.sigmoid",
"aesara.tensor.dot",
"numpy.zeros",
"numpy.random.RandomState",
"aesara.OpFromGraph",
"aesara.tensor.scalars"
] | [((408, 423), 'aesara.tensor.dmatrix', 'tt.dmatrix', (['"""x"""'], {}), "('x')\n", (418, 423), True, 'import aesara.tensor as tt\n'), ((914, 931), 'aesara.tensor.scalars', 'tt.scalars', (['"""xyz"""'], {}), "('xyz')\n", (924, 931), True, 'import aesara.tensor as tt\n'), ((963, 994), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y]', '[e]'], {}), '([x, y], [e])\n', (981, 994), False, 'import aesara\n'), ((1035, 1070), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y, z]', '[e2]'], {}), '([x, y, z], [e2])\n', (1053, 1070), False, 'import aesara\n'), ((1217, 1234), 'aesara.tensor.scalars', 'tt.scalars', (['"""xyz"""'], {}), "('xyz')\n", (1227, 1234), True, 'import aesara.tensor as tt\n'), ((1247, 1280), 'aesara.tensor.nnet.sigmoid', 'tt.nnet.sigmoid', (['((x + y + z) ** 2)'], {}), '((x + y + z) ** 2)\n', (1262, 1280), True, 'import aesara.tensor as tt\n'), ((1294, 1328), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y, z]', '[e]'], {}), '([x, y, z], [e])\n', (1312, 1328), False, 'import aesara\n'), ((1490, 1507), 'aesara.tensor.scalars', 'tt.scalars', (['"""xyz"""'], {}), "('xyz')\n", (1500, 1507), True, 'import aesara.tensor as tt\n'), ((1520, 1553), 'aesara.tensor.nnet.sigmoid', 'tt.nnet.sigmoid', (['((x + y + z) ** 2)'], {}), '((x + y + z) ** 2)\n', (1535, 1553), True, 'import aesara.tensor as tt\n'), ((1567, 1601), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y, z]', '[e]'], {}), '([x, y, z], [e])\n', (1585, 1601), False, 'import aesara\n'), ((244, 270), 'numpy.random.RandomState', 'np.random.RandomState', (['rng'], {}), '(rng)\n', (265, 270), True, 'import numpy as np\n'), ((537, 555), 'numpy.zeros', 'np.zeros', (['nhiddens'], {}), '(nhiddens)\n', (545, 555), True, 'import numpy as np\n'), ((718, 736), 'numpy.zeros', 'np.zeros', (['noutputs'], {}), '(noutputs)\n', (726, 736), True, 'import numpy as np\n'), ((598, 611), 'aesara.tensor.dot', 'tt.dot', (['x', 'wh'], {}), '(x, wh)\n', (604, 611), True, 'import aesara.tensor as tt\n'), ((779, 
792), 'aesara.tensor.dot', 'tt.dot', (['h', 'wy'], {}), '(h, wy)\n', (785, 792), True, 'import aesara.tensor as tt\n')] |
'''
HOW TO RUN THIS CODE (if tests are within the assignment 1 root):
python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q
python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q --cov
py.test.exe --cov=cs224d/ tests/test_sigmoid_to_solutions.py --cov-report html
(if the tests are within the subfolder tests)
PYTHONPATH=${PWD} py.test.exe tests/ -v --cov-report html
python -m pytest tests -v --cov-report html
Open the index.html file inside the generated htmlcov directory
'''
import pytest
import numpy as np
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol
import random
from collections import defaultdict, OrderedDict, Counter
COUNT=5
def rel_error(x, y):
    """Return the maximum element-wise relative error between *x* and *y*."""
    numer = np.abs(x - y)
    # Clamp the denominator away from zero to keep the ratio finite.
    denom = np.maximum(1e-7, np.abs(x) + np.abs(y))
    return np.max(numer / denom)
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid(sigmoid_f):
    """ Original sigmoid test defined in q2_sigmoid.py; """
    inputs = np.array([[1, 2], [-1, -2]])
    expected = np.array([[0.73105858, 0.88079708],
                         [0.26894142, 0.11920292]])
    assert rel_error(sigmoid_f(inputs), expected) <= 1e-7
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoidgrad(sigmoid_f):
    """ Original sigmoid gradient test defined in q2_sigmoid.py; """
    # Bug fix: the parametrized ``sigmoid_f`` was previously ignored and the
    # hard-coded ``sigmoid`` called instead, so ``sigmoid_sol`` was never tested.
    x = np.array([[1, 2], [-1, -2]])
    f = sigmoid_f(x)
    g = sigmoid_grad(f)
    assert rel_error(g, np.array([[0.19661193, 0.10499359],
                              [0.19661193, 0.10499359]])) <= 1e-7
@pytest.mark.parametrize("dim", list(range(1,8)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_shape(dim, sigmoid_f):
    """sigmoid and sigmoid_grad must preserve the input's shape."""
    # Bug fix: use the parametrized ``sigmoid_f`` (previously the hard-coded
    # ``sigmoid`` was called, so ``sigmoid_sol`` was never exercised).
    shape = tuple(np.random.randint(3, 8) for _ in range(dim))
    x = np.random.standard_normal(shape)
    y = np.copy(x)
    assert x.shape == sigmoid_f(y).shape
    assert x.shape == sigmoid_grad(sigmoid_f(y)).shape
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_minus_z(sigmoid_f, count=100):
    """sigmoid(-z) == 1 - sigmoid(z) (point symmetry about (0, 0.5))."""
    # Bug fix: exercise the parametrized ``sigmoid_f`` instead of the
    # hard-coded ``sigmoid``.
    z = np.random.normal(loc=0., scale=100., size=count)
    assert rel_error(1 - sigmoid_f(-z), sigmoid_f(z)) <= 1e-7
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_monotone(sigmoid_f, count=100):
    """sigmoid must be monotonically non-decreasing."""
    # Bug fixes: (1) the comparison now lives *inside* np.all — the original
    # asserted ``np.all(diff) >= 0``, i.e. ``bool >= 0``, which is always
    # true; (2) the parametrized ``sigmoid_f`` is actually used.
    z = np.random.normal(loc=0., scale=100., size=count)
    shift = np.random.uniform(low=0., high=10., size=count)
    assert np.all(sigmoid_f(z + shift) - sigmoid_f(z) >= 0)
    assert np.all(sigmoid_f(z - shift) - sigmoid_f(z) <= 0)
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_range(sigmoid_f, count=100):
    """All sigmoid outputs must lie within [0, 1]."""
    # Bug fixes: the lower bound is now checked with np.min — the original
    # asserted ``np.max(...) >= 0``, which is vacuously true for any output —
    # and the parametrized ``sigmoid_f`` is actually used.
    z = np.random.normal(loc=0., scale=100., size=count)
    s = sigmoid_f(z)
    assert np.max(s) <= 1.
    assert np.min(s) >= 0.
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize('execution_number', list(range(COUNT)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis0(dim_1, execution_number, sigmoid_f):
    """ sigmoid needs to be applied element-wise;"""
    # Bug fix: call the parametrized ``sigmoid_f`` rather than the
    # hard-coded ``sigmoid``.
    a1 = np.random.normal(size=(dim_1, 1))
    s1 = sigmoid_f(a1)
    permutation = np.random.permutation(dim_1)
    inverse_permutation = np.argsort(permutation)
    s1_perm = sigmoid_f(a1[permutation])
    assert rel_error(s1_perm[inverse_permutation], s1) <= 1e-8
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis1(dim_1, sigmoid_f):
    """Permuting along axis 1 commutes with the element-wise sigmoid."""
    # Bug fix: call the parametrized ``sigmoid_f`` rather than the
    # hard-coded ``sigmoid``.
    a1 = np.random.normal(size=(1, dim_1))
    s1 = sigmoid_f(a1)
    permutation = np.random.permutation(dim_1)
    inverse_permutation = np.argsort(permutation)
    s1_perm = sigmoid_f(a1.ravel()[permutation])
    assert rel_error(s1_perm.ravel()[inverse_permutation], s1) <= 1e-8
#note: permutation(sigmoid(x)) = sigmoid(permutation(x))
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_gradient(dim_1, dim_2, sigmoid_f):
    """Central finite differences must agree with sigmoid_grad."""
    # Bug fix: use the parametrized ``sigmoid_f`` throughout (the original
    # called the hard-coded ``sigmoid``).
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    shift = np.random.uniform(low=1e-9, high=1e-5, size=(dim_1, dim_2))
    dsigmoid = (sigmoid_f(a1 + shift) - sigmoid_f(a1 - shift)) / (2 * shift)
    analytic = sigmoid_grad(sigmoid_f(a1))
    assert np.abs(np.max(dsigmoid - analytic)) <= 1e-7
    assert np.abs(np.min(dsigmoid - analytic)) <= 1e-7
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_matches_solution(dim_1, dim_2):
    """sigmoid agrees with the reference sigmoid_sol implementation."""
    # Bug fix: renamed from ``test_sigmoid`` — three module-level tests shared
    # that name, so the earlier definitions were shadowed and never collected
    # by pytest.
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    assert rel_error(sigmoid(a1), sigmoid_sol(a1)) <= 1e-10
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_grad_matches_solution(dim_1, dim_2):
    """sigmoid_grad agrees with sigmoid_grad_sol on sigmoid outputs."""
    # Bug fix: renamed from ``test_sigmoid`` — three module-level tests shared
    # that name, so the earlier definitions were shadowed and never collected
    # by pytest.
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    a1_copy = a1.copy()
    s_a1 = sigmoid(a1)
    s_sol_a1 = sigmoid_sol(a1_copy)
    assert rel_error(sigmoid_grad(s_a1), sigmoid_grad_sol(s_sol_a1)) <= 1e-10
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_grad_raw_matches_solution(dim_1, dim_2):
    """sigmoid_grad agrees with sigmoid_grad_sol on raw (pre-sigmoid) input."""
    # Bug fix: renamed from ``test_sigmoid`` — three module-level tests shared
    # that name, so the earlier definitions were shadowed and never collected
    # by pytest.
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    a1_copy = a1.copy()
    assert rel_error(sigmoid_grad(a1), sigmoid_grad_sol(a1_copy)) <= 1e-10
| [
"q2_sigmoid_sol.sigmoid_grad_sol",
"numpy.random.uniform",
"numpy.abs",
"numpy.copy",
"q2_sigmoid.sigmoid",
"q2_sigmoid_sol.sigmoid_sol",
"numpy.argsort",
"q2_sigmoid.sigmoid_grad",
"numpy.random.standard_normal",
"numpy.array",
"numpy.random.randint",
"numpy.random.normal",
"numpy.random.pe... | [((831, 891), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (854, 891), False, 'import pytest\n'), ((1145, 1205), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (1168, 1205), False, 'import pytest\n'), ((1544, 1604), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (1567, 1604), False, 'import pytest\n'), ((1976, 2036), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (1999, 2036), False, 'import pytest\n'), ((2212, 2272), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (2235, 2272), False, 'import pytest\n'), ((2557, 2617), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (2580, 2617), False, 'import pytest\n'), ((2913, 2973), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (2936, 2973), False, 'import pytest\n'), ((3440, 3500), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (3463, 3500), False, 'import pytest\n'), ((4022, 4082), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (4045, 4082), False, 'import pytest\n'), ((989, 1017), 'numpy.array', 'np.array', (['[[1, 2], [-1, -2]]'], {}), '([[1, 2], [-1, -2]])\n', (997, 1017), True, 'import 
numpy as np\n'), ((1316, 1344), 'numpy.array', 'np.array', (['[[1, 2], [-1, -2]]'], {}), '([[1, 2], [-1, -2]])\n', (1324, 1344), True, 'import numpy as np\n'), ((1353, 1363), 'q2_sigmoid.sigmoid', 'sigmoid', (['x'], {}), '(x)\n', (1360, 1363), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((1372, 1387), 'q2_sigmoid.sigmoid_grad', 'sigmoid_grad', (['f'], {}), '(f)\n', (1384, 1387), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((1830, 1862), 'numpy.random.standard_normal', 'np.random.standard_normal', (['shape'], {}), '(shape)\n', (1855, 1862), True, 'import numpy as np\n'), ((1871, 1881), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (1878, 1881), True, 'import numpy as np\n'), ((2093, 2143), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(100.0)', 'size': 'count'}), '(loc=0.0, scale=100.0, size=count)\n', (2109, 2143), True, 'import numpy as np\n'), ((2334, 2384), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(100.0)', 'size': 'count'}), '(loc=0.0, scale=100.0, size=count)\n', (2350, 2384), True, 'import numpy as np\n'), ((2395, 2444), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(10.0)', 'size': 'count'}), '(low=0.0, high=10.0, size=count)\n', (2412, 2444), True, 'import numpy as np\n'), ((2672, 2722), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(100.0)', 'size': 'count'}), '(loc=0.0, scale=100.0, size=count)\n', (2688, 2722), True, 'import numpy as np\n'), ((3117, 3150), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(dim_1, 1)'}), '(size=(dim_1, 1))\n', (3133, 3150), True, 'import numpy as np\n'), ((3168, 3179), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (3175, 3179), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((3199, 3227), 'numpy.random.permutation', 'np.random.permutation', (['dim_1'], {}), '(dim_1)\n', (3220, 3227), True, 'import numpy as np\n'), ((3254, 3277), 
'numpy.argsort', 'np.argsort', (['permutation'], {}), '(permutation)\n', (3264, 3277), True, 'import numpy as np\n'), ((3297, 3321), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1[permutation]'], {}), '(a1[permutation])\n', (3304, 3321), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((3573, 3606), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, dim_1)'}), '(size=(1, dim_1))\n', (3589, 3606), True, 'import numpy as np\n'), ((3624, 3635), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (3631, 3635), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((3655, 3683), 'numpy.random.permutation', 'np.random.permutation', (['dim_1'], {}), '(dim_1)\n', (3676, 3683), True, 'import numpy as np\n'), ((3710, 3733), 'numpy.argsort', 'np.argsort', (['permutation'], {}), '(permutation)\n', (3720, 3733), True, 'import numpy as np\n'), ((4147, 4205), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (4163, 4205), True, 'import numpy as np\n'), ((4215, 4276), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1e-09)', 'high': '(1e-05)', 'size': '(dim_1, dim_2)'}), '(low=1e-09, high=1e-05, size=(dim_1, dim_2))\n', (4232, 4276), True, 'import numpy as np\n'), ((4665, 4723), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (4681, 4723), True, 'import numpy as np\n'), ((4936, 4994), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (4952, 4994), True, 'import numpy as np\n'), ((5033, 5044), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (5040, 5044), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((5060, 5080), 'q2_sigmoid_sol.sigmoid_sol', 'sigmoid_sol', (['a1_copy'], {}), '(a1_copy)\n', (5071, 
5080), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((5311, 5369), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (5327, 5369), True, 'import numpy as np\n'), ((770, 783), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (776, 783), True, 'import numpy as np\n'), ((1063, 1125), 'numpy.array', 'np.array', (['[[0.73105858, 0.88079708], [0.26894142, 0.11920292]]'], {}), '([[0.73105858, 0.88079708], [0.26894142, 0.11920292]])\n', (1071, 1125), True, 'import numpy as np\n'), ((1412, 1474), 'numpy.array', 'np.array', (['[[0.19661193, 0.10499359], [0.19661193, 0.10499359]]'], {}), '([[0.19661193, 0.10499359], [0.19661193, 0.10499359]])\n', (1420, 1474), True, 'import numpy as np\n'), ((1724, 1747), 'numpy.random.randint', 'np.random.randint', (['(3)', '(8)'], {}), '(3, 8)\n', (1741, 1747), True, 'import numpy as np\n'), ((1904, 1914), 'q2_sigmoid.sigmoid', 'sigmoid', (['y'], {}), '(y)\n', (1911, 1914), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2190, 2200), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2197, 2200), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2739, 2749), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2746, 2749), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2775, 2785), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2782, 2785), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4331, 4342), 'q2_sigmoid.sigmoid', 'sigmoid', (['ap'], {}), '(ap)\n', (4338, 4342), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4345, 4356), 'q2_sigmoid.sigmoid', 'sigmoid', (['am'], {}), '(am)\n', (4352, 4356), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4742, 4753), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (4749, 4753), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4755, 4770), 
'q2_sigmoid_sol.sigmoid_sol', 'sigmoid_sol', (['a1'], {}), '(a1)\n', (4766, 4770), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((5103, 5121), 'q2_sigmoid.sigmoid_grad', 'sigmoid_grad', (['s_a1'], {}), '(s_a1)\n', (5115, 5121), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((5123, 5149), 'q2_sigmoid_sol.sigmoid_grad_sol', 'sigmoid_grad_sol', (['s_sol_a1'], {}), '(s_sol_a1)\n', (5139, 5149), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((5414, 5430), 'q2_sigmoid.sigmoid_grad', 'sigmoid_grad', (['a1'], {}), '(a1)\n', (5426, 5430), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((5432, 5457), 'q2_sigmoid_sol.sigmoid_grad_sol', 'sigmoid_grad_sol', (['a1_copy'], {}), '(a1_copy)\n', (5448, 5457), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((1956, 1966), 'q2_sigmoid.sigmoid', 'sigmoid', (['y'], {}), '(y)\n', (1963, 1966), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2178, 2188), 'q2_sigmoid.sigmoid', 'sigmoid', (['y'], {}), '(y)\n', (2185, 2188), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2461, 2479), 'q2_sigmoid.sigmoid', 'sigmoid', (['(z + shift)'], {}), '(z + shift)\n', (2468, 2479), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2482, 2492), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2489, 2492), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2517, 2535), 'q2_sigmoid.sigmoid', 'sigmoid', (['(z - shift)'], {}), '(z - shift)\n', (2524, 2535), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2538, 2548), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2545, 2548), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((804, 813), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (810, 813), True, 'import numpy as np\n'), ((816, 825), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (822, 825), True, 'import numpy as np\n'), ((4419, 4430), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), 
'(a1)\n', (4426, 4430), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4491, 4502), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (4498, 4502), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n')] |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that generate and alter :ref:`image<images>` color.
"""
import functools
import logging
import numpy
import imagecat.data
import imagecat.operator.util
import imagecat.units
log = logging.getLogger(__name__)
def colormap(graph, name, inputs):
    """Convert a single-channel layer to an RGB layer using a colormap.

    Parameters
    ----------
    graph: :ref:`graph`, required
        Graph that owns this task.
    name: hashable object, required
        Name of the task executing this function.
    inputs: :ref:`named-inputs`, required
        Inputs for this operator.

    Named Inputs
    ------------
    image: :class:`imagecat.data.Image`, required
        Image with layer to be color mapped.
    inlayer: :class:`str`, optional
        `image` layer to be color mapped. Default: :any:`None`.
    outlayer: :class:`str`, optional
        Name of the output image layer. Default: ``"C"``.
    mapping: Python callable, optional
        Callable mapping a `(rows, columns, 1)` array to an RGB
        `(rows, columns, 3)` array. If :any:`None` (the default), a linear
        map with a Color Brewer 2 Blue-Red palette is used.

    Returns
    -------
    image: :class:`imagecat.data.Image`
        A copy of the input image with the selected layer color mapped.
    """
    util = imagecat.operator.util
    inlayer = util.optional_input(name, inputs, "inlayer", default=None)
    outlayer = util.optional_input(name, inputs, "outlayer", default="C")
    layer_name, layer = util.require_layer(name, inputs, "image", layer=inlayer, depth=1)
    mapping = util.optional_input(name, inputs, "mapping", default=None)
    if mapping is None:
        # Default: linear interpolation through the Color Brewer 2 Blue-Red palette.
        palette = imagecat.color.brewer.palette("BlueRed")
        mapping = functools.partial(imagecat.color.linear_map, palette=palette)
    mapped = mapping(layer.data[:,:,0])
    result = imagecat.data.Image(layers={outlayer: imagecat.data.Layer(data=mapped, role=imagecat.data.Role.RGB)})
    util.log_result(log, name, "colormap", result, inlayer=inlayer, outlayer=outlayer, mapping=mapping)
    return result
def dot(graph, name, inputs):
    """Compute the dot product of a :class:`image.data.Layer` and a matrix.

    Most commonly used to convert an RGB layer to grayscale, but capable of
    converting any depth :math:`M` layer to depth :math:`N` using an
    :math:`M \\times N` matrix: each output channel is a weighted sum of the
    input channels, with weights taken from the corresponding matrix column.

    Parameters
    ----------
    graph: :ref:`graph`, required
        Graph that owns this task.
    name: hashable object, required
        Name of the task executing this function.
    inputs: :ref:`named-inputs`, required
        Inputs for this operator.

    Named Inputs
    ------------
    image: :class:`imagecat.data.Image`, required
        Image containing layer to be converted.
    inlayer: :class:`str`, optional
        Layer to be converted. Default: None.
    outlayer: :class:`str`, optional
        Output layer. Default: "Y".
    outrole: :class:`imagecat.data.Role`, optional
        Role for the new layer. Defaults to :class:`imagecat.data.role.LUMINANCE`.
    matrix: :math:`M \\times N` :class:`numpy.ndarray` matrix, optional
        How much each input channel contributes to each output channel.
        Defaults to an RGB-to-grayscale matrix. :math:`M` must match the
        input layer depth, :math:`N` the expected output-role depth.

    Returns
    -------
    image: :class:`imagecat.data.Image`
        Image containing the new layer.
    """
    util = imagecat.operator.util
    inlayer = util.optional_input(name, inputs, "inlayer", default=None)
    layer_name, layer = util.require_layer(name, inputs, "image", layer=inlayer)
    outdtype = util.optional_input(name, inputs, "outdtype", type=numpy.dtype, default=numpy.float16)
    outlayer = util.optional_input(name, inputs, "outlayer", type=str, default="Y")
    outrole = util.optional_input(name, inputs, "outrole", type=imagecat.data.Role, default=imagecat.data.Role.LUMINANCE)
    # Default matrix is the Rec. 709-style RGB-to-luminance weighting.
    matrix = util.optional_input(name, inputs, "matrix", type=util.array(ndim=2), default=[[0.2125], [0.7154], [0.0721]])
    weighted = numpy.dot(layer.data, matrix).astype(outdtype)
    result = imagecat.data.Image(layers={outlayer: imagecat.data.Layer(data=weighted, role=outrole)})
    util.log_result(log, name, "dot", result, inlayer=inlayer, outdtype=outdtype, outlayer=outlayer, outrole=outrole, matrix=matrix)
    return result
def fill(graph, name, inputs):
    """Generate an :ref:`image<images>` with a single solid-color layer.

    Parameters
    ----------
    graph: :ref:`graph`, required
        Graph that owns this task.
    name: hashable object, required
        Name of the task executing this function.
    inputs: :ref:`named-inputs`, required
        Inputs for this operator.

    Named Inputs
    ------------
    layer: :class:`str`, optional
        New layer name. Default: `"C"`.
    res: (width, height) tuple, optional
        Resolution of the new image. Default: `(256, 256)`.
    role: :class:`imagecat.data.Role`, optional
        Semantic role of the new layer. Default: :class:`imagecat.data.Role.RGB`.
    values: sequence of values, optional
        Values for the new layer; the count must suit `role`. Default: [1, 1, 1].

    Returns
    -------
    image: :class:`imagecat.data.Image`
        New image with a single solid-color layer.
    """
    util = imagecat.operator.util
    layer = util.optional_input(name, inputs, "layer", type=str, default="C")
    res = util.optional_input(name, inputs, "res", type=util.array(shape=(2,), dtype=int), default=[256, 256])
    role = util.optional_input(name, inputs, "role", type=imagecat.data.Role, default=imagecat.data.Role.RGB)
    values = util.optional_input(name, inputs, "values", type=numpy.array, default=[1, 1, 1])
    # res is (width, height) while arrays are laid out (rows, cols, depth).
    data = numpy.full((res[1], res[0], len(values)), values, dtype=numpy.float16)
    output = imagecat.data.Image(layers={layer: imagecat.data.Layer(data=data, role=role)})
    util.log_result(log, name, "fill", output, layer=layer, role=role, res=res, values=values)
    return output
| [
"numpy.dot",
"functools.partial",
"logging.getLogger"
] | [((767, 794), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (784, 794), False, 'import logging\n'), ((2413, 2474), 'functools.partial', 'functools.partial', (['imagecat.color.linear_map'], {'palette': 'palette'}), '(imagecat.color.linear_map, palette=palette)\n', (2430, 2474), False, 'import functools\n'), ((5080, 5109), 'numpy.dot', 'numpy.dot', (['layer.data', 'matrix'], {}), '(layer.data, matrix)\n', (5089, 5109), False, 'import numpy\n')] |
import itertools
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from coremltools._deps import _HAS_KERAS2_TF
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models.utils import _macos_version, _is_macos
if _HAS_KERAS2_TF:
import keras.backend
from keras.models import Sequential, Model
from keras.layers import (
Dense,
Activation,
Conv2D,
Conv1D,
Flatten,
BatchNormalization,
Conv2DTranspose,
SeparableConv2D,
)
from keras.layers import (
MaxPooling2D,
AveragePooling2D,
GlobalAveragePooling2D,
GlobalMaxPooling2D,
)
from keras.layers import (
MaxPooling1D,
AveragePooling1D,
GlobalAveragePooling1D,
GlobalMaxPooling1D,
)
from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout
from keras.layers import Add, Concatenate
from keras.layers import add, multiply, concatenate, dot, maximum, average
from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D
from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D
from keras.layers import SimpleRNN, LSTM, GRU
from keras.layers.core import SpatialDropout2D
from keras.layers.wrappers import Bidirectional, TimeDistributed
from distutils.version import StrictVersion as _StrictVersion
if keras.__version__ >= _StrictVersion("2.2.1"):
from keras.layers import DepthwiseConv2D, ReLU
elif keras.__version__ >= _StrictVersion("2.2.0"):
from keras.layers import DepthwiseConv2D
from keras_applications.mobilenet import relu6
else:
from keras.applications.mobilenet import DepthwiseConv2D, relu6
def _keras_transpose(x, is_sequence=False):
if len(x.shape) == 5:
# Keras input shape = [Batch, Seq, Height, Width, Channels]
x = np.transpose(x, [1, 0, 4, 2, 3])
if len(x.shape) == 4:
# Keras input shape = [Batch, Height, Width, Channels]
x = np.transpose(x, [0, 3, 1, 2])
return np.expand_dims(x, axis=0)
elif len(x.shape) == 3:
# Keras input shape = [Batch, (Sequence) Length, Channels]
return np.transpose(x, [1, 0, 2])
elif len(x.shape) == 2:
if is_sequence: # (N,S) --> (S,N,1,)
return x.reshape(x.shape[::-1] + (1,))
else: # (N,C) --> (N,C,1,1)
return x.reshape((1,) + x.shape) # Dense
elif len(x.shape) == 1:
if is_sequence: # (S) --> (S,N,1,1,1)
return x.reshape((x.shape[0], 1, 1))
else:
return x
else:
return x
def _get_coreml_model(
    model,
    input_names=["data"],
    output_names=["output"],
    input_name_shape_dict={},
    model_precision=_MLMODEL_FULL_PRECISION,
    use_float_arraytype=False,
):
    """Convert a Keras model to a Core ML model and return the result."""
    # Imported lazily so the module stays importable without coremltools'
    # Keras converter.
    from coremltools.converters import keras as keras_converter

    return keras_converter.convert(
        model,
        input_names,
        output_names,
        input_name_shape_dict=input_name_shape_dict,
        model_precision=model_precision,
        use_float_arraytype=use_float_arraytype,
    )
def _generate_data(input_shape, mode="random"):
"""
Generate some random data according to a shape.
"""
if mode == "zeros":
X = np.zeros(input_shape)
elif mode == "ones":
X = np.ones(input_shape)
elif mode == "linear":
X = np.array(range(np.product(input_shape))).reshape(input_shape)
elif mode == "random":
X = np.random.rand(*input_shape)
elif mode == "random_zero_mean":
X = np.random.rand(*input_shape) - 0.5
return X
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasNumericCorrectnessTest(unittest.TestCase):
    """
    Unit test class for testing the Keras converter.

    Base class: subclasses build Keras models and call ``_test_model`` to
    convert them to Core ML and compare predictions numerically.
    """
    def runTest(self):
        # No-op so unittest can instantiate this base class directly.
        pass
    def _get_coreml_model_params_and_test_input(
        self, model, mode, one_dim_seq_flags, input_name_shape_dict={}
    ):
        """Build converter input/output names plus matching test data.

        Returns a tuple ``(input_names, output_names, input_data,
        coreml_input)`` where ``input_data`` is fed to Keras and
        ``coreml_input`` (transposed to Core ML layout, float32) is fed to
        the converted model.
        """
        # Generate data
        nb_inputs = len(model.inputs)
        if nb_inputs > 1:
            # Multi-input model: one generated array per input, named data_0..data_N.
            input_names = []
            input_data = []
            coreml_input = {}
            for i in range(nb_inputs):
                feature_name = "data_%s" % i
                input_names.append(feature_name)
                if feature_name in input_name_shape_dict:
                    input_shape = [
                        1 if a is None else a
                        for a in input_name_shape_dict[feature_name]
                    ]
                else:
                    # Variable (None) dimensions default to size 1.
                    input_shape = [1 if a is None else a for a in model.input_shape[i]]
                X = _generate_data(input_shape, mode)
                input_data.append(X)
                if one_dim_seq_flags is None:
                    coreml_input[feature_name] = _keras_transpose(X).astype("f").copy()
                else:
                    coreml_input[feature_name] = (
                        _keras_transpose(X, one_dim_seq_flags[i]).astype("f").copy()
                    )
        else:
            # Single-input model: one array under the fixed name "data".
            input_names = ["data"]
            if "data" in input_name_shape_dict:
                input_shape = [
                    1 if a is None else a for a in input_name_shape_dict["data"]
                ]
            else:
                input_shape = [1 if a is None else a for a in model.input_shape]
            input_data = _generate_data(input_shape, mode)
            if one_dim_seq_flags is None:
                coreml_input = {"data": _keras_transpose(input_data).astype("f").copy()}
            else:
                coreml_input = {
                    "data": _keras_transpose(input_data, one_dim_seq_flags[0])
                    .astype("f")
                    .copy()
                }
        output_names = ["output" + str(i) for i in range(len(model.outputs))]
        return input_names, output_names, input_data, coreml_input
    def _test_model(
        self,
        model,
        input_name_shape_dict={},
        num_samples=1,  # NOTE(review): currently unused by this method
        mode="random",
        delta=1e-2,
        model_dir=None,
        transpose_keras_result=True,
        one_dim_seq_flags=None,
        model_precision=_MLMODEL_FULL_PRECISION,
    ):
        """Convert ``model`` to Core ML and compare predictions element-wise."""
        # transpose_keras_result: if true, compare the transposed Keras result
        # one_dim_seq_flags: a list of same length as the number of inputs in
        # the model; if None, treat all 1D input (if any) as non-sequence
        # if one_dim_seq_flags[i] is True, it means the ith input, with shape
        # (X,) is in fact a sequence of length X.
        # Get the CoreML model
        use_tmp_folder = False
        if model_dir is None:
            use_tmp_folder = True
            model_dir = tempfile.mkdtemp()
        (
            input_names,
            output_names,
            input_data,
            coreml_input,
        ) = self._get_coreml_model_params_and_test_input(
            model, mode, one_dim_seq_flags, input_name_shape_dict
        )
        coreml_model = _get_coreml_model(
            model,
            input_names,
            output_names,
            input_name_shape_dict,
            model_precision=model_precision,
        )
        try:
            # Core ML prediction requires macOS 10.13+; elsewhere the
            # conversion above is the only thing exercised.
            if not (_is_macos() and _macos_version() >= (10, 13)):
                return
            # Assuming coreml model output names are in the same order as
            # Keras output list, put predictions into a list, sorted by output
            # name
            coreml_preds = coreml_model.predict(coreml_input)
            c_preds = [coreml_preds[name] for name in output_names]
            # Get Keras predictions
            keras_preds = model.predict(input_data)
            k_preds = keras_preds if type(keras_preds) is list else [keras_preds]
            # Compare each output blob
            for idx, k_pred in enumerate(k_preds):
                if transpose_keras_result:
                    kp = _keras_transpose(k_pred).flatten()
                else:
                    kp = k_pred.flatten()
                cp = c_preds[idx].flatten()
                # Compare predictions
                self.assertEqual(len(kp), len(cp))
                for i in range(len(kp)):
                    # max_den keeps the relative comparison stable when both
                    # values are small (denominator never drops below 1.0).
                    max_den = max(1.0, kp[i], cp[i])
                    self.assertAlmostEqual(
                        kp[i] / max_den, cp[i] / max_den, delta=delta
                    )
        finally:
            # Cleanup files - models on disk no longer useful
            if use_tmp_folder and os.path.exists(model_dir):
                shutil.rmtree(model_dir)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasBasicNumericCorrectnessTest(KerasNumericCorrectnessTest):
def test_tiny_inner_product(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(2, input_shape=(2,)))
# Test all zeros
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="zeros", model_precision=model_precision)
# Test all ones
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
self._test_model(model, mode="ones", model_precision=model_precision)
# Test random
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, model_precision=model_precision)
    def test_tiny_inner_product_half_precision(self):
        # Same scenario as test_tiny_inner_product, but converts the model
        # with half-precision (FP16) weights.
        self.test_tiny_inner_product(model_precision=_MLMODEL_HALF_PRECISION)
def test_inner_product_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(1000, input_shape=(100,)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
    def test_inner_product_half_precision_random(self):
        # Same scenario as test_inner_product_random, converted at FP16.
        self.test_inner_product_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_dense_softmax(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="softmax"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_dense_elu(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="elu"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_dense_selu(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="selu"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_housenet_random(self):
np.random.seed(1988)
num_hidden = 2
num_features = 3
# Define a model
model = Sequential()
model.add(Dense(num_hidden, input_dim=num_features))
model.add(Activation("relu"))
model.add(Dense(1, input_dim=num_features))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_ones(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_ones_half_precision(self):
self.test_tiny_conv_ones(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
@unittest.skipUnless(
_is_macos() and _macos_version() >= (10, 14), "Only supported on MacOS 10.14+"
)
def test_tiny_conv_random_input_shape_dict(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
H, W, C = 10, 20, 5
input_shape = (None, H, W, C)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=(None, None, C),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(
model,
input_name_shape_dict={"data": input_shape},
model_precision=model_precision,
)
def test_tiny_conv_random_half_precision(self):
self.test_tiny_conv_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_dilated_half_precision(self):
return self.test_tiny_conv_dilated(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_dilated_rect_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_shape = (32, 20, 3)
num_kernels = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_dilated_rect_random_half_precision(self):
return self.test_tiny_conv_dilated_rect_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_pseudo_1d_x(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 5
filter_length = 1 # 3
nb_filters = 1
# Define a model
model = Sequential()
model.add(
Conv2D(
nb_filters,
kernel_size=(1, filter_length),
input_shape=(1, input_length, input_dim),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
self._test_model(model, mode="linear", model_precision=model_precision)
def test_tiny_conv_pseudo_1d_x_half_precision(self):
return self.test_tiny_conv_pseudo_1d_x(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv1d_same_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv1d_same_random_input_shape_dict(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(None, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(
model, input_name_shape_dict={"data": (None, input_length, input_dim)}
)
def test_large_input_length_conv1d_same_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 2
input_length = 80
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_large_input_length_conv1d_same_random_half_precision(self):
return self.test_large_input_length_conv1d_same_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv1d_valid_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="valid",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv1d_dilated_random(self):
np.random.seed(1988)
input_shape = (20, 1)
num_kernels = 2
filter_length = 3
# Define a model
model = Sequential()
model.add(
Conv1D(
num_kernels,
kernel_size=filter_length,
padding="valid",
input_shape=input_shape,
dilation_rate=3,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_x(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 1
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_y(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 1
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_xy(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_rect_kernel_xy_half_precision(self):
self.test_tiny_conv_rect_kernel_xy(model_precision=_MLMODEL_HALF_PRECISION)
def test_flatten(self):
model = Sequential()
model.add(Flatten(input_shape=(2, 2, 2)))
self._test_model(model, mode="linear")
def test_conv_dense(self, model_precision=_MLMODEL_FULL_PRECISION):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=input_shape))
model.add(Flatten())
model.add(Dense(10, activation="softmax"))
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_dense_half_precision(self):
return self.test_conv_dense(model_precision=_MLMODEL_HALF_PRECISION)
def test_conv_batchnorm_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 3)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(BatchNormalization(epsilon=1e-5))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_batchnorm_random_half_precision(self):
return self.test_conv_batchnorm_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_conv_batchnorm_no_gamma_no_beta(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 3)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(BatchNormalization(center=False, scale=False, epsilon=1e-5))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_batchnorm_no_gamma_no_beta_half_precision(self):
return self.test_conv_batchnorm_no_gamma_no_beta(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_deconv_random(self):
# In Keras 2, deconvolution auto computes the output shape.
np.random.seed(1988)
input_dim = 13
input_shape = (input_dim, input_dim, 5)
num_kernels = 16
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
use_bias=False,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_deconv_random_same_padding(self):
np.random.seed(1988)
input_dim = 14
input_shape = (input_dim, input_dim, 3)
num_kernels = 16
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(2, 2),
use_bias=True,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_same_pad(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_valid_pad(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_same_pad_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 4
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_valid(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
num_kernels = 4
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
strides=(1, 1),
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_same_fancy(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
num_kernels = 4
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
strides=(2, 2),
activation="relu",
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_valid_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 5
kernel_height = 3
kernel_width = 3
num_kernels = 40
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
strides=(1, 1),
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_same_fancy_depth_multiplier(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
num_kernels = 40
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
strides=(2, 2),
activation="relu",
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_same_fancy_depth_multiplier_half_precision(self):
return self.test_tiny_separable_conv_same_fancy_depth_multiplier(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_separable_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_dilated_half_precision(self):
return self.test_tiny_separable_conv_dilated(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_separable_conv_dilated_rect_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_shape = (32, 20, 3)
num_kernels = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_dilated_rect_random_half_precision(self):
return self.test_tiny_separable_conv_dilated_rect_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_max_pooling_no_overlap(self):
# no_overlap: pool_size = strides
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3), pool_size=(2, 2), strides=None, padding="valid"
)
)
self._test_model(model)
def test_max_pooling_overlap_multiple(self):
# input shape is multiple of pool_size, strides != pool_size
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(18, 18, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="valid",
)
)
self._test_model(model)
def test_max_pooling_overlap_odd(self):
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="valid",
)
)
self._test_model(model)
def test_max_pooling_overlap_same(self):
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="same",
)
)
self._test_model(model)
def test_global_max_pooling(self):
model = Sequential()
model.add(GlobalMaxPooling2D(input_shape=(16, 16, 3)))
self._test_model(model)
def test_average_pooling_no_overlap(self):
# no_overlap: pool_size = strides
model = Sequential()
model.add(
AveragePooling2D(
input_shape=(16, 16, 3), pool_size=(2, 2), strides=None, padding="valid"
)
)
self._test_model(model, delta=1e-2)
def test_average_pooling_inception_config_1(self):
# no_overlap: pool_size = strides
model = Sequential()
model.add(
AveragePooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(1, 1),
padding="same",
)
)
self._test_model(model, delta=1e-2)
def test_global_average_pooling(self):
model = Sequential()
model.add(GlobalAveragePooling2D(input_shape=(16, 16, 3)))
self._test_model(model)
def test_max_pooling_1d(self):
model = Sequential()
model.add(MaxPooling1D(input_shape=(16, 3), pool_size=4))
self._test_model(model)
def test_global_max_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(GlobalMaxPooling1D())
self._test_model(model)
def test_average_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(AveragePooling1D(pool_size=2))
self._test_model(model)
def test_global_average_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(GlobalAveragePooling1D())
self._test_model(model)
def test_tiny_conv_upsample_random(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(UpSampling2D(size=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_upsample_1d_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(UpSampling1D(size=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_crop_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(Cropping1D(cropping=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_crop_1d_random_half_precision(self):
return self.test_tiny_conv_crop_1d_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_pad_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(ZeroPadding1D(padding=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_pad_1d_random_half_precision(self):
return self.test_tiny_conv_pad_1d_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_causal_1d(self):
np.random.seed(1988)
model = Sequential()
model.add(Conv1D(1, 3, input_shape=(10, 1), use_bias=False, padding="causal"))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_embedding(self, model_precision=_MLMODEL_FULL_PRECISION):
model = Sequential()
num_inputs = 10
num_outputs = 3
model.add(Embedding(num_inputs, num_outputs))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, model_precision=model_precision)
def test_embedding_half_precision(self):
return self.test_embedding(model_precision=_MLMODEL_HALF_PRECISION)
def test_embedding_seq(self, model_precision=_MLMODEL_FULL_PRECISION):
model = Sequential()
num_inputs = 10
num_outputs = 3
model.add(Embedding(num_inputs, num_outputs, input_length=7))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(
model, one_dim_seq_flags=[True], model_precision=model_precision
)
def test_embedding_seq_half_precision(self):
return self.test_embedding_seq(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_no_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_seq2seq_rnn_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(
SimpleRNN(
num_channels,
input_shape=(input_length, input_dim),
return_sequences=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_rnn_seq(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
SimpleRNN(20, input_shape=(input_length, input_dim), return_sequences=False)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_rnn_seq_backwards(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
SimpleRNN(
20,
input_shape=(input_length, input_dim),
return_sequences=False,
go_backwards=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_no_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_no_sequence_lstm_zeros(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="zeros")
def test_tiny_no_sequence_lstm_ones(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="ones")
def test_small_no_sequence_lstm_zeros(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="zeros")
def test_small_no_sequence_lstm_ones(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="ones")
def test_lstm_seq(self):
    """LSTM over a length-5 sequence, return_sequences=False, random weights."""
    np.random.seed(1988)
    input_dim = 11
    input_length = 5
    model = Sequential()
    model.add(
        LSTM(20, input_shape=(input_length, input_dim), return_sequences=False)
    )
    # Uniform weights in [-0.1, 0.1); draw order is fixed by the seed above.
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    self._test_model(model)
def test_lstm_seq_backwards(self):
    """Same as test_lstm_seq but processing the sequence with go_backwards=True."""
    np.random.seed(1988)
    input_dim = 11
    input_length = 5
    model = Sequential()
    model.add(
        LSTM(
            20,
            input_shape=(input_length, input_dim),
            return_sequences=False,
            go_backwards=True,
        )
    )
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    self._test_model(model)
def test_medium_no_sequence_lstm_random(self):
    """Single-step LSTM (10 channels) with sigmoid recurrent activation."""
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 10
    # Define a model
    model = Sequential()
    model.add(
        LSTM(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_tiny_no_sequence_lstm_zeros_gpu(self):
    """1x1 LSTM, implementation=2 (GPU-style), tested on all-zero input."""
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    # Define a model
    model = Sequential()
    model.add(
        LSTM(
            num_channels,
            input_shape=(input_length, input_dim),
            implementation=2,
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model, mode="zeros")
def test_small_no_sequence_lstm_random(self):
    """Single-channel LSTM over a 10-dim, length-1 input, implementation=2."""
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 1
    # Define a model
    model = Sequential()
    model.add(
        LSTM(
            num_channels,
            input_shape=(input_length, input_dim),
            implementation=2,
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_tiny_no_sequence_gru_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    """1x1 GRU with random weights; precision is parameterized for reuse below."""
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    num_samples = 1
    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_gru_random_half_precision(self):
    """Half-precision variant of test_tiny_no_sequence_gru_random."""
    return self.test_tiny_no_sequence_gru_random(
        model_precision=_MLMODEL_HALF_PRECISION
    )
def test_small_no_sequence_gru_random(self):
    """Single-channel GRU over a 10-dim, length-1 input."""
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 1
    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_medium_no_sequence_gru_random(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    """10-channel GRU; note weights here are drawn in [0, 1), not centered."""
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 10
    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_model(model, model_precision=model_precision)
def test_medium_no_sequence_gru_random_half_precision(self):
    """Half-precision variant of test_medium_no_sequence_gru_random."""
    return self.test_medium_no_sequence_gru_random(
        model_precision=_MLMODEL_HALF_PRECISION
    )
def test_gru_seq(self):
    """GRU over a length-5 sequence, return_sequences=False."""
    np.random.seed(1988)
    input_dim = 11
    input_length = 5
    # Define a model
    model = Sequential()
    model.add(
        GRU(20, input_shape=(input_length, input_dim), return_sequences=False)
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_gru_seq_backwards(self, model_precision=_MLMODEL_FULL_PRECISION):
    """GRU over a length-5 sequence with go_backwards=True."""
    np.random.seed(1988)
    input_dim = 11
    input_length = 5
    # Define a model
    model = Sequential()
    model.add(
        GRU(
            20,
            input_shape=(input_length, input_dim),
            return_sequences=False,
            go_backwards=True,
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model, model_precision=model_precision)
def test_gru_seq_backwards_half_precision(self):
    """Half-precision variant of test_gru_seq_backwards."""
    return self.test_gru_seq_backwards(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_no_sequence_bidir_random(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    """Bidirectional 1x1 LSTM, implementation=1, random weights."""
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    num_samples = 1
    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=1, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_bidir_random_half_precision(self):
    """Half-precision variant of test_tiny_no_sequence_bidir_random."""
    return self.test_tiny_no_sequence_bidir_random(
        model_precision=_MLMODEL_HALF_PRECISION
    )
def test_tiny_no_sequence_bidir_random_gpu(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    """Bidirectional 1x1 LSTM, implementation=2 (GPU-style kernel fusion)."""
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    num_samples = 1
    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_bidir_random_gpu_half_precision(self):
    """Half-precision variant of test_tiny_no_sequence_bidir_random_gpu."""
    return self.test_tiny_no_sequence_bidir_random_gpu(
        model_precision=_MLMODEL_HALF_PRECISION
    )
def test_small_no_sequence_bidir_random(self):
    """Bidirectional single-channel LSTM over a 10-dim, length-1 input."""
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 1
    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_medium_no_sequence_bidir_random(self):
    """Bidirectional 10-channel LSTM over a 10-dim, length-1 input."""
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 10
    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
            input_shape=(input_length, input_dim),
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_medium_bidir_random_return_seq_false(self):
    """Bidirectional LSTM over a length-5 sequence, final state only."""
    np.random.seed(1988)
    input_dim = 7
    input_length = 5
    num_channels = 10
    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(
                num_channels,
                return_sequences=False,
                implementation=2,
                recurrent_activation="sigmoid",
            ),
            input_shape=(input_length, input_dim),
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_medium_bidir_random_return_seq_true(self):
    """Bidirectional LSTM over a length-5 sequence, full output sequence."""
    np.random.seed(1988)
    input_dim = 7
    input_length = 5
    num_channels = 10
    # Define a model
    model = Sequential()
    model.add(
        Bidirectional(
            LSTM(
                num_channels,
                return_sequences=True,
                implementation=2,
                recurrent_activation="sigmoid",
            ),
            input_shape=(input_length, input_dim),
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
def test_bilstm_merge_modes(self):
    """Bidirectional LSTM with every merge_mode (regression for issue 157)."""
    # issue 157
    def get_model(input_dim, fc_size, rnn_size, output_dim, merge_mode):
        # Functional TimeDistributed(Dense) -> BiLSTM(merge_mode) -> softmax head.
        input_data = Input(name="the_input", shape=(None, input_dim))
        x = TimeDistributed(Dense(fc_size, name="fc1", activation="relu",))(
            input_data
        )
        x = Bidirectional(
            LSTM(
                rnn_size,
                return_sequences=True,
                activation="relu",
                kernel_initializer="he_normal",
            ),
            merge_mode=merge_mode,
        )(x)
        y_pred = TimeDistributed(
            Dense(output_dim, name="y_pred", activation="softmax")
        )(x)
        model = Model([input_data], [y_pred])
        model.set_weights(
            [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
        )
        return model
    input_dim = 26
    fc_size = 512
    rnn_size = 512
    output_dim = 29
    for merge_mode in ["concat", "sum", "mul", "ave"]:
        model = get_model(input_dim, fc_size, rnn_size, output_dim, merge_mode)
        self._test_model(model)
def test_tiny_conv_elu_random(self):
    """Conv2D followed by an ELU advanced activation."""
    np.random.seed(1988)
    # Define a model
    from keras.layers.advanced_activations import ELU
    model = Sequential()
    model.add(Conv2D(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5)))
    model.add(ELU(alpha=0.8))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_conv_prelu_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    """Conv2D followed by PReLU with spatially shared axes."""
    np.random.seed(1988)
    # Define a model
    from keras.layers.advanced_activations import PReLU
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
        )
    )
    model.add(PReLU(shared_axes=[1, 2]))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model, model_precision=model_precision)
def test_tiny_conv_prelu_random_half_precision(self):
    """Half-precision variant of test_tiny_conv_prelu_random."""
    return self.test_tiny_conv_prelu_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_leaky_relu_random(self):
    """Conv2D followed by LeakyReLU."""
    np.random.seed(1988)
    # Define a model
    from keras.layers.advanced_activations import LeakyReLU
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
        )
    )
    model.add(LeakyReLU(alpha=0.3))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_conv_thresholded_relu_random(self):
    """Conv2D followed by ThresholdedReLU."""
    np.random.seed(1988)
    # Define a model
    from keras.layers.advanced_activations import ThresholdedReLU
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
        )
    )
    model.add(ThresholdedReLU(theta=0.8))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_concat_random(self):
    """Functional graph concatenating two Dense branches of a shared input."""
    np.random.seed(1988)
    input_dim = 10
    num_channels = 6
    # Define a model
    input_tensor = Input(shape=(input_dim,))
    x1 = Dense(num_channels)(input_tensor)
    x2 = Dense(num_channels)(x1)
    x3 = Dense(num_channels)(x1)
    x4 = concatenate([x2, x3])
    x5 = Dense(num_channels)(x4)
    model = Model(inputs=[input_tensor], outputs=[x5])
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_concat_seq_random(self):
    """Concatenation of two Embedding outputs along the sequence axis."""
    np.random.seed(1988)
    max_features = 10
    embedding_dims = 4
    seq_len = 5
    num_channels = 6
    # Define a model
    input_tensor = Input(shape=(seq_len,))
    x1 = Embedding(max_features, embedding_dims)(input_tensor)
    x2 = Embedding(max_features, embedding_dims)(input_tensor)
    x3 = concatenate([x1, x2], axis=1)
    model = Model(inputs=[input_tensor], outputs=[x3])
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model, one_dim_seq_flags=[True])
def test_lstm_concat_dense_random(self):
    """Embedding + positional input concatenated, stacked LSTMs, Dense head."""
    np.random.seed(1988)
    vocab_size = 1250
    seq_length = 5
    units = 32
    # Define a model
    input = Input(shape=(seq_length,))
    pos = Input(shape=(seq_length, 1))
    embedding = Embedding(vocab_size, 50, input_length=seq_length)(input)
    concat = Concatenate(axis=2)([embedding, pos])
    model = LSTM(units, return_sequences=True, stateful=False)(concat)
    model = LSTM(units, return_sequences=False)(model)
    model = Dense(100, activation="relu")(model)
    model = Dense(vocab_size, activation="softmax")(model)
    model = Model(inputs=[input, pos], outputs=model)
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model, one_dim_seq_flags=[True, True])
def test_tiny_add_random(self):
    """Elementwise add of two Dense branches in a functional graph."""
    np.random.seed(1988)
    input_dim = 10
    num_channels = 6
    # Define a model
    input_tensor = Input(shape=(input_dim,))
    x1 = Dense(num_channels)(input_tensor)
    x2 = Dense(num_channels)(x1)
    x3 = Dense(num_channels)(x1)
    x4 = add([x2, x3])
    x5 = Dense(num_channels)(x4)
    model = Model(inputs=[input_tensor], outputs=[x5])
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_mul_random(self):
    """Elementwise multiply of two Dense branches."""
    np.random.seed(1988)
    input_dim = 10
    num_channels = 6
    # Define a model
    input_tensor = Input(shape=(input_dim,))
    x1 = Dense(num_channels)(input_tensor)
    x2 = Dense(num_channels)(x1)
    x3 = Dense(num_channels)(x1)
    x4 = multiply([x2, x3])
    x5 = Dense(num_channels)(x4)
    model = Model(inputs=[input_tensor], outputs=[x5])
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_cos_random(self):
    """Normalized dot (cosine similarity) of two Dense branches."""
    np.random.seed(1988)
    input_dim = 10
    num_channels = 6
    # Define a model
    input_tensor = Input(shape=(input_dim,))
    x1 = Dense(num_channels)(input_tensor)
    x2 = Dense(num_channels)(x1)
    x3 = Dense(num_channels)(x1)
    x4 = dot([x2, x3], axes=-1, normalize=True)
    x5 = Dense(num_channels)(x4)
    model = Model(inputs=[input_tensor], outputs=[x5])
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_zeropad_simple(self):
    """Symmetric 1-pixel ZeroPadding2D."""
    input_shape = (48, 48, 3)
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_zeropad_fancy(self):
    """Asymmetric per-side ZeroPadding2D."""
    input_shape = (48, 48, 3)
    model = Sequential()
    model.add(ZeroPadding2D(((2, 5), (3, 4)), input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_crop_simple(self):
    """Cropping2D with asymmetric crop amounts."""
    input_shape = (48, 48, 3)
    model = Sequential()
    model.add(Cropping2D(cropping=((2, 5), (2, 5)), input_shape=input_shape))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_permute(self):
    """Permute across every axis ordering of a 3-D blob."""
    # When input blob is 3D array (D1, D2, D3), Keras assumes the axes' meaning is
    # (D1=H,D2=W,D3=C), while CoreML assumes (D1=C,D2=H,D3=W)
    import itertools
    for permute_order in list(itertools.permutations([1, 2, 3])):
        model = Sequential()
        model.add(Permute(permute_order, input_shape=(4, 3, 2)))
        self._test_model(model, transpose_keras_result=True)
def test_reshape_3d(self):
    """Check that a 3-D -> 3-D Reshape layer converts correctly.

    A (5, 4, 3) input is reshaped to (10, 1, 6); both shapes hold 60
    elements, so the layer is a pure re-layout with no weights.
    """
    net = Sequential()
    reshape_layer = Reshape((10, 1, 6), input_shape=(5, 4, 3))
    net.add(reshape_layer)
    self._test_model(net, mode="linear")
def test_tiny_conv_dense_random(self):
    """Conv2D -> Dropout -> Flatten -> Dense pipeline with random weights."""
    np.random.seed(1988)
    input_dim = 8
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 2
    kernel_height = 5
    kernel_width = 5
    hidden_dim = 4
    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(hidden_dim))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_conv_dropout_random(self):
    """Conv2D -> SpatialDropout2D -> Flatten -> Dense pipeline."""
    np.random.seed(1988)
    input_dim = 8
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 2
    kernel_height = 5
    kernel_width = 5
    hidden_dim = 4
    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )
    model.add(SpatialDropout2D(0.5))
    model.add(Flatten())
    model.add(Dense(hidden_dim))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_dense_tanh_fused_random(self):
    """Dense layer with a fused tanh activation."""
    np.random.seed(1988)
    input_dim = 3
    hidden_dim = 4
    # Define a model
    model = Sequential()
    model.add(Dense(hidden_dim, input_shape=(input_dim,), activation="tanh"))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_conv_relu_fused_random(self):
    """Conv2D with a fused relu activation."""
    np.random.seed(1988)
    input_dim = 8
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 2
    kernel_height = 5
    kernel_width = 5
    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            activation="relu",
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_model(model)
def test_tiny_time_distrbuted(self):
    """TimeDistributed(Dense) as the first layer of a Sequential model.

    NOTE(review): "distrbuted" is a typo in the method name; kept as-is
    because the test is discovered and referenced by this name.
    """
    # as the first layer in a model
    model = Sequential()
    model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model)
def test_tiny_sequence_lstm(self, model_precision=_MLMODEL_FULL_PRECISION):
    """1-channel LSTM over a length-2 sequence; tight delta (1e-4)."""
    np.random.seed(1988)
    input_dim = 1
    input_length = 2
    num_channels = 1
    # Define a model
    model = Sequential()
    model.add(
        LSTM(
            num_channels,
            input_shape=(input_length, input_dim),
            implementation=1,
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model, delta=1e-4, model_precision=model_precision)
def test_tiny_sequence_lstm_half_precision(self):
    """Half-precision variant of test_tiny_sequence_lstm."""
    return self.test_tiny_sequence_lstm(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_spatial_bn(self):
    """ZeroPadding2D followed by BatchNormalization over axis 2."""
    np.random.seed(1988)
    x_in = Input(shape=(7, 7, 2))
    x = ZeroPadding2D(padding=(1, 1))(x_in)
    x = BatchNormalization(axis=2)(x)
    model = Model(x_in, x)
    self._test_model(model, delta=1e-2)
def test_embedding_fixed_length(self):
    """Embedding with a fixed input_length, flattened into Dense layers."""
    sequence_length = 5
    vocab_size = 10
    embed_channels = 4
    dense_units = sequence_length * embed_channels
    model = Sequential()
    model.add(Embedding(vocab_size, embed_channels, input_length=sequence_length))
    model.add(Flatten())
    model.add(Dense(dense_units))
    model.add(Dense(20))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model, one_dim_seq_flags=[True])
def test_conv1d_flatten(self, delta=1e-2):
    """AveragePooling1D -> Conv1D -> MaxPooling1D -> Flatten -> Dense."""
    model = Sequential()
    model.add(AveragePooling1D(2, input_shape=(64, 9)))
    model.add(Conv1D(16, 1, padding="same", activation="relu", use_bias=False))
    model.add(MaxPooling1D(2))
    model.add(Flatten())
    model.add(Dense(units=7, activation="softmax", use_bias=False))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model, delta=delta)
def test_dense_fused_act_in_td(self):
    """Dense with fused softmax wrapped in TimeDistributed."""
    np.random.seed(1988)
    x_in = Input(shape=(10, 2))
    x = TimeDistributed(Dense(6, activation="softmax"))(x_in)
    model = Model(inputs=[x_in], outputs=[x])
    self._test_model(model, delta=1e-4)
def test_conv_batch_1d(self):
    """Embedding -> Conv1D -> BatchNorm -> relu -> MaxPooling1D."""
    np.random.seed(1988)
    vocabulary_size = 4
    embedding_dimension = 6
    input_length = 10
    model = Sequential()
    model.add(
        Embedding(
            vocabulary_size,
            embedding_dimension,
            input_length=input_length,
            trainable=True,
        )
    )
    model.add(Conv1D(5, 2))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling1D(2))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model, one_dim_seq_flags=[True])
def test_lstm_td(self):
    """SimpleRNN returning sequences, followed by TimeDistributed(Dense)."""
    np.random.seed(1988)
    input_dim = 2
    input_length = 4
    num_channels = 3
    # Define a model
    model = Sequential()
    model.add(
        SimpleRNN(
            num_channels,
            return_sequences=True,
            input_shape=(input_length, input_dim),
        )
    )
    model.add(TimeDistributed(Dense(5)))
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
# Making sure that giant channel sizes get handled correctly
def test_large_channel_gpu(self):
    """Conv2D with 2049 output channels (beyond typical GPU batching limits)."""
    input_shape = (20, 20, 3)
    num_channels = 2049
    kernel_size = 3
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            filters=num_channels,
            kernel_size=(kernel_size, kernel_size),
        )
    )
    model.set_weights(
        [(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
    )
    self._test_model(model, delta=1e-2)
@pytest.mark.xfail(raises=Exception)
def test_large_batch_gpu(self):
    """TimeDistributed(Dense) over a 2049-step sequence; expected to raise."""
    batch_size = 2049
    num_channels = 4
    kernel_size = 3
    model = Sequential()
    model.add(
        TimeDistributed(Dense(num_channels), input_shape=(batch_size, kernel_size))
    )
    model.set_weights(
        [(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
    )
    self._test_model(model, delta=1e-2)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasTopologyCorrectnessTest(KerasNumericCorrectnessTest):
    """Correctness tests for non-trivial model topologies (shared layers,
    multiple inputs/outputs, intermediate outputs, nested models)."""
    def test_dangling_merge_left(self):
        """Concat where the first branch is a raw input (no layer applied)."""
        x1 = Input(shape=(4,), name="input1")
        x2 = Input(shape=(5,), name="input2")
        y1 = Dense(6, name="dense")(x2)
        z = concatenate([x1, y1])
        model = Model(inputs=[x1, x2], outputs=[z])
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model)
    def test_dangling_merge_right(self):
        """Same as above but with the raw input as the second concat operand."""
        x1 = Input(shape=(4,), name="input1")
        x2 = Input(shape=(5,), name="input2")
        y1 = Dense(6, name="dense")(x2)
        z = concatenate([y1, x1])
        model = Model(inputs=[x1, x2], outputs=[z])
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model)
    def test_shared_vision(self):
        """One vision sub-model applied to two inputs (shared weights)."""
        digit_input = Input(shape=(27, 27, 1))
        x = Conv2D(64, (3, 3))(digit_input)
        x = Conv2D(64, (3, 3))(x)
        out = Flatten()(x)
        vision_model = Model(inputs=[digit_input], outputs=[out])
        # then define the tell-digits-apart model
        digit_a = Input(shape=(27, 27, 1))
        digit_b = Input(shape=(27, 27, 1))
        # the vision model will be shared, weights and all
        out_a = vision_model(digit_a)
        out_b = vision_model(digit_b)
        concatenated = concatenate([out_a, out_b])
        out = Dense(1, activation="sigmoid")(concatenated)
        model = Model(inputs=[digit_a, digit_b], outputs=out)
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model)
    def test_tiny_weight_sharing(self):
        """A single Dense layer instance applied twice to the same input."""
        # - Dense1 -----------
        # x - |                |- Merge
        # - Dense1 - Dense2 --
        x = Input(shape=(3,))
        dense = Dense(4)
        y1 = dense(x)
        y2 = dense(x)
        y3 = Dense(4)(y2)
        z = concatenate([y1, y3])
        model = Model(inputs=[x], outputs=[z])
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model, mode="random", delta=1e-2)
    def test_tiny_multiple_outputs(self):
        """Model with two independent Dense outputs from one input."""
        x = Input(shape=(3,))
        y1 = Dense(4)(x)
        y2 = Dense(5)(x)
        model = Model([x], [y1, y2])
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model, mode="random", delta=1e-2)
    def test_intermediate_outputs_dense(self):
        """Expose an intermediate Dense output alongside the final output."""
        x = Input(shape=(3,))
        y = Dense(4, name="intermediate_dense_y")(x)
        z = Dense(5, name="intermediate_dense_z")(y)
        model = Model([x], [y, z])
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model, mode="random", delta=1e-2)
    def test_intermediate_outputs_conv2d(self):
        """Intermediate + final Conv2D outputs."""
        x = Input(shape=(8, 8, 3))
        y = Conv2D(4, (3, 3), name="intermdiate_conv2d_1")(x)
        z = Conv2D(5, (3, 3), name="intermdiate_conv2d_2")(y)
        model = Model([x], [y, z])
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model, mode="random", delta=1e-2)
    def test_intermediate_outputs_conv2d_fused_act(self):
        """Intermediate Conv2D outputs where the activation is fused into conv."""
        x = Input(shape=(8, 8, 3))
        y = Conv2D(4, (3, 3), name="intermdiate_conv2d_1_fused", activation="relu")(x)
        z = Conv2D(5, (3, 3), name="intermdiate_conv2d_2_fused", activation="relu")(y)
        model = Model([x], [y, z])
        model.set_weights([np.random.rand(*w.shape) - 0.5 for w in model.get_weights()])
        self._test_model(model, mode="random", delta=1e-2)
    def test_intermediate_outputs_conv1d(self):
        """Intermediate + final Conv1D outputs."""
        x = Input(shape=(10, 3))
        y = Conv1D(4, 3, name="intermdiate_conv1d_1")(x)
        z = Conv1D(5, 3, name="intermdiate_conv1d_2")(y)
        model = Model([x], [y, z])
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model, mode="random", delta=1e-2)
    def test_intermediate_outputs_conv1d_fused_act(self):
        """Intermediate Conv1D outputs with fused relu activations."""
        x = Input(shape=(10, 3))
        y = Conv1D(4, 3, name="intermdiate_conv1d_1_fused", activation="relu")(x)
        z = Conv1D(5, 3, name="intermdiate_conv1d_2_fused", activation="relu")(y)
        model = Model([x], [y, z])
        model.set_weights([np.random.rand(*w.shape) - 0.5 for w in model.get_weights()])
        self._test_model(model, mode="random", delta=1e-2)
    def test_intermediate_rcnn_1d(self):
        """RCNN-style 1-D conv block whose pooled output is exposed alongside
        the GRU output."""
        x_in = Input(shape=(10, 2))
        # Conv block 1
        x = Conv1D(3, 3, padding="same", name="interm_rcnn_conv1")(x_in)
        x = BatchNormalization(axis=-1, name="interm_rcnn_bn1")(x)
        x = Activation("elu")(x)
        x = MaxPooling1D(pool_size=2, name="interm_rcnn_pool1")(x)
        out1 = x  # out1.shape = (5,3)
        x = GRU(6, name="gru1")(x)
        out2 = x
        model = Model(x_in, [out1, out2])
        # model = Model(x_in, [out2])
        self._test_model(model, mode="random_zero_mean", delta=1e-2)
    def test_tiny_mobilenet_arch(self, model_precision=_MLMODEL_FULL_PRECISION):
        """Small MobileNet-style stack: conv / depthwise-conv / pointwise-conv,
        each followed by BatchNorm and ReLU6."""
        def ReLU6(x, name):
            # keras >= 2.2.1 ships a configurable ReLU layer; older versions
            # use the relu6 activation function instead.
            if keras.__version__ >= _StrictVersion("2.2.1"):
                return ReLU(6.0, name=name)(x)
            else:
                return Activation(relu6, name=name)(x)
        img_input = Input(shape=(32, 32, 3))
        x = Conv2D(
            4, (3, 3), padding="same", use_bias=False, strides=(2, 2), name="conv1"
        )(img_input)
        x = BatchNormalization(axis=-1, name="conv1_bn")(x)
        x = ReLU6(x, name="conv1_relu")
        x = DepthwiseConv2D(
            (3, 3),
            padding="same",
            depth_multiplier=1,
            strides=(1, 1),
            use_bias=False,
            name="conv_dw_1",
        )(x)
        x = BatchNormalization(axis=-1, name="conv_dw_1_bn")(x)
        x = ReLU6(x, name="conv_dw_1_relu")
        x = Conv2D(
            8, (1, 1), padding="same", use_bias=False, strides=(1, 1), name="conv_pw_1"
        )(x)
        x = BatchNormalization(axis=-1, name="conv_pw_1_bn")(x)
        x = ReLU6(x, name="conv_pw_1_relu")
        x = DepthwiseConv2D(
            (3, 3),
            padding="same",
            depth_multiplier=1,
            strides=(2, 2),
            use_bias=False,
            name="conv_dw_2",
        )(x)
        x = BatchNormalization(axis=-1, name="conv_dw_2_bn")(x)
        x = ReLU6(x, name="conv_dw_2_relu")
        x = Conv2D(
            8, (1, 1), padding="same", use_bias=False, strides=(2, 2), name="conv_pw_2"
        )(x)
        x = BatchNormalization(axis=-1, name="conv_pw_2_bn")(x)
        x = ReLU6(x, name="conv_pw_2_relu")
        model = Model(inputs=[img_input], outputs=[x])
        self._test_model(model, delta=1e-2, model_precision=model_precision)
    def test_tiny_mobilenet_arch_half_precision(self):
        """Half-precision variant of test_tiny_mobilenet_arch."""
        self.test_tiny_mobilenet_arch(model_precision=_MLMODEL_HALF_PRECISION)
    def test_tiny_xception(self, model_precision=_MLMODEL_FULL_PRECISION):
        """Small Xception-style entry flow with separable convs and a
        residual add."""
        img_input = Input(shape=(32, 32, 3))
        x = Conv2D(2, (3, 3), strides=(2, 2), use_bias=False, name="block1_conv1")(
            img_input
        )
        x = BatchNormalization(name="block1_conv1_bn")(x)
        x = Activation("relu", name="block1_conv1_act")(x)
        x = Conv2D(4, (3, 3), use_bias=False, name="block1_conv2")(x)
        x = BatchNormalization(name="block1_conv2_bn")(x)
        x = Activation("relu", name="block1_conv2_act")(x)
        residual = Conv2D(8, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
        residual = BatchNormalization()(residual)
        x = SeparableConv2D(
            8, (3, 3), padding="same", use_bias=False, name="block2_sepconv1"
        )(x)
        x = BatchNormalization(name="block2_sepconv1_bn")(x)
        x = Activation("relu", name="block2_sepconv2_act")(x)
        x = SeparableConv2D(
            8, (3, 3), padding="same", use_bias=False, name="block2_sepconv2"
        )(x)
        x = BatchNormalization(name="block2_sepconv2_bn")(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding="same", name="block2_pool")(x)
        x = add([x, residual])
        residual = Conv2D(16, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
        residual = BatchNormalization()(residual)
        model = Model(inputs=[img_input], outputs=[residual])
        self._test_model(model, delta=1e-2, model_precision=model_precision)
    def test_tiny_xception_half_precision(self):
        """Half-precision variant of test_tiny_xception."""
        return self.test_tiny_xception(model_precision=_MLMODEL_HALF_PRECISION)
    def test_nested_model_giving_output(self):
        """A Sequential top model consuming the output of a Sequential base."""
        base_model = Sequential()
        base_model.add(Conv2D(32, (1, 1), input_shape=(4, 4, 3)))
        top_model = Sequential()
        top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
        top_model.add(Dense(16, activation="relu"))
        top_model.add(Dense(1, activation="sigmoid"))
        model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
        self._test_model(model)
    # similar to issue 269
    def test_time_distributed_conv(self):
        """TimeDistributed conv/pool stack feeding an LSTM classifier."""
        model = Sequential()
        model.add(
            TimeDistributed(
                Conv2D(64, (3, 3), activation="relu"), input_shape=(1, 30, 30, 3)
            )
        )
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(1, 1))))
        model.add(TimeDistributed(Conv2D(32, (4, 4), activation="relu")))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
        model.add(TimeDistributed(Conv2D(32, (4, 4), activation="relu")))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
        model.add(TimeDistributed(Flatten()))
        model.add(Dropout(0.5))
        model.add(LSTM(32, return_sequences=False, dropout=0.5))
        model.add(Dense(10, activation="sigmoid"))
        self._test_model(model)
@pytest.mark.slow
@pytest.mark.keras2
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
class KerasNumericCorrectnessStressTest(KerasNumericCorrectnessTest):
"""
Unit test class for testing all combinations of a particular
layer.
"""
def _run_test(
    self,
    model,
    param,
    model_dir=None,
    delta=1e-2,
    transpose_keras_result=True,
    one_dim_seq_flags=None,
    model_precision=_MLMODEL_FULL_PRECISION,
):
    """ Run a test on a particular model

    Converts `model` to Core ML, feeds identical random data to both,
    and asserts the predictions agree elementwise within `delta`
    (relative to max(1, |pred|)). `param` is only used to label failure
    messages. If `model_dir` is None a temp dir is created and removed
    after prediction. Core ML prediction is only exercised on
    macOS >= 10.13.
    """
    use_tmp_folder = False
    if model_dir is None:
        use_tmp_folder = True
        model_dir = tempfile.mkdtemp()
    model_path = os.path.join(model_dir, "keras.mlmodel")
    # Generate some random data
    nb_inputs = len(model.inputs)
    if nb_inputs > 1:
        # Multi-input model: one named feature ("data_i") per input.
        input_names = []
        input_data = []
        coreml_input = {}
        for i in range(nb_inputs):
            # None (batch/sequence) dims are materialized as 1 for testing.
            input_shape = [1 if a is None else a for a in model.input_shape[i]]
            X = _generate_data(input_shape)
            feature_name = "data_%s" % i
            input_names.append(feature_name)
            input_data.append(X)
            if one_dim_seq_flags is None:
                coreml_input[feature_name] = _keras_transpose(X).astype("f")
            else:
                coreml_input[feature_name] = _keras_transpose(
                    X, one_dim_seq_flags[i]
                ).astype("f")
    else:
        # Single-input model: one feature named "data".
        input_shape = [1 if a is None else a for a in model.input_shape]
        input_names = ["data"]
        input_data = _generate_data(input_shape)
        if one_dim_seq_flags is None:
            coreml_input = {"data": _keras_transpose(input_data).astype("f")}
        else:
            coreml_input = {
                "data": _keras_transpose(input_data, one_dim_seq_flags[0]).astype(
                    "f"
                )
            }
    # Make predictions
    if transpose_keras_result:
        keras_preds = _keras_transpose(model.predict(input_data)).flatten()
    else:
        keras_preds = model.predict(input_data).flatten()
    # Get the model
    coreml_model = _get_coreml_model(
        model, input_names, ["output"], model_precision=model_precision
    )
    if _is_macos() and _macos_version() >= (10, 13):
        # get prediction
        coreml_preds = coreml_model.predict(coreml_input)["output"].flatten()
        if use_tmp_folder:
            shutil.rmtree(model_dir)
        self.assertEqual(
            len(coreml_preds),
            len(keras_preds),
            msg="Failed test case %s. Lengths wrong (%s vs %s)"
            % (param, len(coreml_preds), len(keras_preds)),
        )
        for i in range(len(keras_preds)):
            # Normalize by max(1, preds) so the comparison is relative
            # for large values and absolute for small ones.
            max_den = max(1.0, keras_preds[i], coreml_preds[i])
            self.assertAlmostEqual(
                keras_preds[i] / max_den,
                coreml_preds[i] / max_den,
                delta=delta,
                msg="Failed test case %s. Predictions wrong (%s vs %s)"
                % (param, coreml_preds[i], keras_preds[i]),
            )
@pytest.mark.slow
def test_activation_layer_params(self):
    """Sweep every supported activation on a Dense layer."""
    options = dict(
        activation=[
            "tanh",
            "relu",
            "sigmoid",
            "softmax",
            "softplus",
            "softsign",
            "hard_sigmoid",
            "elu",
        ]
    )
    # Define a function that tests a model
    num_channels = 10
    input_dim = 10
    def build_model(x):
        model = Sequential()
        model.add(Dense(num_channels, input_dim=input_dim))
        model.add(Activation(**dict(zip(options.keys(), x))))
        return x, model
    # Iterate through all combinations
    product = itertools.product(*options.values())
    args = [build_model(p) for p in product]
    # Test the cases
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._run_test(model, param)
@pytest.mark.slow
def test_dense_layer_params(self):
    """Sweep Dense layer activation x use_bias combinations."""
    options = dict(
        activation=[
            "relu",
            "softmax",
            "tanh",
            "sigmoid",
            "softplus",
            "softsign",
            "elu",
            "hard_sigmoid",
        ],
        use_bias=[True, False],
    )
    # Define a function that tests a model
    input_shape = (10,)
    num_channels = 10
    def build_model(x):
        kwargs = dict(zip(options.keys(), x))
        model = Sequential()
        model.add(Dense(num_channels, input_shape=input_shape, **kwargs))
        return x, model
    # Iterate through all combinations
    product = itertools.product(*options.values())
    args = [build_model(p) for p in product]
    # Test the cases
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param)
@pytest.mark.slow
def test_upsample_layer_params(self):
    """Sweep UpSampling2D size factors after a Conv2D."""
    options = dict(size=[(2, 2), (3, 3), (4, 4), (5, 5)])
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    X = np.random.rand(1, *input_shape)
    # Define a function that tests a model
    def build_model(x):
        kwargs = dict(zip(options.keys(), x))
        model = Sequential()
        model.add(Conv2D(filters=5, kernel_size=(7, 7), input_shape=input_shape))
        model.add(UpSampling2D(**kwargs))
        return x, model
    # Iterate through all combinations
    product = itertools.product(*options.values())
    args = [build_model(p) for p in product]
    # Test the cases
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param)
@pytest.mark.slow
def test_conv_layer_params(self, model_precision=_MLMODEL_FULL_PRECISION):
    """Sweep Conv2D constructor options and verify each conversion."""
    param_grid = dict(
        activation=[
            "relu",
            "tanh",
            "sigmoid",
        ],  # keras does not support softmax on 4-D
        use_bias=[True, False],
        padding=["same", "valid"],
        filters=[1, 3, 5],
        kernel_size=[[5, 5]],  # fails when sizes are different
    )
    input_shape = (10, 10, 1)

    def make_case(combo):
        layer_kwargs = dict(zip(param_grid.keys(), combo))
        net = Sequential()
        net.add(Conv2D(input_shape=input_shape, **layer_kwargs))
        return combo, net

    cases = [make_case(c) for c in itertools.product(*param_grid.values())]
    print("Testing a total of %s cases. This could take a while" % len(cases))
    for combo, net in cases:
        self._run_test(net, combo, model_precision=model_precision)
@pytest.mark.keras2
def test_conv_layer_params_half_precision(self):
    """Re-run the Conv2D parameter sweep with half-precision (FP16) weights."""
    return self.test_conv_layer_params(model_precision=_MLMODEL_HALF_PRECISION)
@pytest.mark.slow
def test_dense_elementwise_params(self):
    """Convert two-branch Dense models joined by each element-wise merge mode."""
    merge_modes = [add, multiply, concatenate, average, maximum]

    def make_case(merge_fn):
        left_in = Input(shape=(3,))
        right_in = Input(shape=(3,))
        merged = merge_fn([Dense(4)(left_in), Dense(4)(right_in)])
        return merge_fn, Model([left_in, right_in], merged)

    # itertools.product over a single list yields 1-tuples; iterating the
    # list directly is equivalent.
    cases = [make_case(m) for m in merge_modes]
    print("Testing a total of %s cases. This could take a while" % len(cases))
    for merge_fn, net in cases:
        self._run_test(net, merge_fn)
def test_vgg_16_tiny(self):
    """Convert a slimmed-down VGG-16 (32 filters everywhere) end to end."""
    input_shape = (48, 48, 3)
    model = Sequential()
    # VGG-16 layout: conv stages of [2, 2, 3, 3, 3] zero-padded 3x3 convs,
    # each stage followed by 2x2 max pooling; only the very first padding
    # layer carries the input shape.
    first_layer = True
    for convs_in_stage in (2, 2, 3, 3, 3):
        for _ in range(convs_in_stage):
            if first_layer:
                model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(32, (3, 3), activation="relu"))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(32, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(1000))  # activation='softmax'))
    # Random weights scaled into [-0.1, 0.1].
    model.set_weights(
        [(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
    )
    # Compare the converted Core ML model against Keras.
    self._test_model(model)
def test_vgg_16_tiny_no_pooling(self):
    """Convert a slimmed VGG-16 variant whose classifier has no dropout.

    NOTE(review): despite the name, the conv stages still contain
    max-pooling layers; only the Dropout layers of the classifier head
    are omitted compared to ``test_vgg_16_tiny``.
    """
    input_shape = (48, 48, 3)
    model = Sequential()
    # Conv stages of [2, 2, 3, 3, 3] zero-padded 3x3 convs, each stage
    # followed by 2x2 max pooling; only the first padding layer carries
    # the input shape.
    first_layer = True
    for convs_in_stage in (2, 2, 3, 3, 3):
        for _ in range(convs_in_stage):
            if first_layer:
                model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(32, (3, 3), activation="relu"))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Classifier head (no dropout).
    model.add(Flatten())
    model.add(Dense(32, activation="relu"))
    model.add(Dense(32, activation="relu"))
    model.add(Dense(1000))  # activation='softmax'))
    # Random weights scaled into [-0.1, 0.1].
    model.set_weights(
        [(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
    )
    self._test_model(model)
def test_vgg_16_tiny_no_pooling_no_padding(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    """Convert a 13-conv VGG-style stack with no pooling or padding layers."""
    input_shape = (48, 48, 3)
    model = Sequential()
    # First conv carries the input shape; 12 identical convs follow.
    model.add(Conv2D(32, (3, 3), activation="relu", input_shape=input_shape))
    for _ in range(12):
        model.add(Conv2D(32, (3, 3), activation="relu"))
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(32, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation="softmax"))
    # Compare the converted Core ML model against Keras.
    self._test_model(model, model_precision=model_precision)
def test_vgg_16_tiny_no_pooling_no_padding_half_precision(self):
    """Re-run the no-pooling/no-padding VGG conversion with FP16 weights."""
    return self.test_vgg_16_tiny_no_pooling_no_padding(
        model_precision=_MLMODEL_HALF_PRECISION
    )
def test_imdb_fasttext_first_2(self):
    """Convert the first two layers of an IMDB fastText-style model."""
    max_features = 10
    max_len = 6
    embedding_dims = 4
    pool_length = 2
    net = Sequential()
    net.add(Embedding(max_features, embedding_dims, input_length=max_len))
    # AveragePooling1D averages the embeddings of all words in the document.
    net.add(AveragePooling1D(pool_size=pool_length))
    # The single input is a 1-D token sequence.
    self._test_model(net, one_dim_seq_flags=[True])
def test_tiny_mcrnn_td(self):
    """Convert a tiny conv -> pool -> reshape -> TimeDistributed(Dense) net."""
    net = Sequential()
    net.add(Conv2D(3, (1, 1), input_shape=(2, 4, 4), padding="same"))
    net.add(AveragePooling2D(pool_size=(2, 2)))
    # Flatten the pooled feature map into a length-2 sequence of size-3 vectors.
    net.add(Reshape((2, 3)))
    net.add(TimeDistributed(Dense(5)))
    self._test_model(net)
def test_tiny_mcrnn_recurrent(self):
    """Convert a tiny conv -> pool -> reshape -> LSTM network."""
    net = Sequential()
    net.add(Conv2D(3, (1, 1), input_shape=(2, 4, 4), padding="same"))
    net.add(AveragePooling2D(pool_size=(2, 2)))
    # Flatten the pooled feature map into a length-2 sequence of size-3 vectors.
    net.add(Reshape((2, 3)))
    net.add(LSTM(5, recurrent_activation="sigmoid"))
    self._test_model(net)
def test_tiny_mcrnn_music_tagger(self):
    """Convert a tiny CRNN shaped like a music auto-tagger.

    Two conv/batch-norm/elu/pool blocks feed a pair of stacked GRUs; the
    model gets random weights and the conversion is checked numerically.
    """
    x_in = Input(shape=(4, 6, 1))
    x = ZeroPadding2D(padding=(0, 1))(x_in)
    x = BatchNormalization(axis=2, name="bn_0_freq")(x)
    # Conv block 1
    x = Conv2D(2, (3, 3), padding="same", name="conv1")(x)
    x = BatchNormalization(axis=3, name="bn1")(x)
    x = Activation("elu")(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool1")(x)
    # Conv block 2
    x = Conv2D(4, (3, 3), padding="same", name="conv2")(x)
    x = BatchNormalization(axis=3, name="bn2")(x)
    x = Activation("elu")(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool2")(x)
    # Should get you (1,1,2,4)
    x = Reshape((2, 4))(x)
    x = GRU(32, return_sequences=True, name="gru1")(x)
    x = GRU(32, return_sequences=False, name="gru2")(x)
    # Create model.
    model = Model(x_in, x)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # NOTE(review): looser delta (1e-2) presumably because random weights
    # through batch-norm accumulate more float error — inherited setting.
    self._test_model(model, mode="random_zero_mean", delta=1e-2)
def test_tiny_apple_manual(self):
    """Convert a small LSTM -> Dense -> softmax sequence classifier."""
    net = Sequential()
    net.add(LSTM(3, input_shape=(4, 5), recurrent_activation="sigmoid"))
    net.add(Dense(5))
    net.add(Activation("softmax"))
    self._test_model(net)
def test_tiny_image_captioning_image_branch(self):
    """Convert the image branch of a tiny captioning network."""
    # Inner CNN feature extractor, wrapped as its own Model.
    inner_in = Input(shape=(16, 16, 3))
    feat = Flatten()(Conv2D(2, (3, 3))(inner_in))
    feature_net = Model(inputs=[inner_in], outputs=[feat])
    # Outer branch: extractor -> projection -> sequence-shaped output.
    outer_in = Input(shape=(16, 16, 3))
    out = feature_net(outer_in)
    out = Dense(8, name="cap_dense")(out)
    out = Reshape((1, 8), name="cap_reshape")(out)
    image_branch = Model(inputs=[outer_in], outputs=[out])
    self._test_model(image_branch)
def test_tiny_image_captioning_feature_merge(self):
    """Convert the merge of image features with caption word embeddings."""
    # Inner CNN feature extractor, wrapped as its own Model.
    inner_in = Input(shape=(16, 16, 3))
    feat = Flatten()(Conv2D(2, (3, 3))(inner_in))
    feature_net = Model([inner_in], [feat])
    # Image branch: extractor -> projection -> length-1 sequence.
    outer_in = Input(shape=(16, 16, 3))
    img_feats = feature_net(outer_in)
    img_feats = Dense(8, name="cap_dense")(img_feats)
    img_feats = Reshape((1, 8), name="cap_reshape")(img_feats)
    # Caption branch: token ids -> embeddings.
    caption_in = Input(shape=(5,))  # max_length = 5
    word_embeds = Embedding(8, 8, name="cap_embedding")(caption_in)
    # Concatenate image features and word embeddings along the time axis.
    merged = concatenate([img_feats, word_embeds], axis=1, name="cap_merge")
    combined_model = Model(inputs=[outer_in, caption_in], outputs=[merged])
    # Image input is not a 1-D sequence; the caption input is.
    self._test_model(combined_model, one_dim_seq_flags=[False, True])
def test_tiny_image_captioning(self):
    """Convert a full tiny captioning network: CNN image branch merged with
    embedded caption tokens, followed by an LSTM decoder."""
    # use a conv layer as a image feature branch
    img_input_1 = Input(shape=(16, 16, 3))
    x = Conv2D(2, (3, 3))(img_input_1)
    x = Flatten()(x)
    img_model = Model(inputs=[img_input_1], outputs=[x])
    # Project image features to the embedding size, as a length-1 sequence.
    img_input = Input(shape=(16, 16, 3))
    x = img_model(img_input)
    x = Dense(8, name="cap_dense")(x)
    x = Reshape((1, 8), name="cap_reshape")(x)
    # Caption branch: token ids -> embeddings.
    sentence_input = Input(shape=(5,))  # max_length = 5
    y = Embedding(8, 8, name="cap_embedding")(sentence_input)
    # Concatenate along the time axis, then decode with an LSTM.
    z = concatenate([x, y], axis=1, name="cap_merge")
    z = LSTM(4, return_sequences=True, name="cap_lstm")(z)
    z = TimeDistributed(Dense(8), name="cap_timedistributed")(z)
    combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
    # Image input is not a 1-D sequence; the caption input is.
    self._test_model(combined_model, one_dim_seq_flags=[False, True])
def test_tiny_babi_rnn(self):
    """Convert a small bAbI-style QA model: story and query branches are
    embedded, merged by element-wise add, and decoded by an LSTM."""
    vocab_size = 10
    embed_hidden_size = 8
    story_maxlen = 5
    query_maxlen = 5
    # Story branch: embedding + dropout, kept as a full sequence.
    input_tensor_1 = Input(shape=(story_maxlen,))
    x1 = Embedding(vocab_size, embed_hidden_size)(input_tensor_1)
    x1 = Dropout(0.3)(x1)
    # Query branch: embedding -> LSTM summary vector, repeated so its
    # shape matches the story sequence for the add-merge below.
    input_tensor_2 = Input(shape=(query_maxlen,))
    x2 = Embedding(vocab_size, embed_hidden_size)(input_tensor_2)
    x2 = Dropout(0.3)(x2)
    x2 = LSTM(embed_hidden_size, return_sequences=False)(x2)
    x2 = RepeatVector(story_maxlen)(x2)
    # Merge branches and classify over the vocabulary.
    x3 = add([x1, x2])
    x3 = LSTM(embed_hidden_size, return_sequences=False)(x3)
    x3 = Dropout(0.3)(x3)
    x3 = Dense(vocab_size, activation="softmax")(x3)
    model = Model(inputs=[input_tensor_1, input_tensor_2], outputs=[x3])
    # Both inputs are 1-D token sequences.
    self._test_model(model, one_dim_seq_flags=[True, True])
def test_clickbait_cnn(self, model_precision=_MLMODEL_FULL_PRECISION):
    """Convert the clickbait-detector text CNN.

    Architecture from: https://github.com/saurabhmathur96/clickbait-detector
    """
    vocabulary_size = 500
    embedding_dimension = 30
    input_length = 20
    net = Sequential()
    net.add(
        Embedding(
            vocabulary_size,
            embedding_dimension,
            input_length=input_length,
            trainable=True,
        )
    )
    # Three identical conv -> batch-norm -> ReLU stages.
    for _ in range(3):
        net.add(Conv1D(32, 2))
        net.add(BatchNormalization())
        net.add(Activation("relu"))
    # Pool the remaining sequence down and score with a sigmoid unit.
    net.add(MaxPooling1D(17))
    net.add(Flatten())
    net.add(Dense(1, use_bias=True))
    net.add(BatchNormalization())
    net.add(Activation("sigmoid"))
    # The single input is a 1-D token sequence.
    self._test_model(
        net, one_dim_seq_flags=[True], model_precision=model_precision
    )
def test_clickbait_cnn_half_precision(self):
    """Re-run the clickbait CNN conversion with half-precision (FP16) weights."""
    return self.test_clickbait_cnn(model_precision=_MLMODEL_HALF_PRECISION)
def test_model_with_duplicated_edges(self):
    """Convert a graph where one tensor feeds two downstream layers."""
    inputs = Input(shape=(20, 20))
    relu_out = Activation("relu")(inputs)
    # The same activation output is consumed by both branches below,
    # producing duplicated edges in the converter's graph.
    cropped = Cropping1D(cropping=(1, 1))(relu_out)
    convolved = Conv1D(20, 3, padding="valid")(relu_out)
    outputs = Add()([convolved, cropped])
    self._test_model(Model(inputs, outputs))
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasBasicConversionTest(KerasNumericCorrectnessTest):
    """Converter-flag checks that inspect the spec rather than outputs."""

    def test_float_arraytype_flag(self):
        """use_float_arraytype=True should produce FLOAT32 multiarray I/O."""
        np.random.seed(1988)
        net = Sequential()
        net.add(Dense(1000, input_shape=(100,)))
        # Randomize the weights so the model is non-trivial.
        net.set_weights([np.random.rand(*w.shape) for w in net.get_weights()])
        # Convert with the float-arraytype flag enabled.
        from coremltools.converters import keras as keras_converter

        mlmodel = keras_converter.convert(net, use_float_arraytype=True)
        spec = mlmodel.get_spec()
        from coremltools.proto import Model_pb2 as _Model_pb2

        # Both the first input and the first output must be FLOAT32 arrays.
        for feature in (spec.description.input[0], spec.description.output[0]):
            self.assertEqual(
                feature.type.multiArrayType.dataType,
                _Model_pb2.ArrayFeatureType.FLOAT32,
            )
# Script entry point: run the full unittest suite when executed directly.
if __name__ == "__main__":
    unittest.main()
    # To run a single test case instead of the full suite, use:
    # suite = unittest.TestSuite()
    # suite.addTest(KerasBasicNumericCorrectnessTest("test_lstm_concat_dense_random"))
    # unittest.TextTestRunner().run(suite)
| [
"numpy.random.seed",
"distutils.version.StrictVersion",
"keras.layers.dot",
"keras.layers.Cropping2D",
"numpy.ones",
"keras.models.Model",
"numpy.product",
"keras.layers.ZeroPadding1D",
"keras.layers.ZeroPadding2D",
"keras.layers.Input",
"keras.layers.Cropping1D",
"keras.layers.concatenate",
... | [((3825, 3894), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (3840, 3894), False, 'import unittest\n'), ((8791, 8860), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (8806, 8860), False, 'import unittest\n'), ((78144, 78213), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (78159, 78213), False, 'import unittest\n'), ((88196, 88265), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (88211, 88265), False, 'import unittest\n'), ((109425, 109494), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. 
Skipping tests.')\n", (109440, 109494), False, 'import unittest\n'), ((3073, 3258), 'coremltools.converters.keras.convert', 'keras_converter.convert', (['model', 'input_names', 'output_names'], {'input_name_shape_dict': 'input_name_shape_dict', 'model_precision': 'model_precision', 'use_float_arraytype': 'use_float_arraytype'}), '(model, input_names, output_names,\n input_name_shape_dict=input_name_shape_dict, model_precision=\n model_precision, use_float_arraytype=use_float_arraytype)\n', (3096, 3258), True, 'from coremltools.converters import keras as keras_converter\n'), ((77681, 77716), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'Exception'}), '(raises=Exception)\n', (77698, 77716), False, 'import pytest\n'), ((110483, 110498), 'unittest.main', 'unittest.main', ([], {}), '()\n', (110496, 110498), False, 'import unittest\n'), ((1492, 1515), 'distutils.version.StrictVersion', '_StrictVersion', (['"""2.2.1"""'], {}), "('2.2.1')\n", (1506, 1515), True, 'from distutils.version import StrictVersion as _StrictVersion\n'), ((1965, 1997), 'numpy.transpose', 'np.transpose', (['x', '[1, 0, 4, 2, 3]'], {}), '(x, [1, 0, 4, 2, 3])\n', (1977, 1997), True, 'import numpy as np\n'), ((2099, 2128), 'numpy.transpose', 'np.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (2111, 2128), True, 'import numpy as np\n'), ((2144, 2169), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2158, 2169), True, 'import numpy as np\n'), ((3476, 3497), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (3484, 3497), True, 'import numpy as np\n'), ((9038, 9058), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (9052, 9058), True, 'import numpy as np\n'), ((9101, 9113), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9111, 9113), False, 'from keras.models import Sequential, Model\n'), ((9921, 9941), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (9935, 9941), True, 
'import numpy as np\n'), ((9984, 9996), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9994, 9996), False, 'from keras.models import Sequential, Model\n'), ((10443, 10463), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (10457, 10463), True, 'import numpy as np\n'), ((10506, 10518), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10516, 10518), False, 'from keras.models import Sequential, Model\n'), ((10810, 10830), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (10824, 10830), True, 'import numpy as np\n'), ((10873, 10885), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10883, 10885), False, 'from keras.models import Sequential, Model\n'), ((11174, 11194), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (11188, 11194), True, 'import numpy as np\n'), ((11237, 11249), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (11247, 11249), False, 'from keras.models import Sequential, Model\n'), ((11544, 11564), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (11558, 11564), True, 'import numpy as np\n'), ((11655, 11667), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (11665, 11667), False, 'from keras.models import Sequential, Model\n'), ((12086, 12106), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (12100, 12106), True, 'import numpy as np\n'), ((12279, 12291), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (12289, 12291), False, 'from keras.models import Sequential, Model\n'), ((12911, 12931), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (12925, 12931), True, 'import numpy as np\n'), ((13104, 13116), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (13114, 13116), False, 'from keras.models import Sequential, Model\n'), ((13769, 13789), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (13783, 13789), True, 'import numpy as np\n'), ((13957, 
13969), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (13967, 13969), False, 'from keras.models import Sequential, Model\n'), ((14698, 14718), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (14712, 14718), True, 'import numpy as np\n'), ((14891, 14903), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (14901, 14903), False, 'from keras.models import Sequential, Model\n'), ((15609, 15629), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (15623, 15629), True, 'import numpy as np\n'), ((15781, 15793), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (15791, 15793), False, 'from keras.models import Sequential, Model\n'), ((16523, 16543), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (16537, 16543), True, 'import numpy as np\n'), ((16686, 16698), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (16696, 16698), False, 'from keras.models import Sequential, Model\n'), ((17317, 17337), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (17331, 17337), True, 'import numpy as np\n'), ((17451, 17463), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17461, 17463), False, 'from keras.models import Sequential, Model\n'), ((17937, 17957), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (17951, 17957), True, 'import numpy as np\n'), ((18071, 18083), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (18081, 18083), False, 'from keras.models import Sequential, Model\n'), ((18688, 18708), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (18702, 18708), True, 'import numpy as np\n'), ((18822, 18834), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (18832, 18834), False, 'from keras.models import Sequential, Model\n'), ((19525, 19545), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (19539, 19545), True, 'import numpy as np\n'), ((19659, 19671), 
'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (19669, 19671), False, 'from keras.models import Sequential, Model\n'), ((20132, 20152), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (20146, 20152), True, 'import numpy as np\n'), ((20275, 20287), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (20285, 20287), False, 'from keras.models import Sequential, Model\n'), ((20765, 20785), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (20779, 20785), True, 'import numpy as np\n'), ((20974, 20986), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (20984, 20986), False, 'from keras.models import Sequential, Model\n'), ((21454, 21474), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (21468, 21474), True, 'import numpy as np\n'), ((21663, 21675), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (21673, 21675), False, 'from keras.models import Sequential, Model\n'), ((22186, 22206), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (22200, 22206), True, 'import numpy as np\n'), ((22395, 22407), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (22405, 22407), False, 'from keras.models import Sequential, Model\n'), ((23046, 23058), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (23056, 23058), False, 'from keras.models import Sequential, Model\n'), ((23279, 23291), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (23289, 23291), False, 'from keras.models import Sequential, Model\n'), ((23767, 23787), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (23781, 23787), True, 'import numpy as np\n'), ((23976, 23988), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (23986, 23988), False, 'from keras.models import Sequential, Model\n'), ((24684, 24704), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (24698, 24704), True, 'import numpy as np\n'), ((24893, 24905), 
'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (24903, 24905), False, 'from keras.models import Sequential, Model\n'), ((25670, 25690), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (25684, 25690), True, 'import numpy as np\n'), ((25880, 25892), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (25890, 25892), False, 'from keras.models import Sequential, Model\n'), ((26410, 26430), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (26424, 26430), True, 'import numpy as np\n'), ((26620, 26632), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (26630, 26632), False, 'from keras.models import Sequential, Model\n'), ((27177, 27197), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (27191, 27197), True, 'import numpy as np\n'), ((27391, 27403), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (27401, 27403), False, 'from keras.models import Sequential, Model\n'), ((27932, 27952), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (27946, 27952), True, 'import numpy as np\n'), ((28146, 28158), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (28156, 28158), False, 'from keras.models import Sequential, Model\n'), ((28704, 28724), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (28718, 28724), True, 'import numpy as np\n'), ((28918, 28930), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (28928, 28930), False, 'from keras.models import Sequential, Model\n'), ((29476, 29496), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (29490, 29496), True, 'import numpy as np\n'), ((29690, 29702), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (29700, 29702), False, 'from keras.models import Sequential, Model\n'), ((30228, 30248), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (30242, 30248), True, 'import numpy as np\n'), ((30466, 30478), 'keras.models.Sequential', 
'Sequential', ([], {}), '()\n', (30476, 30478), False, 'from keras.models import Sequential, Model\n'), ((31046, 31066), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (31060, 31066), True, 'import numpy as np\n'), ((31284, 31296), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (31294, 31296), False, 'from keras.models import Sequential, Model\n'), ((31910, 31930), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (31924, 31930), True, 'import numpy as np\n'), ((32149, 32161), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (32159, 32161), False, 'from keras.models import Sequential, Model\n'), ((32802, 32822), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (32816, 32822), True, 'import numpy as np\n'), ((33041, 33053), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (33051, 33053), False, 'from keras.models import Sequential, Model\n'), ((33946, 33966), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (33960, 33966), True, 'import numpy as np\n'), ((34139, 34151), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (34149, 34151), False, 'from keras.models import Sequential, Model\n'), ((34918, 34938), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (34932, 34938), True, 'import numpy as np\n'), ((35090, 35102), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (35100, 35102), False, 'from keras.models import Sequential, Model\n'), ((35871, 35883), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (35881, 35883), False, 'from keras.models import Sequential, Model\n'), ((36209, 36221), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (36219, 36221), False, 'from keras.models import Sequential, Model\n'), ((36524, 36536), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (36534, 36536), False, 'from keras.models import Sequential, Model\n'), ((36840, 36852), 'keras.models.Sequential', 
'Sequential', ([], {}), '()\n', (36850, 36852), False, 'from keras.models import Sequential, Model\n'), ((37149, 37161), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (37159, 37161), False, 'from keras.models import Sequential, Model\n'), ((37363, 37375), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (37373, 37375), False, 'from keras.models import Sequential, Model\n'), ((37696, 37708), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (37706, 37708), False, 'from keras.models import Sequential, Model\n'), ((38025, 38037), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38035, 38037), False, 'from keras.models import Sequential, Model\n'), ((38189, 38201), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38199, 38201), False, 'from keras.models import Sequential, Model\n'), ((38351, 38371), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (38365, 38371), True, 'import numpy as np\n'), ((38485, 38497), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38495, 38497), False, 'from keras.models import Sequential, Model\n'), ((38839, 38859), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (38853, 38859), True, 'import numpy as np\n'), ((38973, 38985), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38983, 38985), False, 'from keras.models import Sequential, Model\n'), ((39343, 39363), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (39357, 39363), True, 'import numpy as np\n'), ((39477, 39489), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (39487, 39489), False, 'from keras.models import Sequential, Model\n'), ((39842, 39862), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (39856, 39862), True, 'import numpy as np\n'), ((40051, 40063), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (40061, 40063), False, 'from keras.models import Sequential, Model\n'), ((40544, 40564), 
'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (40558, 40564), True, 'import numpy as np\n'), ((40678, 40690), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (40688, 40690), False, 'from keras.models import Sequential, Model\n'), ((41229, 41249), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (41243, 41249), True, 'import numpy as np\n'), ((41363, 41375), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (41373, 41375), False, 'from keras.models import Sequential, Model\n'), ((42122, 42142), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (42136, 42142), True, 'import numpy as np\n'), ((42256, 42268), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (42266, 42268), False, 'from keras.models import Sequential, Model\n'), ((42970, 42990), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (42984, 42990), True, 'import numpy as np\n'), ((43007, 43019), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (43017, 43019), False, 'from keras.models import Sequential, Model\n'), ((43310, 43322), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (43320, 43322), False, 'from keras.models import Sequential, Model\n'), ((43788, 43800), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (43798, 43800), False, 'from keras.models import Sequential, Model\n'), ((44310, 44330), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (44324, 44330), True, 'import numpy as np\n'), ((44446, 44458), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (44456, 44458), False, 'from keras.models import Sequential, Model\n'), ((44784, 44804), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (44798, 44804), True, 'import numpy as np\n'), ((44919, 44931), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (44929, 44931), False, 'from keras.models import Sequential, Model\n'), ((45283, 45303), 
'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (45297, 45303), True, 'import numpy as np\n'), ((45418, 45430), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (45428, 45430), False, 'from keras.models import Sequential, Model\n'), ((45874, 45894), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (45888, 45894), True, 'import numpy as np\n'), ((45985, 45997), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (45995, 45997), False, 'from keras.models import Sequential, Model\n'), ((46379, 46399), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (46393, 46399), True, 'import numpy as np\n'), ((46490, 46502), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (46500, 46502), False, 'from keras.models import Sequential, Model\n'), ((47001, 47021), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (47015, 47021), True, 'import numpy as np\n'), ((47138, 47150), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (47148, 47150), False, 'from keras.models import Sequential, Model\n'), ((47506, 47526), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (47520, 47526), True, 'import numpy as np\n'), ((47616, 47628), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (47626, 47628), False, 'from keras.models import Sequential, Model\n'), ((48077, 48097), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (48091, 48097), True, 'import numpy as np\n'), ((48187, 48199), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (48197, 48199), False, 'from keras.models import Sequential, Model\n'), ((48649, 48669), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (48663, 48669), True, 'import numpy as np\n'), ((48760, 48772), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (48770, 48772), False, 'from keras.models import Sequential, Model\n'), ((49222, 49242), 'numpy.random.seed', 
'np.random.seed', (['(1988)'], {}), '(1988)\n', (49236, 49242), True, 'import numpy as np\n'), ((49333, 49345), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (49343, 49345), False, 'from keras.models import Sequential, Model\n'), ((49775, 49795), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (49789, 49795), True, 'import numpy as np\n'), ((49861, 49873), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (49871, 49873), False, 'from keras.models import Sequential, Model\n'), ((50185, 50205), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (50199, 50205), True, 'import numpy as np\n'), ((50271, 50283), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (50281, 50283), False, 'from keras.models import Sequential, Model\n'), ((50705, 50725), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (50719, 50725), True, 'import numpy as np\n'), ((50842, 50854), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (50852, 50854), False, 'from keras.models import Sequential, Model\n'), ((51326, 51346), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (51340, 51346), True, 'import numpy as np\n'), ((51461, 51473), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (51471, 51473), False, 'from keras.models import Sequential, Model\n'), ((51991, 52011), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (52005, 52011), True, 'import numpy as np\n'), ((52127, 52139), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (52137, 52139), False, 'from keras.models import Sequential, Model\n'), ((52682, 52702), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (52696, 52702), True, 'import numpy as np\n'), ((52841, 52853), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (52851, 52853), False, 'from keras.models import Sequential, Model\n'), ((53534, 53554), 'numpy.random.seed', 'np.random.seed', (['(1988)'], 
{}), '(1988)\n', (53548, 53554), True, 'import numpy as np\n'), ((53670, 53682), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (53680, 53682), False, 'from keras.models import Sequential, Model\n'), ((54206, 54226), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (54220, 54226), True, 'import numpy as np\n'), ((54343, 54355), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (54353, 54355), False, 'from keras.models import Sequential, Model\n'), ((54985, 55005), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (54999, 55005), True, 'import numpy as np\n'), ((55096, 55108), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (55106, 55108), False, 'from keras.models import Sequential, Model\n'), ((55525, 55545), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (55539, 55545), True, 'import numpy as np\n'), ((55636, 55648), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (55646, 55648), False, 'from keras.models import Sequential, Model\n'), ((56360, 56380), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (56374, 56380), True, 'import numpy as np\n'), ((56519, 56531), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (56529, 56531), False, 'from keras.models import Sequential, Model\n'), ((57294, 57314), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (57308, 57314), True, 'import numpy as np\n'), ((57453, 57465), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (57463, 57465), False, 'from keras.models import Sequential, Model\n'), ((58178, 58198), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (58192, 58198), True, 'import numpy as np\n'), ((58314, 58326), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (58324, 58326), False, 'from keras.models import Sequential, Model\n'), ((58815, 58835), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (58829, 58835), 
True, 'import numpy as np\n'), ((58952, 58964), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (58962, 58964), False, 'from keras.models import Sequential, Model\n'), ((59458, 59478), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (59472, 59478), True, 'import numpy as np\n'), ((59594, 59606), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (59604, 59606), False, 'from keras.models import Sequential, Model\n'), ((60222, 60242), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (60236, 60242), True, 'import numpy as np\n'), ((60358, 60370), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (60368, 60370), False, 'from keras.models import Sequential, Model\n'), ((62198, 62218), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (62212, 62218), True, 'import numpy as np\n'), ((62320, 62332), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (62330, 62332), False, 'from keras.models import Sequential, Model\n'), ((62690, 62710), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (62704, 62710), True, 'import numpy as np\n'), ((62814, 62826), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (62824, 62826), False, 'from keras.models import Sequential, Model\n'), ((63408, 63428), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (63422, 63428), True, 'import numpy as np\n'), ((63536, 63548), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (63546, 63548), False, 'from keras.models import Sequential, Model\n'), ((63950, 63970), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (63964, 63970), True, 'import numpy as np\n'), ((64084, 64096), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (64094, 64096), False, 'from keras.models import Sequential, Model\n'), ((64489, 64509), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (64503, 64509), True, 'import numpy as np\n'), 
((64607, 64632), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (64612, 64632), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((64767, 64788), 'keras.layers.concatenate', 'concatenate', (['[x2, x3]'], {}), '([x2, x3])\n', (64778, 64788), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((64843, 64885), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (64848, 64885), False, 'from keras.models import Sequential, Model\n'), ((65120, 65140), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (65134, 65140), True, 'import numpy as np\n'), ((65288, 65311), 'keras.layers.Input', 'Input', ([], {'shape': '(seq_len,)'}), '(shape=(seq_len,))\n', (65293, 65311), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((65459, 65488), 'keras.layers.concatenate', 'concatenate', (['[x1, x2]'], {'axis': '(1)'}), '([x1, x2], axis=1)\n', (65470, 65488), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((65506, 65548), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x3]'}), '(inputs=[input_tensor], outputs=[x3])\n', (65511, 65548), False, 'from keras.models import Sequential, Model\n'), ((65811, 65831), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (65825, 65831), True, 'import numpy as np\n'), ((65942, 65968), 'keras.layers.Input', 'Input', ([], {'shape': '(seq_length,)'}), '(shape=(seq_length,))\n', (65947, 65968), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((65983, 66011), 'keras.layers.Input', 'Input', ([], {'shape': '(seq_length, 1)'}), '(shape=(seq_length, 1))\n', (65988, 66011), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, 
Dropout\n'), ((66412, 66453), 'keras.models.Model', 'Model', ([], {'inputs': '[input, pos]', 'outputs': 'model'}), '(inputs=[input, pos], outputs=model)\n', (66417, 66453), False, 'from keras.models import Sequential, Model\n'), ((66713, 66733), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (66727, 66733), True, 'import numpy as np\n'), ((66831, 66856), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (66836, 66856), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((66991, 67004), 'keras.layers.add', 'add', (['[x2, x3]'], {}), '([x2, x3])\n', (66994, 67004), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((67059, 67101), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (67064, 67101), False, 'from keras.models import Sequential, Model\n'), ((67329, 67349), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (67343, 67349), True, 'import numpy as np\n'), ((67447, 67472), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (67452, 67472), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((67607, 67625), 'keras.layers.multiply', 'multiply', (['[x2, x3]'], {}), '([x2, x3])\n', (67615, 67625), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((67680, 67722), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (67685, 67722), False, 'from keras.models import Sequential, Model\n'), ((67950, 67970), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (67964, 67970), True, 'import numpy as np\n'), ((68068, 68093), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (68073, 
68093), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((68228, 68266), 'keras.layers.dot', 'dot', (['[x2, x3]'], {'axes': '(-1)', 'normalize': '(True)'}), '([x2, x3], axes=-1, normalize=True)\n', (68231, 68266), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((68321, 68363), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (68326, 68363), False, 'from keras.models import Sequential, Model\n'), ((68633, 68645), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (68643, 68645), False, 'from keras.models import Sequential, Model\n'), ((68980, 68992), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (68990, 68992), False, 'from keras.models import Sequential, Model\n'), ((69335, 69347), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (69345, 69347), False, 'from keras.models import Sequential, Model\n'), ((70110, 70122), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (70120, 70122), False, 'from keras.models import Sequential, Model\n'), ((70284, 70304), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (70298, 70304), True, 'import numpy as np\n'), ((70539, 70551), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (70549, 70551), False, 'from keras.models import Sequential, Model\n'), ((71086, 71106), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (71100, 71106), True, 'import numpy as np\n'), ((71341, 71353), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (71351, 71353), False, 'from keras.models import Sequential, Model\n'), ((71901, 71921), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (71915, 71921), True, 'import numpy as np\n'), ((72033, 72045), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (72043, 72045), False, 'from keras.models import Sequential, 
Model\n'), ((72367, 72387), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (72381, 72387), True, 'import numpy as np\n'), ((72622, 72634), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (72632, 72634), False, 'from keras.models import Sequential, Model\n'), ((73151, 73163), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (73161, 73163), False, 'from keras.models import Sequential, Model\n'), ((73438, 73458), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (73452, 73458), True, 'import numpy as np\n'), ((73573, 73585), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (73583, 73585), False, 'from keras.models import Sequential, Model\n'), ((74262, 74282), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (74276, 74282), True, 'import numpy as np\n'), ((74298, 74320), 'keras.layers.Input', 'Input', ([], {'shape': '(7, 7, 2)'}), '(shape=(7, 7, 2))\n', (74303, 74320), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((74427, 74441), 'keras.models.Model', 'Model', (['x_in', 'x'], {}), '(x_in, x)\n', (74432, 74441), False, 'from keras.models import Sequential, Model\n'), ((74682, 74694), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (74692, 74694), False, 'from keras.models import Sequential, Model\n'), ((75084, 75096), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (75094, 75096), False, 'from keras.models import Sequential, Model\n'), ((75557, 75577), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (75571, 75577), True, 'import numpy as np\n'), ((75593, 75613), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 2)'}), '(shape=(10, 2))\n', (75598, 75613), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((75696, 75729), 'keras.models.Model', 'Model', ([], {'inputs': '[x_in]', 'outputs': '[x]'}), '(inputs=[x_in], outputs=[x])\n', (75701, 
75729), False, 'from keras.models import Sequential, Model\n'), ((75818, 75838), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (75832, 75838), True, 'import numpy as np\n'), ((75942, 75954), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (75952, 75954), False, 'from keras.models import Sequential, Model\n'), ((76492, 76512), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (76506, 76512), True, 'import numpy as np\n'), ((76627, 76639), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (76637, 76639), False, 'from keras.models import Sequential, Model\n'), ((77299, 77311), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (77309, 77311), False, 'from keras.models import Sequential, Model\n'), ((77846, 77858), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (77856, 77858), False, 'from keras.models import Sequential, Model\n'), ((78353, 78385), 'keras.layers.Input', 'Input', ([], {'shape': '(4,)', 'name': '"""input1"""'}), "(shape=(4,), name='input1')\n", (78358, 78385), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78399, 78431), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)', 'name': '"""input2"""'}), "(shape=(5,), name='input2')\n", (78404, 78431), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78484, 78505), 'keras.layers.concatenate', 'concatenate', (['[x1, y1]'], {}), '([x1, y1])\n', (78495, 78505), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((78522, 78557), 'keras.models.Model', 'Model', ([], {'inputs': '[x1, x2]', 'outputs': '[z]'}), '(inputs=[x1, x2], outputs=[z])\n', (78527, 78557), False, 'from keras.models import Sequential, Model\n'), ((78731, 78763), 'keras.layers.Input', 'Input', ([], {'shape': '(4,)', 'name': '"""input1"""'}), "(shape=(4,), name='input1')\n", (78736, 78763), False, 'from keras.layers import Embedding, 
Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78777, 78809), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)', 'name': '"""input2"""'}), "(shape=(5,), name='input2')\n", (78782, 78809), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78862, 78883), 'keras.layers.concatenate', 'concatenate', (['[y1, x1]'], {}), '([y1, x1])\n', (78873, 78883), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((78900, 78935), 'keras.models.Model', 'Model', ([], {'inputs': '[x1, x2]', 'outputs': '[z]'}), '(inputs=[x1, x2], outputs=[z])\n', (78905, 78935), False, 'from keras.models import Sequential, Model\n'), ((79110, 79134), 'keras.layers.Input', 'Input', ([], {'shape': '(27, 27, 1)'}), '(shape=(27, 27, 1))\n', (79115, 79134), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((79264, 79306), 'keras.models.Model', 'Model', ([], {'inputs': '[digit_input]', 'outputs': '[out]'}), '(inputs=[digit_input], outputs=[out])\n', (79269, 79306), False, 'from keras.models import Sequential, Model\n'), ((79376, 79400), 'keras.layers.Input', 'Input', ([], {'shape': '(27, 27, 1)'}), '(shape=(27, 27, 1))\n', (79381, 79400), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((79419, 79443), 'keras.layers.Input', 'Input', ([], {'shape': '(27, 27, 1)'}), '(shape=(27, 27, 1))\n', (79424, 79443), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((79604, 79631), 'keras.layers.concatenate', 'concatenate', (['[out_a, out_b]'], {}), '([out_a, out_b])\n', (79615, 79631), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((79707, 79752), 'keras.models.Model', 'Model', ([], {'inputs': '[digit_a, digit_b]', 'outputs': 'out'}), '(inputs=[digit_a, digit_b], outputs=out)\n', (79712, 79752), False, 'from keras.models import Sequential, 
Model\n'), ((80035, 80052), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (80040, 80052), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((80069, 80077), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (80074, 80077), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80160, 80181), 'keras.layers.concatenate', 'concatenate', (['[y1, y3]'], {}), '([y1, y3])\n', (80171, 80181), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((80198, 80228), 'keras.models.Model', 'Model', ([], {'inputs': '[x]', 'outputs': '[z]'}), '(inputs=[x], outputs=[z])\n', (80203, 80228), False, 'from keras.models import Sequential, Model\n'), ((80427, 80444), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (80432, 80444), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((80511, 80531), 'keras.models.Model', 'Model', (['[x]', '[y1, y2]'], {}), '([x], [y1, y2])\n', (80516, 80531), False, 'from keras.models import Sequential, Model\n'), ((80735, 80752), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (80740, 80752), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((80875, 80893), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (80880, 80893), False, 'from keras.models import Sequential, Model\n'), ((81098, 81120), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 3)'}), '(shape=(8, 8, 3))\n', (81103, 81120), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((81261, 81279), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (81266, 81279), False, 'from keras.models import Sequential, Model\n'), ((81494, 81516), 'keras.layers.Input', 'Input', ([], 
{'shape': '(8, 8, 3)'}), '(shape=(8, 8, 3))\n', (81499, 81516), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((81707, 81725), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (81712, 81725), False, 'from keras.models import Sequential, Model\n'), ((81936, 81956), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 3)'}), '(shape=(10, 3))\n', (81941, 81956), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((82087, 82105), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (82092, 82105), False, 'from keras.models import Sequential, Model\n'), ((82319, 82339), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 3)'}), '(shape=(10, 3))\n', (82324, 82339), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((82520, 82538), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (82525, 82538), False, 'from keras.models import Sequential, Model\n'), ((82745, 82765), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 2)'}), '(shape=(10, 2))\n', (82750, 82765), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((83137, 83162), 'keras.models.Model', 'Model', (['x_in', '[out1, out2]'], {}), '(x_in, [out1, out2])\n', (83142, 83162), False, 'from keras.models import Sequential, Model\n'), ((83582, 83606), 'keras.layers.Input', 'Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (83587, 83606), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((84943, 84981), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input]', 'outputs': '[x]'}), '(inputs=[img_input], outputs=[x])\n', (84948, 84981), False, 'from keras.models import Sequential, Model\n'), ((85291, 85315), 'keras.layers.Input', 'Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (85296, 
85315), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((86401, 86419), 'keras.layers.add', 'add', (['[x, residual]'], {}), '([x, residual])\n', (86404, 86419), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((86577, 86622), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input]', 'outputs': '[residual]'}), '(inputs=[img_input], outputs=[residual])\n', (86582, 86622), False, 'from keras.models import Sequential, Model\n'), ((86900, 86912), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (86910, 86912), False, 'from keras.models import Sequential, Model\n'), ((87000, 87012), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (87010, 87012), False, 'from keras.models import Sequential, Model\n'), ((87395, 87407), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (87405, 87407), False, 'from keras.models import Sequential, Model\n'), ((88877, 88917), 'os.path.join', 'os.path.join', (['model_dir', '"""keras.mlmodel"""'], {}), "(model_dir, 'keras.mlmodel')\n", (88889, 88917), False, 'import os\n'), ((93770, 93790), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (93784, 93790), True, 'import numpy as np\n'), ((93874, 93905), 'numpy.random.rand', 'np.random.rand', (['(1)', '*input_shape'], {}), '(1, *input_shape)\n', (93888, 93905), True, 'import numpy as np\n'), ((96592, 96604), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (96602, 96604), False, 'from keras.models import Sequential, Model\n'), ((98748, 98760), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (98758, 98760), False, 'from keras.models import Sequential, Model\n'), ((100974, 100986), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (100984, 100986), False, 'from keras.models import Sequential, Model\n'), ((102447, 102459), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (102457, 102459), False, 'from keras.models import 
Sequential, Model\n'), ((102821, 102833), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (102831, 102833), False, 'from keras.models import Sequential, Model\n'), ((103136, 103148), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (103146, 103148), False, 'from keras.models import Sequential, Model\n'), ((103467, 103489), 'keras.layers.Input', 'Input', ([], {'shape': '(4, 6, 1)'}), '(shape=(4, 6, 1))\n', (103472, 103489), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((104323, 104337), 'keras.models.Model', 'Model', (['x_in', 'x'], {}), '(x_in, x)\n', (104328, 104337), False, 'from keras.models import Sequential, Model\n'), ((104545, 104557), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (104555, 104557), False, 'from keras.models import Sequential, Model\n'), ((104817, 104841), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (104822, 104841), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((104930, 104970), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input_1]', 'outputs': '[x]'}), '(inputs=[img_input_1], outputs=[x])\n', (104935, 104970), False, 'from keras.models import Sequential, Model\n'), ((104992, 105016), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (104997, 105016), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105166, 105204), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input]', 'outputs': '[x]'}), '(inputs=[img_input], outputs=[x])\n', (105171, 105204), False, 'from keras.models import Sequential, Model\n'), ((105324, 105348), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (105329, 105348), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105437, 105462), 'keras.models.Model', 
'Model', (['[img_input_1]', '[x]'], {}), '([img_input_1], [x])\n', (105442, 105462), False, 'from keras.models import Sequential, Model\n'), ((105484, 105508), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (105489, 105508), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105661, 105678), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)'}), '(shape=(5,))\n', (105666, 105678), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105775, 105820), 'keras.layers.concatenate', 'concatenate', (['[x, y]'], {'axis': '(1)', 'name': '"""cap_merge"""'}), "([x, y], axis=1, name='cap_merge')\n", (105786, 105820), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((105847, 105901), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input, sentence_input]', 'outputs': '[z]'}), '(inputs=[img_input, sentence_input], outputs=[z])\n', (105852, 105901), False, 'from keras.models import Sequential, Model\n'), ((106094, 106118), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (106099, 106118), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106207, 106247), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input_1]', 'outputs': '[x]'}), '(inputs=[img_input_1], outputs=[x])\n', (106212, 106247), False, 'from keras.models import Sequential, Model\n'), ((106269, 106293), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (106274, 106293), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106446, 106463), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)'}), '(shape=(5,))\n', (106451, 106463), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106560, 106605), 'keras.layers.concatenate', 
'concatenate', (['[x, y]'], {'axis': '(1)', 'name': '"""cap_merge"""'}), "([x, y], axis=1, name='cap_merge')\n", (106571, 106605), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((106764, 106818), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input, sentence_input]', 'outputs': '[z]'}), '(inputs=[img_input, sentence_input], outputs=[z])\n', (106769, 106818), False, 'from keras.models import Sequential, Model\n'), ((107058, 107086), 'keras.layers.Input', 'Input', ([], {'shape': '(story_maxlen,)'}), '(shape=(story_maxlen,))\n', (107063, 107086), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107213, 107241), 'keras.layers.Input', 'Input', ([], {'shape': '(query_maxlen,)'}), '(shape=(query_maxlen,))\n', (107218, 107241), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107465, 107478), 'keras.layers.add', 'add', (['[x1, x2]'], {}), '([x1, x2])\n', (107468, 107478), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((107648, 107708), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor_1, input_tensor_2]', 'outputs': '[x3]'}), '(inputs=[input_tensor_1, input_tensor_2], outputs=[x3])\n', (107653, 107708), False, 'from keras.models import Sequential, Model\n'), ((108026, 108038), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (108036, 108038), False, 'from keras.models import Sequential, Model\n'), ((109119, 109140), 'keras.layers.Input', 'Input', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (109124, 109140), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((109368, 109389), 'keras.models.Model', 'Model', (['inputs', 'ouputs'], {}), '(inputs, ouputs)\n', (109373, 109389), False, 'from keras.models import Sequential, Model\n'), ((109625, 109645), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', 
(109639, 109645), True, 'import numpy as np\n'), ((109687, 109699), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (109697, 109699), False, 'from keras.models import Sequential, Model\n'), ((109984, 110040), 'coremltools.converters.keras.convert', 'keras_converter.convert', (['model'], {'use_float_arraytype': '(True)'}), '(model, use_float_arraytype=True)\n', (110007, 110040), True, 'from coremltools.converters import keras as keras_converter\n'), ((1602, 1625), 'distutils.version.StrictVersion', '_StrictVersion', (['"""2.2.0"""'], {}), "('2.2.0')\n", (1616, 1625), True, 'from distutils.version import StrictVersion as _StrictVersion\n'), ((2280, 2306), 'numpy.transpose', 'np.transpose', (['x', '[1, 0, 2]'], {}), '(x, [1, 0, 2])\n', (2292, 2306), True, 'import numpy as np\n'), ((3535, 3555), 'numpy.ones', 'np.ones', (['input_shape'], {}), '(input_shape)\n', (3542, 3555), True, 'import numpy as np\n'), ((6941, 6959), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6957, 6959), False, 'import tempfile\n'), ((9132, 9158), 'keras.layers.Dense', 'Dense', (['(2)'], {'input_shape': '(2,)'}), '(2, input_shape=(2,))\n', (9137, 9158), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((10015, 10046), 'keras.layers.Dense', 'Dense', (['(1000)'], {'input_shape': '(100,)'}), '(1000, input_shape=(100,))\n', (10020, 10046), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((10537, 10587), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '(32,)', 'activation': '"""softmax"""'}), "(32, input_shape=(32,), activation='softmax')\n", (10542, 10587), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((10904, 10950), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '(32,)', 'activation': 
'"""elu"""'}), "(32, input_shape=(32,), activation='elu')\n", (10909, 10950), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11268, 11315), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '(32,)', 'activation': '"""selu"""'}), "(32, input_shape=(32,), activation='selu')\n", (11273, 11315), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11686, 11727), 'keras.layers.Dense', 'Dense', (['num_hidden'], {'input_dim': 'num_features'}), '(num_hidden, input_dim=num_features)\n', (11691, 11727), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11747, 11765), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11757, 11765), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11785, 11817), 'keras.layers.Dense', 'Dense', (['(1)'], {'input_dim': 'num_features'}), '(1, input_dim=num_features)\n', (11790, 11817), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((12323, 12423), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (12329, 12423), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((13148, 13248), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, 
kernel_width))\n', (13154, 13248), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((14001, 14105), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(None, None, C)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=(None, None, C), filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (14007, 14105), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((13567, 13578), 'coremltools.models.utils._is_macos', '_is_macos', ([], {}), '()\n', (13576, 13578), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((14935, 15056), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=num_kernels,\n kernel_size=(kernel_height, kernel_width))\n', (14941, 15056), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((15825, 15946), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=num_kernels,\n kernel_size=(kernel_height, kernel_width))\n', (15831, 15946), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((16730, 16843), 'keras.layers.Conv2D', 'Conv2D', (['nb_filters'], {'kernel_size': '(1, filter_length)', 'input_shape': '(1, input_length, input_dim)', 'padding': '"""valid"""'}), "(nb_filters, kernel_size=(1, filter_length), input_shape=(1,\n input_length, input_dim), padding='valid')\n", (16736, 16843), False, 
'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((17495, 17600), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (17501, 17600), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((18115, 18212), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(None, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n None, input_dim))\n", (18121, 18212), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((18866, 18971), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (18872, 18971), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((19703, 19809), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""valid"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='valid', input_shape=\n (input_length, input_dim))\n", (19709, 19809), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((20319, 20429), 'keras.layers.Conv1D', 'Conv1D', (['num_kernels'], {'kernel_size': 'filter_length', 'padding': '"""valid"""', 'input_shape': 'input_shape', 'dilation_rate': '(3)'}), 
"(num_kernels, kernel_size=filter_length, padding='valid', input_shape\n =input_shape, dilation_rate=3)\n", (20325, 20429), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((21018, 21134), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""same"""'}), "(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width), padding='same')\n", (21024, 21134), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((21707, 21824), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""'}), "(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width), padding='valid')\n", (21713, 21824), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((22439, 22556), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""'}), "(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width), padding='valid')\n", (22445, 22556), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23077, 23107), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(2, 2, 2)'}), '(input_shape=(2, 2, 2))\n', (23084, 23107), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23310, 23372), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 
'input_shape': 'input_shape'}), "(32, (3, 3), activation='relu', input_shape=input_shape)\n", (23316, 23372), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23392, 23401), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (23399, 23401), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23421, 23452), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (23426, 23452), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((24020, 24120), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (24026, 24120), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((24207, 24240), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(1e-05)'}), '(epsilon=1e-05)\n', (24225, 24240), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((24937, 25037), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (24943, 25037), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((25124, 25184), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'center': '(False)', 'scale': '(False)', 'epsilon': '(1e-05)'}), 
'(center=False, scale=False, epsilon=1e-05)\n', (25142, 25184), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((25924, 26065), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""valid"""', 'use_bias': '(False)'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), input_shape=input_shape, padding='valid', use_bias=False)\n", (25939, 26065), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((26664, 26823), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""same"""', 'strides': '(2, 2)', 'use_bias': '(True)'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), input_shape=input_shape, padding='same', strides=(2, 2),\n use_bias=True)\n", (26679, 26823), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((27435, 27594), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""same"""', 'strides': '(1, 1)'}), "(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='same',\n strides=(1, 1))\n", (27450, 27594), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((28190, 28350), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""valid"""', 'strides': '(1, 
1)'}), "(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='valid',\n strides=(1, 1))\n", (28205, 28350), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((28962, 29121), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""same"""', 'strides': '(1, 1)'}), "(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='same',\n strides=(1, 1))\n", (28977, 29121), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((29734, 29894), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""valid"""', 'strides': '(1, 1)'}), "(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='valid',\n strides=(1, 1))\n", (29749, 29894), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((30510, 30691), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""', 'strides': '(1, 1)', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='valid', strides=(1, 1), depth_multiplier=\n depth_multiplier, input_shape=input_shape)\n", (30525, 30691), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((31328, 31526), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""same"""', 
'strides': '(2, 2)', 'activation': '"""relu"""', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='same', strides=(2, 2), activation='relu',\n depth_multiplier=depth_multiplier, input_shape=input_shape)\n", (31343, 31526), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((32193, 32374), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""', 'strides': '(1, 1)', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='valid', strides=(1, 1), depth_multiplier=\n depth_multiplier, input_shape=input_shape)\n", (32208, 32374), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((33085, 33283), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""same"""', 'strides': '(2, 2)', 'activation': '"""relu"""', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='same', strides=(2, 2), activation='relu',\n depth_multiplier=depth_multiplier, input_shape=input_shape)\n", (33100, 33283), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((34183, 34314), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=\n num_kernels, kernel_size=(kernel_height, kernel_width))\n', 
(34198, 34314), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((35134, 35265), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=\n num_kernels, kernel_size=(kernel_height, kernel_width))\n', (35149, 35265), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((35915, 36005), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(2, 2)', 'strides': 'None', 'padding': '"""valid"""'}), "(input_shape=(16, 16, 3), pool_size=(2, 2), strides=None,\n padding='valid')\n", (35927, 36005), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((36253, 36345), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(18, 18, 3)', 'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(input_shape=(18, 18, 3), pool_size=(3, 3), strides=(2, 2),\n padding='valid')\n", (36265, 36345), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((36568, 36660), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(input_shape=(16, 16, 3), pool_size=(3, 3), strides=(2, 2),\n padding='valid')\n", (36580, 36660), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((36884, 36975), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(input_shape=(16, 16, 3), pool_size=(3, 3), strides=(2, 
2),\n padding='same')\n", (36896, 36975), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((37180, 37223), 'keras.layers.GlobalMaxPooling2D', 'GlobalMaxPooling2D', ([], {'input_shape': '(16, 16, 3)'}), '(input_shape=(16, 16, 3))\n', (37198, 37223), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((37407, 37501), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(2, 2)', 'strides': 'None', 'padding': '"""valid"""'}), "(input_shape=(16, 16, 3), pool_size=(2, 2), strides=None,\n padding='valid')\n", (37423, 37501), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((37740, 37835), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""'}), "(input_shape=(16, 16, 3), pool_size=(3, 3), strides=(1, 1),\n padding='same')\n", (37756, 37835), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((38056, 38103), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'input_shape': '(16, 16, 3)'}), '(input_shape=(16, 16, 3))\n', (38078, 38103), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((38220, 38266), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'input_shape': '(16, 3)', 'pool_size': '(4)'}), '(input_shape=(16, 3), pool_size=4)\n', (38232, 38266), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((38529, 38634), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', 
input_shape=(\n input_length, input_dim))\n", (38535, 38634), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((38737, 38757), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (38755, 38757), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((39017, 39122), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (39023, 39122), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((39225, 39254), 'keras.layers.AveragePooling1D', 'AveragePooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (39241, 39254), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((39521, 39626), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (39527, 39626), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((39729, 39753), 'keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (39751, 39753), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((40095, 40195), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, 
kernel_width))\n', (40101, 40195), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((40282, 40302), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2)'}), '(size=2)\n', (40294, 40302), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((40722, 40827), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (40728, 40827), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((40930, 40950), 'keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(2)'}), '(size=2)\n', (40942, 40950), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((41407, 41512), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (41413, 41512), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((41615, 41637), 'keras.layers.Cropping1D', 'Cropping1D', ([], {'cropping': '(2)'}), '(cropping=2)\n', (41625, 41637), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((42300, 42405), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (42306, 42405), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, 
Conv2DTranspose, SeparableConv2D\n'), ((42508, 42532), 'keras.layers.ZeroPadding1D', 'ZeroPadding1D', ([], {'padding': '(2)'}), '(padding=2)\n', (42521, 42532), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((43038, 43105), 'keras.layers.Conv1D', 'Conv1D', (['(1)', '(3)'], {'input_shape': '(10, 1)', 'use_bias': '(False)', 'padding': '"""causal"""'}), "(1, 3, input_shape=(10, 1), use_bias=False, padding='causal')\n", (43044, 43105), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((43389, 43423), 'keras.layers.Embedding', 'Embedding', (['num_inputs', 'num_outputs'], {}), '(num_inputs, num_outputs)\n', (43398, 43423), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((43867, 43917), 'keras.layers.Embedding', 'Embedding', (['num_inputs', 'num_outputs'], {'input_length': '(7)'}), '(num_inputs, num_outputs, input_length=7)\n', (43876, 43917), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((44477, 44539), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)'}), '(num_channels, input_shape=(input_length, input_dim))\n', (44486, 44539), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((44950, 45012), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)'}), '(num_channels, input_shape=(input_length, input_dim))\n', (44959, 45012), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((45462, 45551), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(True)'}), '(num_channels, input_shape=(input_length, input_dim),\n return_sequences=True)\n', (45471, 45551), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((46029, 46105), 'keras.layers.SimpleRNN', 'SimpleRNN', 
(['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False)\n', (46038, 46105), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((46534, 46633), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)', 'go_backwards': '(True)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False,\n go_backwards=True)\n', (46543, 46633), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((47169, 47231), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)'}), '(num_channels, input_shape=(input_length, input_dim))\n', (47178, 47231), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((47660, 47771), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=1,\n recurrent_activation='sigmoid')\n", (47664, 47771), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((48231, 48342), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=1,\n recurrent_activation='sigmoid')\n", (48235, 48342), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((48804, 48915), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (48808, 48915), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((49377, 49488), 'keras.layers.LSTM', 'LSTM', (['num_channels'], 
{'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (49381, 49488), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((49905, 49976), 'keras.layers.LSTM', 'LSTM', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False)\n', (49909, 49976), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((50315, 50409), 'keras.layers.LSTM', 'LSTM', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)', 'go_backwards': '(True)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False,\n go_backwards=True)\n', (50319, 50409), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((50886, 50979), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n recurrent_activation='sigmoid')\n", (50890, 50979), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((51505, 51616), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (51509, 51616), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((52171, 52282), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (52175, 52282), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((52885, 52977), 'keras.layers.GRU', 
'GRU', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n recurrent_activation='sigmoid')\n", (52888, 52977), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((53714, 53806), 'keras.layers.GRU', 'GRU', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n recurrent_activation='sigmoid')\n", (53717, 53806), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((54387, 54479), 'keras.layers.GRU', 'GRU', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n recurrent_activation='sigmoid')\n", (54390, 54479), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((55140, 55210), 'keras.layers.GRU', 'GRU', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False)\n', (55143, 55210), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((55680, 55773), 'keras.layers.GRU', 'GRU', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)', 'go_backwards': '(True)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False,\n go_backwards=True)\n', (55683, 55773), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((61083, 61131), 'keras.layers.Input', 'Input', ([], {'name': '"""the_input"""', 'shape': '(None, input_dim)'}), "(name='the_input', shape=(None, input_dim))\n", (61088, 61131), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((61692, 61721), 'keras.models.Model', 'Model', (['[input_data]', '[y_pred]'], {}), '([input_data], [y_pred])\n', (61697, 61721), False, 'from keras.models import Sequential, Model\n'), ((62351, 
62413), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)'}), '(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5))\n', (62357, 62413), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((62433, 62447), 'keras.layers.advanced_activations.ELU', 'ELU', ([], {'alpha': '(0.8)'}), '(alpha=0.8)\n', (62436, 62447), False, 'from keras.layers.advanced_activations import ELU\n'), ((62858, 62936), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding='same')\n", (62864, 62936), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((62995, 63020), 'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]'}), '(shared_axes=[1, 2])\n', (63000, 63020), False, 'from keras.layers.advanced_activations import PReLU\n'), ((63580, 63658), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding='same')\n", (63586, 63658), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((63717, 63737), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (63726, 63737), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((64128, 64206), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding='same')\n", (64134, 64206), False, 'from keras.layers import Dense, 
Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64265, 64291), 'keras.layers.advanced_activations.ThresholdedReLU', 'ThresholdedReLU', ([], {'theta': '(0.8)'}), '(theta=0.8)\n', (64280, 64291), False, 'from keras.layers.advanced_activations import ThresholdedReLU\n'), ((64646, 64665), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (64651, 64665), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64693, 64712), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (64698, 64712), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64730, 64749), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (64735, 64749), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64802, 64821), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (64807, 64821), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((65325, 65364), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {}), '(max_features, embedding_dims)\n', (65334, 65364), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((65392, 65431), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {}), '(max_features, embedding_dims)\n', (65401, 65431), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((66032, 66082), 'keras.layers.Embedding', 'Embedding', (['vocab_size', '(50)'], {'input_length': 'seq_length'}), '(vocab_size, 50, input_length=seq_length)\n', (66041, 66082), False, 'from keras.layers import 
Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((66107, 66126), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(2)'}), '(axis=2)\n', (66118, 66126), False, 'from keras.layers import Add, Concatenate\n'), ((66161, 66211), 'keras.layers.LSTM', 'LSTM', (['units'], {'return_sequences': '(True)', 'stateful': '(False)'}), '(units, return_sequences=True, stateful=False)\n', (66165, 66211), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((66236, 66271), 'keras.layers.LSTM', 'LSTM', (['units'], {'return_sequences': '(False)'}), '(units, return_sequences=False)\n', (66240, 66271), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((66295, 66324), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (66300, 66324), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66348, 66387), 'keras.layers.Dense', 'Dense', (['vocab_size'], {'activation': '"""softmax"""'}), "(vocab_size, activation='softmax')\n", (66353, 66387), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66870, 66889), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (66875, 66889), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66917, 66936), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (66922, 66936), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66954, 66973), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (66959, 66973), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67018, 67037), 
'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67023, 67037), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67486, 67505), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67491, 67505), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67533, 67552), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67538, 67552), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67570, 67589), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67575, 67589), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67639, 67658), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67644, 67658), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68107, 68126), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (68112, 68126), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68154, 68173), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (68159, 68173), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68191, 68210), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (68196, 68210), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68280, 68299), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), 
'(num_channels)\n', (68285, 68299), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68664, 68710), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': 'input_shape'}), '((1, 1), input_shape=input_shape)\n', (68677, 68710), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((69011, 69067), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['((2, 5), (3, 4))'], {'input_shape': 'input_shape'}), '(((2, 5), (3, 4)), input_shape=input_shape)\n', (69024, 69067), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((69366, 69428), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((2, 5), (2, 5))', 'input_shape': 'input_shape'}), '(cropping=((2, 5), (2, 5)), input_shape=input_shape)\n', (69376, 69428), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((69859, 69892), 'itertools.permutations', 'itertools.permutations', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (69881, 69892), False, 'import itertools\n'), ((69915, 69927), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (69925, 69927), False, 'from keras.models import Sequential, Model\n'), ((70141, 70183), 'keras.layers.Reshape', 'Reshape', (['(10, 1, 6)'], {'input_shape': '(5, 4, 3)'}), '((10, 1, 6), input_shape=(5, 4, 3))\n', (70148, 70183), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((70583, 70683), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (70589, 70683), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((70770, 70782), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), 
'(0.5)\n', (70777, 70782), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((70802, 70811), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (70809, 70811), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((70831, 70848), 'keras.layers.Dense', 'Dense', (['hidden_dim'], {}), '(hidden_dim)\n', (70836, 70848), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((71385, 71485), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (71391, 71485), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((71572, 71593), 'keras.layers.core.SpatialDropout2D', 'SpatialDropout2D', (['(0.5)'], {}), '(0.5)\n', (71588, 71593), False, 'from keras.layers.core import SpatialDropout2D\n'), ((71613, 71622), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (71620, 71622), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((71642, 71659), 'keras.layers.Dense', 'Dense', (['hidden_dim'], {}), '(hidden_dim)\n', (71647, 71659), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((72064, 72126), 'keras.layers.Dense', 'Dense', (['hidden_dim'], {'input_shape': '(input_dim,)', 'activation': '"""tanh"""'}), "(hidden_dim, input_shape=(input_dim,), activation='tanh')\n", (72069, 72126), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((72666, 72784), 
'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'activation': '"""relu"""', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), "(input_shape=input_shape, activation='relu', filters=num_kernels,\n kernel_size=(kernel_height, kernel_width))\n", (72672, 72784), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((73617, 73728), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=1,\n recurrent_activation='sigmoid')\n", (73621, 73728), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((74333, 74362), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(1, 1)'}), '(padding=(1, 1))\n', (74346, 74362), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((74381, 74407), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(2)'}), '(axis=2)\n', (74399, 74407), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((74713, 74780), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'embed_channels'], {'input_length': 'sequence_length'}), '(vocab_size, embed_channels, input_length=sequence_length)\n', (74722, 74780), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((74800, 74809), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (74807, 74809), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((74829, 74847), 'keras.layers.Dense', 'Dense', (['dense_units'], {}), '(dense_units)\n', (74834, 74847), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, 
BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((74867, 74876), 'keras.layers.Dense', 'Dense', (['(20)'], {}), '(20)\n', (74872, 74876), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75115, 75155), 'keras.layers.AveragePooling1D', 'AveragePooling1D', (['(2)'], {'input_shape': '(64, 9)'}), '(2, input_shape=(64, 9))\n', (75131, 75155), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((75175, 75239), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(False)'}), "(16, 1, padding='same', activation='relu', use_bias=False)\n", (75181, 75239), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75259, 75274), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {}), '(2)\n', (75271, 75274), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((75294, 75303), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (75301, 75303), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75323, 75375), 'keras.layers.Dense', 'Dense', ([], {'units': '(7)', 'activation': '"""softmax"""', 'use_bias': '(False)'}), "(units=7, activation='softmax', use_bias=False)\n", (75328, 75375), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75986, 76080), 'keras.layers.Embedding', 'Embedding', (['vocabulary_size', 'embedding_dimension'], {'input_length': 'input_length', 'trainable': '(True)'}), '(vocabulary_size, embedding_dimension, input_length=input_length,\n trainable=True)\n', (75995, 76080), False, 'from keras.layers import Embedding, Input, 
Permute, Reshape, RepeatVector, Dropout\n'), ((76185, 76197), 'keras.layers.Conv1D', 'Conv1D', (['(5)', '(2)'], {}), '(5, 2)\n', (76191, 76197), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76217, 76237), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (76235, 76237), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76257, 76275), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (76267, 76275), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76296, 76311), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {}), '(2)\n', (76308, 76311), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((76671, 76760), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'return_sequences': '(True)', 'input_shape': '(input_length, input_dim)'}), '(num_channels, return_sequences=True, input_shape=(input_length,\n input_dim))\n', (76680, 76760), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((77343, 77441), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_channels', 'kernel_size': '(kernel_size, kernel_size)'}), '(input_shape=input_shape, filters=num_channels, kernel_size=(\n kernel_size, kernel_size))\n', (77349, 77441), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((78445, 78467), 'keras.layers.Dense', 'Dense', (['(6)'], {'name': '"""dense"""'}), "(6, name='dense')\n", (78450, 78467), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((78823, 78845), 'keras.layers.Dense', 
'Dense', (['(6)'], {'name': '"""dense"""'}), "(6, name='dense')\n", (78828, 78845), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79147, 79165), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (79153, 79165), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79191, 79209), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (79197, 79209), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79227, 79236), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (79234, 79236), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79646, 79676), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (79651, 79676), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80135, 80143), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (80140, 80143), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80458, 80466), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (80463, 80466), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80483, 80491), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (80488, 80491), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80765, 80802), 'keras.layers.Dense', 'Dense', (['(4)'], {'name': '"""intermediate_dense_y"""'}), "(4, 
name='intermediate_dense_y')\n", (80770, 80802), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80818, 80855), 'keras.layers.Dense', 'Dense', (['(5)'], {'name': '"""intermediate_dense_z"""'}), "(5, name='intermediate_dense_z')\n", (80823, 80855), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81133, 81179), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'name': '"""intermdiate_conv2d_1"""'}), "(4, (3, 3), name='intermdiate_conv2d_1')\n", (81139, 81179), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81195, 81241), 'keras.layers.Conv2D', 'Conv2D', (['(5)', '(3, 3)'], {'name': '"""intermdiate_conv2d_2"""'}), "(5, (3, 3), name='intermdiate_conv2d_2')\n", (81201, 81241), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81529, 81600), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'name': '"""intermdiate_conv2d_1_fused"""', 'activation': '"""relu"""'}), "(4, (3, 3), name='intermdiate_conv2d_1_fused', activation='relu')\n", (81535, 81600), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81616, 81687), 'keras.layers.Conv2D', 'Conv2D', (['(5)', '(3, 3)'], {'name': '"""intermdiate_conv2d_2_fused"""', 'activation': '"""relu"""'}), "(5, (3, 3), name='intermdiate_conv2d_2_fused', activation='relu')\n", (81622, 81687), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81969, 82010), 'keras.layers.Conv1D', 'Conv1D', (['(4)', '(3)'], {'name': '"""intermdiate_conv1d_1"""'}), "(4, 3, name='intermdiate_conv1d_1')\n", (81975, 82010), False, 'from 
keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82026, 82067), 'keras.layers.Conv1D', 'Conv1D', (['(5)', '(3)'], {'name': '"""intermdiate_conv1d_2"""'}), "(5, 3, name='intermdiate_conv1d_2')\n", (82032, 82067), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82352, 82418), 'keras.layers.Conv1D', 'Conv1D', (['(4)', '(3)'], {'name': '"""intermdiate_conv1d_1_fused"""', 'activation': '"""relu"""'}), "(4, 3, name='intermdiate_conv1d_1_fused', activation='relu')\n", (82358, 82418), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82434, 82500), 'keras.layers.Conv1D', 'Conv1D', (['(5)', '(3)'], {'name': '"""intermdiate_conv1d_2_fused"""', 'activation': '"""relu"""'}), "(5, 3, name='intermdiate_conv1d_2_fused', activation='relu')\n", (82440, 82500), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82801, 82855), 'keras.layers.Conv1D', 'Conv1D', (['(3)', '(3)'], {'padding': '"""same"""', 'name': '"""interm_rcnn_conv1"""'}), "(3, 3, padding='same', name='interm_rcnn_conv1')\n", (82807, 82855), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82874, 82925), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""interm_rcnn_bn1"""'}), "(axis=-1, name='interm_rcnn_bn1')\n", (82892, 82925), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82941, 82958), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (82951, 82958), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, 
BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82974, 83025), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'name': '"""interm_rcnn_pool1"""'}), "(pool_size=2, name='interm_rcnn_pool1')\n", (82986, 83025), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((83081, 83100), 'keras.layers.GRU', 'GRU', (['(6)'], {'name': '"""gru1"""'}), "(6, name='gru1')\n", (83084, 83100), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((83619, 83698), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)', 'strides': '(2, 2)', 'name': '"""conv1"""'}), "(4, (3, 3), padding='same', use_bias=False, strides=(2, 2), name='conv1')\n", (83625, 83698), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((83744, 83788), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv1_bn"""'}), "(axis=-1, name='conv1_bn')\n", (83762, 83788), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((83845, 83958), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', (['(3, 3)'], {'padding': '"""same"""', 'depth_multiplier': '(1)', 'strides': '(1, 1)', 'use_bias': '(False)', 'name': '"""conv_dw_1"""'}), "((3, 3), padding='same', depth_multiplier=1, strides=(1, 1),\n use_bias=False, name='conv_dw_1')\n", (83860, 83958), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((84053, 84101), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_dw_1_bn"""'}), "(axis=-1, name='conv_dw_1_bn')\n", (84071, 84101), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84162, 84250), 
'keras.layers.Conv2D', 'Conv2D', (['(8)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'strides': '(1, 1)', 'name': '"""conv_pw_1"""'}), "(8, (1, 1), padding='same', use_bias=False, strides=(1, 1), name=\n 'conv_pw_1')\n", (84168, 84250), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84283, 84331), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_pw_1_bn"""'}), "(axis=-1, name='conv_pw_1_bn')\n", (84301, 84331), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84392, 84505), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', (['(3, 3)'], {'padding': '"""same"""', 'depth_multiplier': '(1)', 'strides': '(2, 2)', 'use_bias': '(False)', 'name': '"""conv_dw_2"""'}), "((3, 3), padding='same', depth_multiplier=1, strides=(2, 2),\n use_bias=False, name='conv_dw_2')\n", (84407, 84505), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((84600, 84648), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_dw_2_bn"""'}), "(axis=-1, name='conv_dw_2_bn')\n", (84618, 84648), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84709, 84797), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'strides': '(2, 2)', 'name': '"""conv_pw_2"""'}), "(8, (1, 1), padding='same', use_bias=False, strides=(2, 2), name=\n 'conv_pw_2')\n", (84715, 84797), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84830, 84878), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_pw_2_bn"""'}), "(axis=-1, 
name='conv_pw_2_bn')\n", (84848, 84878), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85328, 85398), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {'strides': '(2, 2)', 'use_bias': '(False)', 'name': '"""block1_conv1"""'}), "(2, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')\n", (85334, 85398), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85444, 85486), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block1_conv1_bn"""'}), "(name='block1_conv1_bn')\n", (85462, 85486), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85502, 85545), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""block1_conv1_act"""'}), "('relu', name='block1_conv1_act')\n", (85512, 85545), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85561, 85615), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'use_bias': '(False)', 'name': '"""block1_conv2"""'}), "(4, (3, 3), use_bias=False, name='block1_conv2')\n", (85567, 85615), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85631, 85673), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block1_conv2_bn"""'}), "(name='block1_conv2_bn')\n", (85649, 85673), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85689, 85732), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""block1_conv2_act"""'}), "('relu', name='block1_conv2_act')\n", (85699, 85732), False, 'from keras.layers import Dense, Activation, Conv2D, 
Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85756, 85821), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(1, 1)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(8, (1, 1), strides=(2, 2), padding='same', use_bias=False)\n", (85762, 85821), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85844, 85864), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (85862, 85864), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85888, 85975), 'keras.layers.SeparableConv2D', 'SeparableConv2D', (['(8)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""block2_sepconv1"""'}), "(8, (3, 3), padding='same', use_bias=False, name=\n 'block2_sepconv1')\n", (85903, 85975), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86008, 86053), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block2_sepconv1_bn"""'}), "(name='block2_sepconv1_bn')\n", (86026, 86053), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86069, 86115), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""block2_sepconv2_act"""'}), "('relu', name='block2_sepconv2_act')\n", (86079, 86115), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86131, 86218), 'keras.layers.SeparableConv2D', 'SeparableConv2D', (['(8)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""block2_sepconv2"""'}), "(8, (3, 3), padding='same', use_bias=False, name=\n 'block2_sepconv2')\n", (86146, 86218), False, 'from keras.layers import Dense, 
Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86251, 86296), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block2_sepconv2_bn"""'}), "(name='block2_sepconv2_bn')\n", (86269, 86296), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86313, 86385), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""block2_pool"""'}), "((3, 3), strides=(2, 2), padding='same', name='block2_pool')\n", (86325, 86385), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((86440, 86506), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(1, 1)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(16, (1, 1), strides=(2, 2), padding='same', use_bias=False)\n", (86446, 86506), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86529, 86549), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (86547, 86549), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86936, 86977), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(1, 1)'], {'input_shape': '(4, 4, 3)'}), '(32, (1, 1), input_shape=(4, 4, 3))\n', (86942, 86977), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87035, 87083), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': 'base_model.output_shape[1:]'}), '(input_shape=base_model.output_shape[1:])\n', (87042, 87083), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87107, 87135), 'keras.layers.Dense', 'Dense', 
(['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (87112, 87135), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87159, 87189), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (87164, 87189), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87993, 88005), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (88000, 88005), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((88025, 88070), 'keras.layers.LSTM', 'LSTM', (['(32)'], {'return_sequences': '(False)', 'dropout': '(0.5)'}), '(32, return_sequences=False, dropout=0.5)\n', (88029, 88070), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((88090, 88121), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""sigmoid"""'}), "(10, activation='sigmoid')\n", (88095, 88121), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((88837, 88855), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (88853, 88855), False, 'import tempfile\n'), ((90598, 90609), 'coremltools.models.utils._is_macos', '_is_macos', ([], {}), '()\n', (90607, 90609), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((92010, 92022), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (92020, 92022), False, 'from keras.models import Sequential, Model\n'), ((93184, 93196), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (93194, 93196), False, 'from keras.models import Sequential, Model\n'), ((94052, 94064), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (94062, 94064), False, 'from keras.models import Sequential, Model\n'), ((95203, 95215), 'keras.models.Sequential', 'Sequential', 
([], {}), '()\n', (95213, 95215), False, 'from keras.models import Sequential, Model\n'), ((96029, 96046), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (96034, 96046), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((96064, 96081), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (96069, 96081), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((96193, 96211), 'keras.models.Model', 'Model', (['[x1, x2]', 'z'], {}), '([x1, x2], z)\n', (96198, 96211), False, 'from keras.models import Sequential, Model\n'), ((96623, 96669), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': 'input_shape'}), '((1, 1), input_shape=input_shape)\n', (96636, 96669), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((96689, 96726), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (96695, 96726), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96746, 96767), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (96759, 96767), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((96787, 96824), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (96793, 96824), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96844, 96880), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (96856, 96880), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((96901, 96922), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', 
(['(1, 1)'], {}), '((1, 1))\n', (96914, 96922), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((96942, 96979), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (96948, 96979), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96999, 97020), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97012, 97020), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97040, 97077), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97046, 97077), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97097, 97133), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (97109, 97133), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((97154, 97175), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97167, 97175), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97195, 97232), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97201, 97232), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97252, 97273), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97265, 97273), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97293, 97330), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97299, 97330), False, 'from 
keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97350, 97371), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97363, 97371), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97391, 97428), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97397, 97428), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97448, 97484), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (97460, 97484), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((97505, 97526), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97518, 97526), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97546, 97583), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97552, 97583), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97603, 97624), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97616, 97624), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97644, 97681), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97650, 97681), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97701, 97722), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97714, 97722), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, 
Cropping2D\n'), ((97742, 97779), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97748, 97779), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97799, 97835), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (97811, 97835), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((97856, 97877), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97869, 97877), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97897, 97934), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97903, 97934), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97954, 97975), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97967, 97975), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97995, 98032), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (98001, 98032), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98052, 98073), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (98065, 98073), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((98093, 98130), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (98099, 98130), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, 
SeparableConv2D\n'), ((98150, 98186), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (98162, 98186), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((98207, 98216), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (98214, 98216), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98236, 98264), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (98241, 98264), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98284, 98296), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (98291, 98296), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((98316, 98344), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (98321, 98344), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98364, 98376), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (98371, 98376), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((98396, 98407), 'keras.layers.Dense', 'Dense', (['(1000)'], {}), '(1000)\n', (98401, 98407), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98779, 98825), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': 'input_shape'}), '((1, 1), input_shape=input_shape)\n', (98792, 98825), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((98845, 98882), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), 
"(32, (3, 3), activation='relu')\n", (98851, 98882), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98902, 98923), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (98915, 98923), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((98943, 98980), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (98949, 98980), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99000, 99036), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (99012, 99036), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((99057, 99078), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99070, 99078), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99098, 99135), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99104, 99135), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99155, 99176), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99168, 99176), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99196, 99233), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99202, 99233), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99253, 99289), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), 
strides=(2, 2))\n', (99265, 99289), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((99310, 99331), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99323, 99331), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99351, 99388), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99357, 99388), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99408, 99429), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99421, 99429), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99449, 99486), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99455, 99486), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99506, 99527), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99519, 99527), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99547, 99584), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99553, 99584), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99604, 99640), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (99616, 99640), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((99661, 99682), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99674, 99682), False, 'from keras.layers import 
ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99702, 99739), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99708, 99739), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99759, 99780), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99772, 99780), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99800, 99837), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99806, 99837), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99857, 99878), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99870, 99878), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99898, 99935), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99904, 99935), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99955, 99991), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (99967, 99991), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((100012, 100033), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (100025, 100033), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((100053, 100090), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (100059, 100090), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, 
BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100110, 100131), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (100123, 100131), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((100151, 100188), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (100157, 100188), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100208, 100229), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (100221, 100229), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((100249, 100286), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (100255, 100286), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100306, 100342), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (100318, 100342), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((100363, 100372), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (100370, 100372), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100392, 100420), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (100397, 100420), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100474, 100502), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (100479, 100502), False, 'from keras.layers import Dense, Activation, Conv2D, 
Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100556, 100567), 'keras.layers.Dense', 'Dense', (['(1000)'], {}), '(1000)\n', (100561, 100567), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101005, 101067), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, (3, 3), activation='relu', input_shape=input_shape)\n", (101011, 101067), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101087, 101124), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101093, 101124), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101145, 101182), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101151, 101182), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101202, 101239), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101208, 101239), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101260, 101297), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101266, 101297), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101317, 101354), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101323, 101354), False, 
'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101374, 101411), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101380, 101411), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101432, 101469), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101438, 101469), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101489, 101526), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101495, 101526), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101546, 101583), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101552, 101583), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101604, 101641), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101610, 101641), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101661, 101698), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101667, 101698), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101718, 101755), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), 
"(32, (3, 3), activation='relu')\n", (101724, 101755), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101776, 101785), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (101783, 101785), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101805, 101833), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (101810, 101833), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101853, 101865), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (101860, 101865), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((101885, 101913), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (101890, 101913), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101933, 101945), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (101940, 101945), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((101965, 101998), 'keras.layers.Dense', 'Dense', (['(1000)'], {'activation': '"""softmax"""'}), "(1000, activation='softmax')\n", (101970, 101998), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((102478, 102539), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {'input_length': 'max_len'}), '(max_features, embedding_dims, input_length=max_len)\n', (102487, 102539), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((102669, 102708), 'keras.layers.AveragePooling1D', 
'AveragePooling1D', ([], {'pool_size': 'pool_length'}), '(pool_size=pool_length)\n', (102685, 102708), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((102852, 102908), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'input_shape': '(2, 4, 4)', 'padding': '"""same"""'}), "(3, (1, 1), input_shape=(2, 4, 4), padding='same')\n", (102858, 102908), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((102928, 102962), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (102944, 102962), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((102982, 102997), 'keras.layers.Reshape', 'Reshape', (['(2, 3)'], {}), '((2, 3))\n', (102989, 102997), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((103167, 103223), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'input_shape': '(2, 4, 4)', 'padding': '"""same"""'}), "(3, (1, 1), input_shape=(2, 4, 4), padding='same')\n", (103173, 103223), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103243, 103277), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (103259, 103277), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((103297, 103312), 'keras.layers.Reshape', 'Reshape', (['(2, 3)'], {}), '((2, 3))\n', (103304, 103312), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((103332, 103371), 'keras.layers.LSTM', 'LSTM', (['(5)'], {'recurrent_activation': '"""sigmoid"""'}), "(5, recurrent_activation='sigmoid')\n", (103336, 103371), False, 'from keras.layers import 
SimpleRNN, LSTM, GRU\n'), ((103502, 103531), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(0, 1)'}), '(padding=(0, 1))\n', (103515, 103531), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((103550, 103594), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(2)', 'name': '"""bn_0_freq"""'}), "(axis=2, name='bn_0_freq')\n", (103568, 103594), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103633, 103680), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""conv1"""'}), "(2, (3, 3), padding='same', name='conv1')\n", (103639, 103680), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103696, 103734), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'name': '"""bn1"""'}), "(axis=3, name='bn1')\n", (103714, 103734), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103750, 103767), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (103760, 103767), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103783, 103843), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""pool1"""'}), "(pool_size=(2, 2), strides=(2, 2), name='pool1')\n", (103795, 103843), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((103882, 103929), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""conv2"""'}), "(4, (3, 3), padding='same', name='conv2')\n", (103888, 103929), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, 
Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103945, 103983), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'name': '"""bn2"""'}), "(axis=3, name='bn2')\n", (103963, 103983), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103999, 104016), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (104009, 104016), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104032, 104092), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""pool2"""'}), "(pool_size=(2, 2), strides=(2, 2), name='pool2')\n", (104044, 104092), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((104144, 104159), 'keras.layers.Reshape', 'Reshape', (['(2, 4)'], {}), '((2, 4))\n', (104151, 104159), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((104175, 104218), 'keras.layers.GRU', 'GRU', (['(32)'], {'return_sequences': '(True)', 'name': '"""gru1"""'}), "(32, return_sequences=True, name='gru1')\n", (104178, 104218), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((104234, 104278), 'keras.layers.GRU', 'GRU', (['(32)'], {'return_sequences': '(False)', 'name': '"""gru2"""'}), "(32, return_sequences=False, name='gru2')\n", (104237, 104278), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((104576, 104635), 'keras.layers.LSTM', 'LSTM', (['(3)'], {'input_shape': '(4, 5)', 'recurrent_activation': '"""sigmoid"""'}), "(3, input_shape=(4, 5), recurrent_activation='sigmoid')\n", (104580, 104635), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((104655, 104663), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (104660, 104663), False, 'from keras.layers import 
Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104683, 104704), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (104693, 104704), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104854, 104871), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {}), '(2, (3, 3))\n', (104860, 104871), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104897, 104906), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (104904, 104906), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105062, 105088), 'keras.layers.Dense', 'Dense', (['(8)'], {'name': '"""cap_dense"""'}), "(8, name='cap_dense')\n", (105067, 105088), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105104, 105139), 'keras.layers.Reshape', 'Reshape', (['(1, 8)'], {'name': '"""cap_reshape"""'}), "((1, 8), name='cap_reshape')\n", (105111, 105139), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105361, 105378), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {}), '(2, (3, 3))\n', (105367, 105378), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105404, 105413), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (105411, 105413), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105554, 105580), 'keras.layers.Dense', 'Dense', (['(8)'], {'name': '"""cap_dense"""'}), "(8, name='cap_dense')\n", (105559, 105580), False, 'from keras.layers import Dense, 
Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105596, 105631), 'keras.layers.Reshape', 'Reshape', (['(1, 8)'], {'name': '"""cap_reshape"""'}), "((1, 8), name='cap_reshape')\n", (105603, 105631), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105709, 105746), 'keras.layers.Embedding', 'Embedding', (['(8)', '(8)'], {'name': '"""cap_embedding"""'}), "(8, 8, name='cap_embedding')\n", (105718, 105746), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106131, 106148), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {}), '(2, (3, 3))\n', (106137, 106148), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((106174, 106183), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (106181, 106183), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((106339, 106365), 'keras.layers.Dense', 'Dense', (['(8)'], {'name': '"""cap_dense"""'}), "(8, name='cap_dense')\n", (106344, 106365), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((106381, 106416), 'keras.layers.Reshape', 'Reshape', (['(1, 8)'], {'name': '"""cap_reshape"""'}), "((1, 8), name='cap_reshape')\n", (106388, 106416), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106494, 106531), 'keras.layers.Embedding', 'Embedding', (['(8)', '(8)'], {'name': '"""cap_embedding"""'}), "(8, 8, name='cap_embedding')\n", (106503, 106531), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106618, 106665), 'keras.layers.LSTM', 'LSTM', (['(4)'], {'return_sequences': '(True)', 'name': '"""cap_lstm"""'}), "(4, return_sequences=True, 
name='cap_lstm')\n", (106622, 106665), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((107100, 107140), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'embed_hidden_size'], {}), '(vocab_size, embed_hidden_size)\n', (107109, 107140), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107170, 107182), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (107177, 107182), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107255, 107295), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'embed_hidden_size'], {}), '(vocab_size, embed_hidden_size)\n', (107264, 107295), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107325, 107337), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (107332, 107337), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107355, 107402), 'keras.layers.LSTM', 'LSTM', (['embed_hidden_size'], {'return_sequences': '(False)'}), '(embed_hidden_size, return_sequences=False)\n', (107359, 107402), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((107420, 107446), 'keras.layers.RepeatVector', 'RepeatVector', (['story_maxlen'], {}), '(story_maxlen)\n', (107432, 107446), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107492, 107539), 'keras.layers.LSTM', 'LSTM', (['embed_hidden_size'], {'return_sequences': '(False)'}), '(embed_hidden_size, return_sequences=False)\n', (107496, 107539), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((107557, 107569), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (107564, 107569), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107587, 107626), 'keras.layers.Dense', 'Dense', (['vocab_size'], {'activation': '"""softmax"""'}), "(vocab_size, 
activation='softmax')\n", (107592, 107626), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108070, 108164), 'keras.layers.Embedding', 'Embedding', (['vocabulary_size', 'embedding_dimension'], {'input_length': 'input_length', 'trainable': '(True)'}), '(vocabulary_size, embedding_dimension, input_length=input_length,\n trainable=True)\n', (108079, 108164), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((108269, 108282), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(2)'], {}), '(32, 2)\n', (108275, 108282), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108302, 108322), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (108320, 108322), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108342, 108360), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (108352, 108360), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108381, 108394), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(2)'], {}), '(32, 2)\n', (108387, 108394), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108414, 108434), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (108432, 108434), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108454, 108472), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (108464, 108472), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, 
SeparableConv2D\n'), ((108493, 108506), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(2)'], {}), '(32, 2)\n', (108499, 108506), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108526, 108546), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (108544, 108546), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108566, 108584), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (108576, 108584), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108605, 108621), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(17)'], {}), '(17)\n', (108617, 108621), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((108641, 108650), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (108648, 108650), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108671, 108694), 'keras.layers.Dense', 'Dense', (['(1)'], {'use_bias': '(True)'}), '(1, use_bias=True)\n', (108676, 108694), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108714, 108734), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (108732, 108734), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108754, 108775), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (108764, 108775), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109162, 109180), 
'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (109172, 109180), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109208, 109235), 'keras.layers.Cropping1D', 'Cropping1D', ([], {'cropping': '(1, 1)'}), '(cropping=(1, 1))\n', (109218, 109235), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((109265, 109295), 'keras.layers.Conv1D', 'Conv1D', (['(20)', '(3)'], {'padding': '"""valid"""'}), "(20, 3, padding='valid')\n", (109271, 109295), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109325, 109330), 'keras.layers.Add', 'Add', ([], {}), '()\n', (109328, 109330), False, 'from keras.layers import Add, Concatenate\n'), ((109718, 109749), 'keras.layers.Dense', 'Dense', (['(1000)'], {'input_shape': '(100,)'}), '(1000, input_shape=(100,))\n', (109723, 109749), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((8720, 8745), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (8734, 8745), False, 'import os\n'), ((8763, 8787), 'shutil.rmtree', 'shutil.rmtree', (['model_dir'], {}), '(model_dir)\n', (8776, 8787), False, 'import shutil\n'), ((9213, 9237), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (9227, 9237), True, 'import numpy as np\n'), ((9400, 9416), 'numpy.ones', 'np.ones', (['w.shape'], {}), '(w.shape)\n', (9407, 9416), True, 'import numpy as np\n'), ((9576, 9600), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (9590, 9600), True, 'import numpy as np\n'), ((10110, 10134), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (10124, 10134), True, 'import numpy as np\n'), ((10651, 10675), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), 
'(*w.shape)\n', (10665, 10675), True, 'import numpy as np\n'), ((11014, 11038), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (11028, 11038), True, 'import numpy as np\n'), ((11379, 11403), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (11393, 11403), True, 'import numpy as np\n'), ((11881, 11905), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (11895, 11905), True, 'import numpy as np\n'), ((12554, 12570), 'numpy.ones', 'np.ones', (['w.shape'], {}), '(w.shape)\n', (12561, 12570), True, 'import numpy as np\n'), ((13379, 13403), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (13393, 13403), True, 'import numpy as np\n'), ((14236, 14260), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (14250, 14260), True, 'import numpy as np\n'), ((13583, 13599), 'coremltools.models.utils._macos_version', '_macos_version', ([], {}), '()\n', (13597, 13599), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((15204, 15228), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (15218, 15228), True, 'import numpy as np\n'), ((16094, 16118), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (16108, 16118), True, 'import numpy as np\n'), ((16990, 17006), 'numpy.ones', 'np.ones', (['w.shape'], {}), '(w.shape)\n', (16997, 17006), True, 'import numpy as np\n'), ((17747, 17771), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (17761, 17771), True, 'import numpy as np\n'), ((18359, 18383), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (18373, 18383), True, 'import numpy as np\n'), ((19118, 19142), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (19132, 19142), True, 'import numpy as np\n'), ((19956, 19980), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (19970, 19980), 
True, 'import numpy as np\n'), ((20592, 20616), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (20606, 20616), True, 'import numpy as np\n'), ((21281, 21305), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (21295, 21305), True, 'import numpy as np\n'), ((21971, 21995), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (21985, 21995), True, 'import numpy as np\n'), ((22703, 22727), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (22717, 22727), True, 'import numpy as np\n'), ((24269, 24293), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (24283, 24293), True, 'import numpy as np\n'), ((25213, 25237), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (25227, 25237), True, 'import numpy as np\n'), ((26229, 26253), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (26243, 26253), True, 'import numpy as np\n'), ((26999, 27023), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (27013, 27023), True, 'import numpy as np\n'), ((27753, 27777), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (27767, 27777), True, 'import numpy as np\n'), ((28509, 28533), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (28523, 28533), True, 'import numpy as np\n'), ((29280, 29304), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (29294, 29304), True, 'import numpy as np\n'), ((30053, 30077), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (30067, 30077), True, 'import numpy as np\n'), ((30866, 30890), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (30880, 30890), True, 'import numpy as np\n'), ((31718, 31742), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (31732, 31742), True, 'import numpy as np\n'), ((32549, 32573), 
'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (32563, 32573), True, 'import numpy as np\n'), ((33475, 33499), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (33489, 33499), True, 'import numpy as np\n'), ((34461, 34485), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (34475, 34485), True, 'import numpy as np\n'), ((35412, 35436), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (35426, 35436), True, 'import numpy as np\n'), ((40366, 40390), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (40380, 40390), True, 'import numpy as np\n'), ((41014, 41038), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (41028, 41038), True, 'import numpy as np\n'), ((41701, 41725), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (41715, 41725), True, 'import numpy as np\n'), ((42596, 42620), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (42610, 42620), True, 'import numpy as np\n'), ((43134, 43158), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (43148, 43158), True, 'import numpy as np\n'), ((43453, 43477), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (43467, 43477), True, 'import numpy as np\n'), ((43947, 43971), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (43961, 43971), True, 'import numpy as np\n'), ((44603, 44627), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (44617, 44627), True, 'import numpy as np\n'), ((54611, 54635), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (54625, 54635), True, 'import numpy as np\n'), ((56594, 56662), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=1, recurrent_activation='sigmoid')\n", 
(56598, 56662), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((57528, 57596), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=2, recurrent_activation='sigmoid')\n", (57532, 57596), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((58389, 58457), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=2, recurrent_activation='sigmoid')\n", (58393, 58457), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((59027, 59095), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=2, recurrent_activation='sigmoid')\n", (59031, 59095), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((59669, 59765), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'return_sequences': '(False)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, return_sequences=False, implementation=2,\n recurrent_activation='sigmoid')\n", (59673, 59765), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((60433, 60528), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'return_sequences': '(True)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, return_sequences=True, implementation=2,\n recurrent_activation='sigmoid')\n", (60437, 60528), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((62477, 62501), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (62491, 62501), True, 'import numpy as np\n'), ((63050, 63074), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (63064, 63074), True, 'import numpy as np\n'), ((63767, 63791), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (63781, 63791), True, 'import numpy as np\n'), 
((64321, 64345), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (64335, 64345), True, 'import numpy as np\n'), ((64948, 64972), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (64962, 64972), True, 'import numpy as np\n'), ((65611, 65635), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (65625, 65635), True, 'import numpy as np\n'), ((66516, 66540), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (66530, 66540), True, 'import numpy as np\n'), ((67164, 67188), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (67178, 67188), True, 'import numpy as np\n'), ((67785, 67809), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (67799, 67809), True, 'import numpy as np\n'), ((68426, 68450), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (68440, 68450), True, 'import numpy as np\n'), ((68774, 68798), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (68788, 68798), True, 'import numpy as np\n'), ((69131, 69155), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (69145, 69155), True, 'import numpy as np\n'), ((69492, 69516), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (69506, 69516), True, 'import numpy as np\n'), ((69950, 69995), 'keras.layers.Permute', 'Permute', (['permute_order'], {'input_shape': '(4, 3, 2)'}), '(permute_order, input_shape=(4, 3, 2))\n', (69957, 69995), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((70912, 70936), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (70926, 70936), True, 'import numpy as np\n'), ((71723, 71747), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (71737, 71747), True, 'import numpy as np\n'), ((72190, 72214), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], 
{}), '(*w.shape)\n', (72204, 72214), True, 'import numpy as np\n'), ((72932, 72956), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (72946, 72956), True, 'import numpy as np\n'), ((73198, 73206), 'keras.layers.Dense', 'Dense', (['(8)'], {}), '(8)\n', (73203, 73206), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((73259, 73283), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (73273, 73283), True, 'import numpy as np\n'), ((74906, 74930), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (74920, 74930), True, 'import numpy as np\n'), ((75405, 75429), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (75419, 75429), True, 'import numpy as np\n'), ((75642, 75672), 'keras.layers.Dense', 'Dense', (['(6)'], {'activation': '"""softmax"""'}), "(6, activation='softmax')\n", (75647, 75672), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76341, 76365), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (76355, 76365), True, 'import numpy as np\n'), ((76864, 76872), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (76869, 76872), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((77906, 77925), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (77911, 77925), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((78586, 78610), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (78600, 78610), True, 'import numpy as np\n'), ((78964, 78988), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (78978, 78988), True, 'import numpy as 
np\n'), ((79780, 79804), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (79794, 79804), True, 'import numpy as np\n'), ((80257, 80281), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (80271, 80281), True, 'import numpy as np\n'), ((80560, 80584), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (80574, 80584), True, 'import numpy as np\n'), ((80922, 80946), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (80936, 80946), True, 'import numpy as np\n'), ((81308, 81332), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (81322, 81332), True, 'import numpy as np\n'), ((82133, 82157), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (82147, 82157), True, 'import numpy as np\n'), ((83416, 83439), 'distutils.version.StrictVersion', '_StrictVersion', (['"""2.2.1"""'], {}), "('2.2.1')\n", (83430, 83439), True, 'from distutils.version import StrictVersion as _StrictVersion\n'), ((87472, 87509), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (87478, 87509), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87596, 87632), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(1, 1)'}), '((2, 2), strides=(1, 1))\n', (87608, 87632), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((87669, 87706), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(4, 4)'], {'activation': '"""relu"""'}), "(32, (4, 4), activation='relu')\n", (87675, 87706), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87743, 87779), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 
2))\n', (87755, 87779), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((87816, 87853), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(4, 4)'], {'activation': '"""relu"""'}), "(32, (4, 4), activation='relu')\n", (87822, 87853), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87890, 87926), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (87902, 87926), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((87963, 87972), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (87970, 87972), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((90614, 90630), 'coremltools.models.utils._macos_version', '_macos_version', ([], {}), '()\n', (90628, 90630), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((90803, 90827), 'shutil.rmtree', 'shutil.rmtree', (['model_dir'], {}), '(model_dir)\n', (90816, 90827), False, 'import shutil\n'), ((92045, 92085), 'keras.layers.Dense', 'Dense', (['num_channels'], {'input_dim': 'input_dim'}), '(num_channels, input_dim=input_dim)\n', (92050, 92085), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((93219, 93273), 'keras.layers.Dense', 'Dense', (['num_channels'], {'input_shape': 'input_shape'}), '(num_channels, input_shape=input_shape, **kwargs)\n', (93224, 93273), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((94087, 94149), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(5)', 'kernel_size': '(7, 7)', 'input_shape': 'input_shape'}), '(filters=5, kernel_size=(7, 7), 
input_shape=input_shape)\n', (94093, 94149), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((94173, 94195), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {}), '(**kwargs)\n', (94185, 94195), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((95238, 95279), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape, **kwargs)\n', (95244, 95279), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96099, 96107), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (96104, 96107), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96129, 96137), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (96134, 96137), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103033, 103041), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (103038, 103041), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104365, 104389), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (104379, 104389), True, 'import numpy as np\n'), ((106697, 106705), 'keras.layers.Dense', 'Dense', (['(8)'], {}), '(8)\n', (106702, 106705), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109812, 109836), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (109826, 109836), True, 'import numpy as np\n'), ((3696, 3724), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (3710, 3724), True, 'import numpy as np\n'), ((7442, 7453), 
'coremltools.models.utils._is_macos', '_is_macos', ([], {}), '()\n', (7451, 7453), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((61164, 61209), 'keras.layers.Dense', 'Dense', (['fc_size'], {'name': '"""fc1"""', 'activation': '"""relu"""'}), "(fc_size, name='fc1', activation='relu')\n", (61169, 61209), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((61301, 61394), 'keras.layers.LSTM', 'LSTM', (['rnn_size'], {'return_sequences': '(True)', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(rnn_size, return_sequences=True, activation='relu', kernel_initializer\n ='he_normal')\n", (61305, 61394), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((61600, 61654), 'keras.layers.Dense', 'Dense', (['output_dim'], {'name': '"""y_pred"""', 'activation': '"""softmax"""'}), "(output_dim, name='y_pred', activation='softmax')\n", (61605, 61654), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81754, 81778), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (81768, 81778), True, 'import numpy as np\n'), ((82566, 82590), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (82580, 82590), True, 'import numpy as np\n'), ((83464, 83484), 'keras.layers.ReLU', 'ReLU', (['(6.0)'], {'name': 'name'}), '(6.0, name=name)\n', (83468, 83484), False, 'from keras.layers import DepthwiseConv2D, ReLU\n'), ((83529, 83557), 'keras.layers.Activation', 'Activation', (['relu6'], {'name': 'name'}), '(relu6, name=name)\n', (83539, 83557), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((92502, 92526), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (92516, 92526), True, 'import numpy as np\n'), ((7458, 
7474), 'coremltools.models.utils._macos_version', '_macos_version', ([], {}), '()\n', (7472, 7474), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((45089, 45113), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (45103, 45113), True, 'import numpy as np\n'), ((45696, 45720), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (45710, 45720), True, 'import numpy as np\n'), ((46191, 46215), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (46205, 46215), True, 'import numpy as np\n'), ((46794, 46818), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (46808, 46818), True, 'import numpy as np\n'), ((47308, 47332), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (47322, 47332), True, 'import numpy as np\n'), ((47898, 47922), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (47912, 47922), True, 'import numpy as np\n'), ((48469, 48493), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (48483, 48493), True, 'import numpy as np\n'), ((49042, 49066), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (49056, 49066), True, 'import numpy as np\n'), ((49615, 49639), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (49629, 49639), True, 'import numpy as np\n'), ((50028, 50052), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (50042, 50052), True, 'import numpy as np\n'), ((50536, 50560), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (50550, 50560), True, 'import numpy as np\n'), ((51124, 51148), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (51138, 51148), True, 'import numpy as np\n'), ((51777, 51801), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (51791, 51801), True, 'import numpy as np\n'), ((52443, 52467), 
'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (52457, 52467), True, 'import numpy as np\n'), ((53122, 53146), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (53136, 53146), True, 'import numpy as np\n'), ((53951, 53975), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (53965, 53975), True, 'import numpy as np\n'), ((55296, 55320), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (55310, 55320), True, 'import numpy as np\n'), ((55934, 55958), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (55948, 55958), True, 'import numpy as np\n'), ((56818, 56842), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (56832, 56842), True, 'import numpy as np\n'), ((57752, 57776), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (57766, 57776), True, 'import numpy as np\n'), ((58613, 58637), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (58627, 58637), True, 'import numpy as np\n'), ((59251, 59275), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (59265, 59275), True, 'import numpy as np\n'), ((60016, 60040), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (60030, 60040), True, 'import numpy as np\n'), ((60779, 60803), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (60793, 60803), True, 'import numpy as np\n'), ((73890, 73914), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (73904, 73914), True, 'import numpy as np\n'), ((76950, 76974), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (76964, 76974), True, 'import numpy as np\n'), ((77552, 77576), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (77566, 77576), True, 'import numpy as np\n'), ((78018, 78042), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], 
{}), '(*w.shape)\n', (78032, 78042), True, 'import numpy as np\n'), ((98511, 98535), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (98525, 98535), True, 'import numpy as np\n'), ((100671, 100695), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (100685, 100695), True, 'import numpy as np\n'), ((3774, 3802), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (3788, 3802), True, 'import numpy as np\n'), ((61770, 61794), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (61784, 61794), True, 'import numpy as np\n'), ((3610, 3633), 'numpy.product', 'np.product', (['input_shape'], {}), '(input_shape)\n', (3620, 3633), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Looks for scenes in arbitrary video
# python -W ignore video_to_scenes.py -in media/sample/LivingSt1958.mp4 -overwrite 1 -threshold 24 -fade 1 -plot 800
# python -W ignore video_to_scenes.py -in "media/downloads/ia_politicaladarchive/*.mp4" -threshold 24 -out "tmp/ia_politicaladarchive_scenes.csv"
import argparse
import csv
from lib.image_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.video_utils import *
import matplotlib.pyplot as plt
import numpy as np
import os
from pprint import pprint
from scenedetect.video_manager import VideoManager
from scenedetect.scene_manager import SceneManager
from scenedetect.stats_manager import StatsManager
from scenedetect.detectors.content_detector import ContentDetector
import sys
# Command-line interface: all options map to module-level constants below
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILES", default="media/sample/moonlight.mp4", help="Input file pattern")
parser.add_argument('-out', dest="OUTPUT_FILE", default="tmp/scenes.csv", help="CSV output file")
parser.add_argument('-threshold', dest="THRESHOLD", default=30.0, type=float, help="Threshold for scene detection; lower number = more scenes")
parser.add_argument('-min', dest="MIN_SCENE_DUR", default=500, type=int, help="Minimum scene duration in milliseconds")
parser.add_argument('-fade', dest="CHECK_FOR_FADE", default=0, type=int, help="Check for crossfades?")
parser.add_argument('-stats', dest="SAVE_STATS", default=0, type=int, help="Save statistics?")
parser.add_argument('-window', dest="WINDOW_SIZE", default=60, type=int, help="For fades, this is the window size in frames")
parser.add_argument('-fthreshold', dest="FADE_THRESHOLD", default=3.0, type=float, help="Threshold for crossfade detection; lower number = more scenes")
parser.add_argument('-overwrite', dest="OVERWRITE", default=0, type=int, help="Overwrite existing data?")
parser.add_argument('-plot', dest="PLOT", default="", help="Draw plot frames (e.g. 30:90)")
args = parser.parse_args()
# Copy parsed arguments into module-level constants; int flags become booleans
INPUT_FILES = args.INPUT_FILES
OUTPUT_FILE = args.OUTPUT_FILE
THRESHOLD = args.THRESHOLD
MIN_SCENE_DUR = args.MIN_SCENE_DUR
CHECK_FOR_FADE = args.CHECK_FOR_FADE > 0
SAVE_STATS = args.SAVE_STATS > 0
WINDOW_SIZE = args.WINDOW_SIZE
FADE_THRESHOLD = args.FADE_THRESHOLD
OVERWRITE = args.OVERWRITE > 0
PLOT = args.PLOT.strip()
# Determine plot frames: "30:90" -> (30, 90); "800" -> (0, 800); "" -> disabled
if ":" in PLOT:
    PLOT = tuple([int(p) for p in PLOT.split(":")])
elif len(PLOT) > 0:
    PLOT = (0, int(PLOT))
else:
    PLOT = False
# Skip work entirely if the output already exists and -overwrite was not given
if os.path.isfile(OUTPUT_FILE) and not OVERWRITE:
    print("%s already exists. Skipping." % OUTPUT_FILE)
    sys.exit()
# Expand the input glob pattern into a file list (helper from lib.io_utils)
files = getFilenames(INPUT_FILES)
fileCount = len(files)
# Make sure output dirs exist
makeDirectories(OUTPUT_FILE)
# Global counter mutated by getScenes() to report progress across files
progress = 0
def getScenes(video_path, threshold=30.0, minSceneDur=500, windowSize=50, fadeThreshold=3.0):
    """Detect scene cuts (and optionally crossfades) in a single video file.

    Runs PySceneDetect's ContentDetector over `video_path` and returns a list
    of scene dicts with keys: filename, index, start, end, dur (milliseconds)
    and frameStart/frameEnd (frame numbers).

    Reads module-level flags CHECK_FOR_FADE, PLOT, SAVE_STATS and OUTPUT_FILE,
    and increments the module-level `progress` counter for the status line.

    Args:
        video_path: path to the video to analyze.
        threshold: ContentDetector cut threshold; lower => more scenes.
        minSceneDur: minimum scene duration in milliseconds.
        windowSize: window (in frames) used for smoothing and fade search.
        fadeThreshold: smoothed content_val level treated as a crossfade.
    """
    global progress
    global fileCount
    basename = os.path.basename(video_path)
    # Per-frame stats are only needed when we post-process the detection
    # (fade search, plotting, or explicitly saving stats)
    doStats = CHECK_FOR_FADE or PLOT or SAVE_STATS
    # type: (str) -> List[Tuple[FrameTimecode, FrameTimecode]]
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)
    base_timecode = video_manager.get_base_timecode()
    framerate = video_manager.get_framerate()
    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold).
    # Convert the minimum duration from milliseconds to frames.
    min_scene_len = roundInt(minSceneDur / 1000.0 * framerate)
    scene_manager.add_detector(ContentDetector(threshold=threshold, min_scene_len=min_scene_len))
    # Per-video stats file lives next to OUTPUT_FILE: <out>_<video>_stats.csv
    stats_file_path = OUTPUT_FILE.replace(".csv", "%s.csv")
    stats_file_path = stats_file_path % ("_" + basename + "_stats")
    scene_list = []
    print("Looking for scenes in %s" % video_path)
    try:
        # If stats file exists, load it (avoids re-decoding the video).
        if doStats and os.path.exists(stats_file_path):
            # Read stats from CSV file opened in read mode:
            with open(stats_file_path, 'r') as stats_file:
                stats_manager.load_from_csv(stats_file, base_timecode)
        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()
        # Start video_manager.
        video_manager.start()
        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)
        # Obtain list of detected scenes.
        scenes = scene_manager.get_scene_list(base_timecode)
        # Each scene is a tuple of (start, end) FrameTimecodes.
        for i, scene in enumerate(scenes):
            start = roundInt(scene[0].get_seconds()*1000)
            end = roundInt(scene[1].get_seconds()*1000)
            scene_list.append({
                "filename": basename,
                "index": i,
                "start": start,
                "end": end,
                "dur": end - start,
                "frameStart": scene[0].get_frames(),
                "frameEnd": scene[1].get_frames()
            })
        # We only write to the stats file if a save is required:
        if doStats and stats_manager.is_save_required():
            with open(stats_file_path, 'w') as stats_file:
                stats_manager.save_to_csv(stats_file, base_timecode)
        # Retrieve raw data for plotting and additional analysis.
        # NOTE(review): this read is unconditional, but the stats file is only
        # written when doStats is True -- a default run (no -fade/-stats/-plot)
        # presumably fails here unless a previous run left the file; confirm.
        fieldNames, sceneData = readCsv(stats_file_path, skipLines=1)
        dlen = len(sceneData)
        # Add smoothed data: centered moving average of content_val per frame
        windowLeft = int(windowSize/2)
        windowRight = windowSize - windowLeft
        for i, d in enumerate(sceneData):
            i0 = max(i - windowLeft, 0)
            i1 = min(i + windowRight, dlen-1)
            sceneData[i]["smoothed"] = np.mean([d["content_val"] for d in sceneData[i0:i1]])
            sceneData[i]["ms"] = timecodeToMs(d["Timecode"])
        # Add crossfade cuts: look for sustained high smoothed content change
        # that the hard-cut detector missed
        if CHECK_FOR_FADE:
            for i, d in enumerate(sceneData):
                ms = d["ms"]
                value = d["smoothed"]
                frame = d["Frame Number"]
                neighboringCuts = [s for s in scene_list if abs(frame-s["frameStart"]) <= windowSize or abs(frame-s["frameEnd"]) <= windowSize]
                # if there's no nearby cuts and we've reached the fade threshold
                if len(neighboringCuts) <= 0 and value >= fadeThreshold:
                    # retrieve the scene right before this one
                    sortedList = sorted(scene_list, key=lambda k: k['frameStart'])
                    prev = [s for s in sortedList if s["frameStart"] < frame]
                    if len(prev) > 0:
                        prev = prev[-1]
                    else:
                        prev = sortedList[0]
                    # Find local minimums to determine fade start/end
                    leftWindow = sorted([d for d in sceneData if frame-windowSize < d["Frame Number"] < frame], key=lambda k: k['smoothed'])
                    rightWindow = sorted([d for d in sceneData if frame < d["Frame Number"] < frame+windowSize], key=lambda k: k['smoothed'])
                    fadeStart = leftWindow[0]
                    fadeEnd = rightWindow[0]
                    # Add new cut if we're not too close to the edges
                    if fadeStart["ms"]-prev["start"] >= minSceneDur and prev["end"] - fadeEnd["ms"] >= minSceneDur:
                        # Add the new scene (starts where the fade settles)
                        scene_list.append({
                            "filename": basename,
                            "index": prev["index"]+1,
                            "frameStart": fadeEnd["Frame Number"],
                            "frameEnd": prev["frameEnd"],
                            "start": fadeEnd["ms"],
                            "end": prev["end"],
                            "dur": prev["end"] - fadeEnd["ms"]
                        })
                        # Update the previous scene to end where the fade begins
                        scene_list[prev["index"]]["end"] = fadeStart["ms"]
                        scene_list[prev["index"]]["dur"] = fadeStart["ms"] - prev["start"]
                        scene_list[prev["index"]]["frameEnd"] = fadeStart["Frame Number"]
            # Sort by frame order and renumber indices after insertions
            scene_list = sorted(scene_list, key=lambda k: k['frameStart'])
            for j, s in enumerate(scene_list):
                scene_list[j]["index"] = j
        # Optional diagnostic plot over the requested frame range
        if PLOT:
            f0, f1 = PLOT
            # add raw data
            xs = [d["Frame Number"]-1 for d in sceneData if f0 <= d["Frame Number"] <= f1]
            ys = [d["content_val"] for d in sceneData if f0 <= d["Frame Number"] <= f1]
            plt.plot(xs, ys)
            # add smoothed data
            ys = [d["smoothed"] for d in sceneData if f0 <= d["Frame Number"] <= f1]
            plt.plot(xs, ys, "c")
            # add horizontal line for threshold
            plt.plot([xs[0], xs[-1]], [threshold, threshold], "g--")
            # add scenes as plot data
            xs = [d["frameEnd"]-1 for d in scene_list if f0 <= d["frameEnd"] <= f1]
            ys = [sceneData[d["frameEnd"]-1]["content_val"] for d in scene_list if f0 <= d["frameEnd"] <= f1]
            plt.scatter(xs, ys, c="red")
            plt.show()
        # Drop the temporary stats file unless the user asked to keep it
        if os.path.exists(stats_file_path) and not SAVE_STATS:
            os.remove(stats_file_path)
    finally:
        video_manager.release()
    # Update the one-line progress indicator shared across all files
    progress += 1
    sys.stdout.write('\r')
    sys.stdout.write("%s%%" % round(1.0*progress/fileCount*100,1))
    sys.stdout.flush()
    return scene_list
# Collect scenes from every input file and write them to a single CSV
scenes = []
for fn in files:
    scenes += getScenes(fn, threshold=THRESHOLD, minSceneDur=MIN_SCENE_DUR, windowSize=WINDOW_SIZE, fadeThreshold=FADE_THRESHOLD)
# Only a subset of the scene dict keys is persisted (helper from lib.io_utils)
headings = ["filename", "index", "start", "dur"]
writeCsv(OUTPUT_FILE, scenes, headings)
| [
"sys.stdout.write",
"os.remove",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"scenedetect.stats_manager.StatsManager",
"os.path.basename",
"scenedetect.scene_manager.SceneManager",
"scenedetect.detectors.content_detector.ContentDetector",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatt... | [((807, 832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (830, 832), False, 'import argparse\n'), ((2547, 2574), 'os.path.isfile', 'os.path.isfile', (['OUTPUT_FILE'], {}), '(OUTPUT_FILE)\n', (2561, 2574), False, 'import os\n'), ((2654, 2664), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2662, 2664), False, 'import sys\n'), ((2962, 2990), 'os.path.basename', 'os.path.basename', (['video_path'], {}), '(video_path)\n', (2978, 2990), False, 'import os\n'), ((3126, 3152), 'scenedetect.video_manager.VideoManager', 'VideoManager', (['[video_path]'], {}), '([video_path])\n', (3138, 3152), False, 'from scenedetect.video_manager import VideoManager\n'), ((3173, 3187), 'scenedetect.stats_manager.StatsManager', 'StatsManager', ([], {}), '()\n', (3185, 3187), False, 'from scenedetect.stats_manager import StatsManager\n'), ((3271, 3298), 'scenedetect.scene_manager.SceneManager', 'SceneManager', (['stats_manager'], {}), '(stats_manager)\n', (3283, 3298), False, 'from scenedetect.scene_manager import SceneManager\n'), ((9566, 9588), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (9582, 9588), False, 'import sys\n'), ((9660, 9678), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9676, 9678), False, 'import sys\n'), ((3607, 3672), 'scenedetect.detectors.content_detector.ContentDetector', 'ContentDetector', ([], {'threshold': 'threshold', 'min_scene_len': 'min_scene_len'}), '(threshold=threshold, min_scene_len=min_scene_len)\n', (3622, 3672), False, 'from scenedetect.detectors.content_detector import ContentDetector\n'), ((4005, 4036), 'os.path.exists', 'os.path.exists', (['stats_file_path'], {}), '(stats_file_path)\n', (4019, 4036), False, 'import os\n'), ((5849, 5902), 'numpy.mean', 'np.mean', (["[d['content_val'] for d in sceneData[i0:i1]]"], {}), "([d['content_val'] for d in sceneData[i0:i1]])\n", (5856, 5902), True, 'import numpy as np\n'), ((8810, 8826), 'matplotlib.pyplot.plot', 
'plt.plot', (['xs', 'ys'], {}), '(xs, ys)\n', (8818, 8826), True, 'import matplotlib.pyplot as plt\n'), ((8957, 8978), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '"""c"""'], {}), "(xs, ys, 'c')\n", (8965, 8978), True, 'import matplotlib.pyplot as plt\n'), ((9040, 9096), 'matplotlib.pyplot.plot', 'plt.plot', (['[xs[0], xs[-1]]', '[threshold, threshold]', '"""g--"""'], {}), "([xs[0], xs[-1]], [threshold, threshold], 'g--')\n", (9048, 9096), True, 'import matplotlib.pyplot as plt\n'), ((9342, 9370), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'c': '"""red"""'}), "(xs, ys, c='red')\n", (9353, 9370), True, 'import matplotlib.pyplot as plt\n'), ((9383, 9393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9391, 9393), True, 'import matplotlib.pyplot as plt\n'), ((9406, 9437), 'os.path.exists', 'os.path.exists', (['stats_file_path'], {}), '(stats_file_path)\n', (9420, 9437), False, 'import os\n'), ((9470, 9496), 'os.remove', 'os.remove', (['stats_file_path'], {}), '(stats_file_path)\n', (9479, 9496), False, 'import os\n')] |
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Calculate average latent variables (here called attribute vectors)
for the different attributes in CelebA
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import importlib
import math
import os
import sys
import time
import facenet
import h5py
import numpy as np
import tensorflow as tf
from six import iteritems
def main(args):
    """Encode all CelebA images with a trained VAE and derive attribute vectors.

    Pipeline (TF1 graph/queue style):
      1. Read per-image attribute labels from the annotations file.
      2. Feed images through an input queue, normalize, and run the VAE encoder
         to obtain one sampled latent vector per image.
      3. For each attribute, compute mean(latent | attr==1) - mean(latent | attr==-1)
         and save everything to an HDF5 file at args.output_filename.
    """
    # Per-channel RGB mean/stddev used to normalize images before encoding
    # (presumably precomputed over the CelebA training set -- not derived here)
    img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
    img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
    vae_checkpoint = os.path.expanduser(args.vae_checkpoint)
    fields, attribs_dict = read_annotations(args.annotations_filename)
    # The VAE model definition is loaded dynamically from the module path given
    # on the command line (e.g. src.generative.models.dfc_vae)
    vae_def = importlib.import_module(args.vae_def)
    vae = vae_def.Vae(args.latent_var_size)
    gen_image_size = vae.get_image_size()
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        image_list = facenet.get_image_paths(os.path.expanduser(args.data_dir))
        # Get attributes for images; annotation keys are filenames sans extension
        nrof_attributes = len(fields)
        attribs_list = []
        for img in image_list:
            key = os.path.split(img)[1].split('.')[0]
            attr = attribs_dict[key]
            assert len(attr) == nrof_attributes
            attribs_list.append(attr)
        # Create the input queue; num_epochs=1 so each image is seen exactly once
        index_list = range(len(image_list))
        input_queue = tf.train.slice_input_producer([image_list, attribs_list, index_list], num_epochs=1, shuffle=False)
        nrof_preprocess_threads = 4
        image_per_thread = []
        for _ in range(nrof_preprocess_threads):
            filename = input_queue[0]
            file_contents = tf.read_file(filename)
            image = tf.image.decode_image(file_contents, channels=3)
            image = tf.image.resize_image_with_crop_or_pad(image, 160, 160)
            # image = tf.image.resize_images(image, (64,64))
            image.set_shape((args.image_size, args.image_size, 3))
            attrib = input_queue[1]
            attrib.set_shape((nrof_attributes,))
            image = tf.cast(image, tf.float32)
            image_per_thread.append([image, attrib, input_queue[2]])
        # The original index travels through the queue so results can be
        # scattered back into position even though batching may reorder them
        images, attribs, indices = tf.train.batch_join(
            image_per_thread, batch_size=args.batch_size,
            shapes=[(args.image_size, args.image_size, 3), (nrof_attributes,), ()], enqueue_many=False,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        # Normalize
        images_norm = (images - img_mean) / img_stddev
        # Resize to appropriate size for the encoder
        images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size, gen_image_size))
        # Create encoder network
        mean, log_variance = vae.encoder(images_norm_resize, True)
        # Reparameterization: latent = mean + eps * exp(log_var / 2)
        epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
        std = tf.exp(log_variance / 2)
        latent_var = mean + epsilon * std
        # Create a saver (only used to restore the pre-trained encoder weights)
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        # Start running operations on the Graph
        gpu_memory_fraction = 1.0
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)
        with sess.as_default():
            if vae_checkpoint:
                print('Restoring VAE checkpoint: %s' % vae_checkpoint)
                saver.restore(sess, vae_checkpoint)
            nrof_images = len(image_list)
            nrof_batches = int(math.ceil(len(image_list) / args.batch_size))
            latent_vars = np.zeros((nrof_images, args.latent_var_size))
            attributes = np.zeros((nrof_images, nrof_attributes))
            for i in range(nrof_batches):
                start_time = time.time()
                latent_var_, attribs_, indices_ = sess.run([latent_var, attribs, indices])
                # Scatter batch results back to their original image positions
                latent_vars[indices_, :] = latent_var_
                attributes[indices_, :] = attribs_
                duration = time.time() - start_time
                print('Batch %d/%d: %.3f seconds' % (i + 1, nrof_batches, duration))
            # NOTE: This will print the 'Out of range' warning if the last batch is not full,
            # as described by https://github.com/tensorflow/tensorflow/issues/8330
            # Calculate average change in the latent variable when each attribute changes
            # (attribute labels are +1 / -1, per the argwhere tests below)
            attribute_vectors = np.zeros((nrof_attributes, args.latent_var_size), np.float32)
            for i in range(nrof_attributes):
                pos_idx = np.argwhere(attributes[:, i] == 1)[:, 0]
                neg_idx = np.argwhere(attributes[:, i] == -1)[:, 0]
                pos_avg = np.mean(latent_vars[pos_idx, :], 0)
                neg_avg = np.mean(latent_vars[neg_idx, :], 0)
                attribute_vectors[i, :] = pos_avg - neg_avg
            filename = os.path.expanduser(args.output_filename)
            print('Writing attribute vectors, latent variables and attributes to %s' % filename)
            mdict = {'latent_vars': latent_vars, 'attributes': attributes,
                     'fields': fields, 'attribute_vectors': attribute_vectors}
            with h5py.File(filename, 'w') as f:
                for key, value in iteritems(mdict):
                    f.create_dataset(key, data=value)
def read_annotations(filename):
    """Parse a CelebA-style attribute annotations file.

    File layout: line 0 is the number of entries, line 1 the attribute
    field names, and every following line is
    ``<image_name>.<ext> <attr_1> <attr_2> ...`` with integer attributes.

    Args:
        filename: Path to the annotations file.

    Returns:
        Tuple ``(fields, attribs)`` where ``fields`` is the list of
        attribute names (empty if the file has fewer than two lines) and
        ``attribs`` maps the image name (extension stripped) to a list of
        integer attribute values.
    """
    fields = []  # initialized so a short/empty file no longer raises UnboundLocalError
    attribs = {}
    with open(filename, 'r') as f:
        # Iterate the file object directly instead of f.readlines() to
        # avoid materializing the whole file in memory.
        for i, line in enumerate(f):
            if i == 0:
                continue  # First line is the number of entries in the file
            elif i == 1:
                fields = line.strip().split()  # Second line is the field names
            else:
                parts = line.split()
                img_name = parts[0].split('.')[0]
                # Store a list, not a bare map object: under Python 3 a map
                # is a one-shot iterator and would be empty after the first
                # pass over the values.
                attribs[img_name] = [int(v) for v in parts[1:]]
    return fields, attribs
def parse_arguments(argv):
    """Build the command-line parser for this script and parse ``argv``.

    Args:
        argv: List of argument strings, typically ``sys.argv[1:]``.

    Returns:
        ``argparse.Namespace`` with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # Fix: the original attached ``default=`` values to required positional
    # arguments; argparse never applies defaults to required positionals, so
    # those inert defaults are removed to avoid suggesting the arguments
    # are optional.
    parser.add_argument('vae_def', type=str,
        help='Model definition for the variational autoencoder. Points to a module containing the definition.')
    parser.add_argument('vae_checkpoint', type=str,
        help='Checkpoint file of a pre-trained variational autoencoder.')
    parser.add_argument('data_dir', type=str,
        help='Path to the directory containing aligned face patches for the CelebA dataset.')
    parser.add_argument('annotations_filename', type=str,
        help='Path to the annotations file')
    parser.add_argument('output_filename', type=str,
        help='Filename to use for the file containing the attribute vectors.')
    parser.add_argument('--batch_size', type=int,
        help='Number of images to process in a batch.', default=128)
    parser.add_argument('--image_size', type=int,
        help='Image size (height, width) in pixels.', default=64)
    parser.add_argument('--latent_var_size', type=int,
        help='Dimensionality of the latent variable.', default=100)
    parser.add_argument('--seed', type=int,
        help='Random seed.', default=666)
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Entry point: parse the CLI arguments (excluding the program name) and run.
    main(parse_arguments(sys.argv[1:]))
| [
"tensorflow.train.Coordinator",
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"tensorflow.train.batch_join",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"numpy.mean",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.GPUOptions",
"six.iteritems",
... | [((1538, 1589), 'numpy.array', 'np.array', (['[134.10714722, 102.52040863, 87.15436554]'], {}), '([134.10714722, 102.52040863, 87.15436554])\n', (1546, 1589), True, 'import numpy as np\n'), ((1694, 1733), 'os.path.expanduser', 'os.path.expanduser', (['args.vae_checkpoint'], {}), '(args.vae_checkpoint)\n', (1712, 1733), False, 'import os\n'), ((1821, 1858), 'importlib.import_module', 'importlib.import_module', (['args.vae_def'], {}), '(args.vae_def)\n', (1844, 1858), False, 'import importlib\n'), ((7351, 7376), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7374, 7376), False, 'import argparse\n'), ((1615, 1670), 'numpy.array', 'np.array', (['[3941.30175781, 2856.94287109, 2519.35791016]'], {}), '([3941.30175781, 2856.94287109, 2519.35791016])\n', (1623, 1670), True, 'import numpy as np\n'), ((1988, 2017), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (2006, 2017), True, 'import tensorflow as tf\n'), ((2508, 2610), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[image_list, attribs_list, index_list]'], {'num_epochs': '(1)', 'shuffle': '(False)'}), '([image_list, attribs_list, index_list],\n num_epochs=1, shuffle=False)\n', (2537, 2610), True, 'import tensorflow as tf\n'), ((3322, 3581), 'tensorflow.train.batch_join', 'tf.train.batch_join', (['image_per_thread'], {'batch_size': 'args.batch_size', 'shapes': '[(args.image_size, args.image_size, 3), (nrof_attributes,), ()]', 'enqueue_many': '(False)', 'capacity': '(4 * nrof_preprocess_threads * args.batch_size)', 'allow_smaller_final_batch': '(True)'}), '(image_per_thread, batch_size=args.batch_size, shapes=[(\n args.image_size, args.image_size, 3), (nrof_attributes,), ()],\n enqueue_many=False, capacity=4 * nrof_preprocess_threads * args.\n batch_size, allow_smaller_final_batch=True)\n', (3341, 3581), True, 'import tensorflow as tf\n'), ((3777, 3846), 'tensorflow.image.resize_images', 'tf.image.resize_images', 
(['images_norm', '(gen_image_size, gen_image_size)'], {}), '(images_norm, (gen_image_size, gen_image_size))\n', (3799, 3846), True, 'import tensorflow as tf\n'), ((4041, 4065), 'tensorflow.exp', 'tf.exp', (['(log_variance / 2)'], {}), '(log_variance / 2)\n', (4047, 4065), True, 'import tensorflow as tf\n'), ((4311, 4377), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_memory_fraction'}), '(per_process_gpu_memory_fraction=gpu_memory_fraction)\n', (4324, 4377), True, 'import tensorflow as tf\n'), ((4599, 4621), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (4619, 4621), True, 'import tensorflow as tf\n'), ((4630, 4682), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord', 'sess': 'sess'}), '(coord=coord, sess=sess)\n', (4658, 4682), True, 'import tensorflow as tf\n'), ((2064, 2097), 'os.path.expanduser', 'os.path.expanduser', (['args.data_dir'], {}), '(args.data_dir)\n', (2082, 2097), False, 'import os\n'), ((2789, 2811), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (2801, 2811), True, 'import tensorflow as tf\n'), ((2832, 2880), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['file_contents'], {'channels': '(3)'}), '(file_contents, channels=3)\n', (2853, 2880), True, 'import tensorflow as tf\n'), ((2901, 2956), 'tensorflow.image.resize_image_with_crop_or_pad', 'tf.image.resize_image_with_crop_or_pad', (['image', '(160)', '(160)'], {}), '(image, 160, 160)\n', (2939, 2956), True, 'import tensorflow as tf\n'), ((3190, 3216), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (3197, 3216), True, 'import tensorflow as tf\n'), ((4165, 4189), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4187, 4189), True, 'import tensorflow as tf\n'), ((4497, 4530), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4528, 
4530), True, 'import tensorflow as tf\n'), ((4549, 4581), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4579, 4581), True, 'import tensorflow as tf\n'), ((5017, 5062), 'numpy.zeros', 'np.zeros', (['(nrof_images, args.latent_var_size)'], {}), '((nrof_images, args.latent_var_size))\n', (5025, 5062), True, 'import numpy as np\n'), ((5088, 5128), 'numpy.zeros', 'np.zeros', (['(nrof_images, nrof_attributes)'], {}), '((nrof_images, nrof_attributes))\n', (5096, 5128), True, 'import numpy as np\n'), ((5847, 5908), 'numpy.zeros', 'np.zeros', (['(nrof_attributes, args.latent_var_size)', 'np.float32'], {}), '((nrof_attributes, args.latent_var_size), np.float32)\n', (5855, 5908), True, 'import numpy as np\n'), ((6297, 6337), 'os.path.expanduser', 'os.path.expanduser', (['args.output_filename'], {}), '(args.output_filename)\n', (6315, 6337), False, 'import os\n'), ((1955, 1965), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1963, 1965), True, 'import tensorflow as tf\n'), ((4411, 4478), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, log_device_placement=False)\n', (4425, 4478), True, 'import tensorflow as tf\n'), ((5200, 5211), 'time.time', 'time.time', ([], {}), '()\n', (5209, 5211), False, 'import time\n'), ((6115, 6150), 'numpy.mean', 'np.mean', (['latent_vars[pos_idx, :]', '(0)'], {}), '(latent_vars[pos_idx, :], 0)\n', (6122, 6150), True, 'import numpy as np\n'), ((6177, 6212), 'numpy.mean', 'np.mean', (['latent_vars[neg_idx, :]', '(0)'], {}), '(latent_vars[neg_idx, :], 0)\n', (6184, 6212), True, 'import numpy as np\n'), ((6606, 6630), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (6615, 6630), False, 'import h5py\n'), ((6671, 6687), 'six.iteritems', 'iteritems', (['mdict'], {}), '(mdict)\n', (6680, 6687), False, 'from six import iteritems\n'), ((3985, 3999), 'tensorflow.shape', 'tf.shape', 
(['mean'], {}), '(mean)\n', (3993, 3999), True, 'import tensorflow as tf\n'), ((5436, 5447), 'time.time', 'time.time', ([], {}), '()\n', (5445, 5447), False, 'import time\n'), ((5980, 6014), 'numpy.argwhere', 'np.argwhere', (['(attributes[:, i] == 1)'], {}), '(attributes[:, i] == 1)\n', (5991, 6014), True, 'import numpy as np\n'), ((6047, 6082), 'numpy.argwhere', 'np.argwhere', (['(attributes[:, i] == -1)'], {}), '(attributes[:, i] == -1)\n', (6058, 6082), True, 'import numpy as np\n'), ((2249, 2267), 'os.path.split', 'os.path.split', (['img'], {}), '(img)\n', (2262, 2267), False, 'import os\n')] |
#!/usr/bin/env python3
# This is a script that analyses the simulation results from
# the script `PICMI_inputs_2d`.
#
# The run produces two plot files, one per MPI communicator; this test
# FAILS (exit code 1) when the two sets of checksums agree, because the
# two communicators are expected to produce different results.
import sys
import matplotlib
matplotlib.use('Agg')
import yt
yt.funcs.mylog.setLevel(50)
import numpy as np
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksum

# Plot file written by the first communicator
fn1 = "Python_pass_mpi_comm_plt1_00010"
# Plot file written by the second communicator
fn2 = "Python_pass_mpi_comm_plt2_00010"
# Test names are the plotfile names with the iteration suffix stripped.
test_name1 = fn1[:-9]
test_name2 = fn2[:-9]
checksum1 = checksum.Checksum(test_name1, fn1, do_fields=True,
                            do_particles=True)
checksum2 = checksum.Checksum(test_name2, fn2, do_fields=True,
                            do_particles=True)

# Tolerances for the per-quantity value comparison
rtol = 1.e-9
atol = 1.e-40

# Evaluate the checksums against each other, adapted from the
# Checksum.evaluate() method.

# Outer keys (levels, species) must match for the comparison to make sense.
if checksum1.data.keys() != checksum2.data.keys():
    print("ERROR: plotfile 1 and plotfile 2 checksums "
          "have different outer keys:")
    print("Plot1: %s" % checksum1.data.keys())
    print("Plot2: %s" % checksum2.data.keys())
    sys.exit(1)

# Inner keys (field and particle quantities) must match as well.
for key1 in checksum1.data:
    if checksum1.data[key1].keys() != checksum2.data[key1].keys():
        print("ERROR: plotfile 1 and plotfile 2 checksums have "
              "different inner keys:")
        print("Common outer keys: %s" % checksum2.data.keys())
        print("Plotfile 1 inner keys in %s: %s"
              % (key1, checksum1.data[key1].keys()))
        print("Plotfile 2 inner keys in %s: %s"
              % (key1, checksum2.data[key1].keys()))
        sys.exit(1)

# Flag any quantity whose checksums agree between the two plotfiles.
checksums_same = False
for key1 in checksum1.data:
    for key2 in checksum1.data[key1]:
        # These quantities match whenever the two communicators have the
        # same number of procs, so they are excluded from the comparison.
        if key2 in ["particle_cpu", "particle_id", "particle_position_y"]:
            continue
        if np.isclose(checksum2.data[key1][key2],
                      checksum1.data[key1][key2],
                      rtol=rtol, atol=atol):
            print("ERROR: plotfile 1 and plotfile 2 checksums have "
                  "same values for key [%s,%s]" % (key1, key2))
            print("Plotfile 1: [%s,%s] %.15e"
                  % (key1, key2, checksum1.data[key1][key2]))
            print("Plotfile 2: [%s,%s] %.15e"
                  % (key1, key2, checksum2.data[key1][key2]))
            checksums_same = True
if checksums_same:
    sys.exit(1)
| [
"yt.funcs.mylog.setLevel",
"sys.path.insert",
"numpy.isclose",
"matplotlib.use",
"checksum.Checksum",
"sys.exit"
] | [((147, 168), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (161, 168), False, 'import matplotlib\n'), ((179, 206), 'yt.funcs.mylog.setLevel', 'yt.funcs.mylog.setLevel', (['(50)'], {}), '(50)\n', (202, 206), False, 'import yt\n'), ((226, 286), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (241, 286), False, 'import sys\n'), ((510, 579), 'checksum.Checksum', 'checksum.Checksum', (['test_name1', 'fn1'], {'do_fields': '(True)', 'do_particles': '(True)'}), '(test_name1, fn1, do_fields=True, do_particles=True)\n', (527, 579), False, 'import checksum\n'), ((622, 691), 'checksum.Checksum', 'checksum.Checksum', (['test_name2', 'fn2'], {'do_fields': '(True)', 'do_particles': '(True)'}), '(test_name2, fn2, do_fields=True, do_particles=True)\n', (639, 691), False, 'import checksum\n'), ((1134, 1145), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1142, 1145), False, 'import sys\n'), ((2649, 2660), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2657, 2660), False, 'import sys\n'), ((1703, 1714), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1711, 1714), False, 'import sys\n'), ((1869, 1962), 'numpy.isclose', 'np.isclose', (['checksum2.data[key1][key2]', 'checksum1.data[key1][key2]'], {'rtol': 'rtol', 'atol': 'atol'}), '(checksum2.data[key1][key2], checksum1.data[key1][key2], rtol=\n rtol, atol=atol)\n', (1879, 1962), True, 'import numpy as np\n')] |
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from collections import defaultdict
import pytest
import numpy as np
import mne
import mne_nirs
from mne.datasets import testing
from mne.utils import catch_logging, check_version
from mne_nirs.experimental_design.tests.test_experimental_design import \
_load_dataset
from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.statistics import run_glm
from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection
from mne_nirs.utils import glm_to_tidy
# Root of the MNE testing dataset; download=False means only data already
# on disk is used (tests relying on it are skipped when absent).
testing_path = testing.data_path(download=False)
raw_path = testing_path + '/NIRx/nirscout/nirx_15_2_recording_w_short'
subjects_dir = testing_path + '/subjects'
# Skip marker for tests that require MNE-Python >= 1.0.
requires_mne_1 = pytest.mark.skipif(not check_version('mne', '1.0'),
                                    reason='Needs MNE-Python 1.0')
def test_plot_nirs_source_detector_pyvista(requires_pyvista):
    """Smoke-test 3D source/detector rendering with random channel data."""
    raw = mne.io.read_raw_nirx(raw_path)
    # Rendering options shared by both calls below.
    common = dict(show_axes=True,
                  subject='fsaverage',
                  trans='fsaverage',
                  surfaces=['white'],
                  fnirs=False,
                  subjects_dir=subjects_dir,
                  verbose=True)
    # Signed (zero-centred) channel data.
    mne_nirs.visualisation.plot_nirs_source_detector(
        np.random.randn(len(raw.ch_names)),
        raw.info, **common)
    # Strictly positive channel data.
    mne_nirs.visualisation.plot_nirs_source_detector(
        np.abs(np.random.randn(len(raw.ch_names))) + 5,
        raw.info, **common)
@pytest.mark.filterwarnings('ignore:"plot_glm_topo" has been deprecated.*:')
def test_run_plot_GLM_topo():
    """Check the axis counts produced by the deprecated ``plot_glm_topo``.

    Each assertion pins the expected figure layout for one combination of
    requested conditions, chromophore selection, and colorbar option.
    """
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast
    design_matrix = make_first_level_design_matrix(
        raw_intensity, drift_order=1, drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    glm_estimates = run_glm(raw_haemo, design_matrix)

    # 5 conditions (A,B,C,Drift,Constant) * two chroma + 2 x colorbar
    fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix)
    assert len(fig.axes) == 12

    # Two conditions * two chroma + 2 x colorbar
    fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix,
                        requested_conditions=['A', 'B'])
    assert len(fig.axes) == 6

    # Two conditions * one chroma + 1 x colorbar
    with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
        fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
                            glm_estimates.data, design_matrix,
                            requested_conditions=['A', 'B'])
    assert len(fig.axes) == 3

    # One condition * two chroma + 2 x colorbar
    fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix,
                        requested_conditions=['A'])
    assert len(fig.axes) == 4

    # One condition * one chroma + 1 x colorbar
    with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
        fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
                            glm_estimates.data,
                            design_matrix, requested_conditions=['A'])
    assert len(fig.axes) == 2

    # One condition * one chroma, colorbar disabled
    with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
        fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
                            glm_estimates.data, design_matrix,
                            colorbar=False, requested_conditions=['A'])
    assert len(fig.axes) == 1

    # A results dict missing channels present in raw must raise
    glm_estimates_subset = {name: glm_estimates.data[name]
                            for name in raw_haemo.ch_names[0:3]}
    with pytest.raises(RuntimeError, match="does not match regression"):
        plot_glm_topo(raw_haemo, glm_estimates_subset, design_matrix)
def test_run_plot_GLM_contrast_topo():
    """Plot a deprecated contrast topomap and check the axis count."""
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast
    design_matrix = make_first_level_design_matrix(
        raw_intensity, drift_order=1, drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    glm_est = run_glm(raw_haemo, design_matrix)
    # One unit contrast vector per design-matrix column.
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = {column: row
                   for column, row in zip(design_matrix.columns,
                                          contrast_matrix)}
    contrast_LvR = basic_conts['A'] - basic_conts['B']
    with pytest.deprecated_call(match='comprehensive GLM'):
        contrast = mne_nirs.statistics.compute_contrast(
            glm_est.data, contrast_LvR)
    with pytest.deprecated_call(match='comprehensive GLM'):
        fig = mne_nirs.visualisation.plot_glm_contrast_topo(
            raw_haemo, contrast)
    # Two chromophore topomaps + one colorbar
    assert len(fig.axes) == 3
def test_run_plot_GLM_contrast_topo_single_chroma():
    """Same as the contrast-topo test but restricted to the hbo chromophore."""
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast
    design_matrix = make_first_level_design_matrix(
        raw_intensity, drift_order=1, drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo = raw_haemo.pick(picks='hbo')
    glm_est = run_glm(raw_haemo, design_matrix)
    # One unit contrast vector per design-matrix column.
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = {column: row
                   for column, row in zip(design_matrix.columns,
                                          contrast_matrix)}
    contrast_LvR = basic_conts['A'] - basic_conts['B']
    with pytest.deprecated_call(match='comprehensive GLM'):
        contrast = mne_nirs.statistics.compute_contrast(
            glm_est.data, contrast_LvR)
    with pytest.deprecated_call(match='comprehensive GLM'):
        fig = mne_nirs.visualisation.plot_glm_contrast_topo(
            raw_haemo, contrast)
    # One chromophore topomap + one colorbar
    assert len(fig.axes) == 2
def test_fig_from_axes():
    """``_get_fig_from_axes`` must reject objects that are not axes."""
    from mne_nirs.visualisation._plot_GLM_topo import _get_fig_from_axes
    not_axes = [1, 2, 3]
    with pytest.raises(RuntimeError, match="Unable to extract figure"):
        _get_fig_from_axes(not_axes)
# Exercises the ``surface`` argument of the projection plot.
@requires_mne_1
def test_run_plot_GLM_projection(requires_pyvista):
    """Project hbo GLM estimates for one condition onto a cortical surface."""
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast
    design_matrix = make_first_level_design_matrix(
        raw_intensity, drift_order=1, drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    glm_estimates = run_glm(raw_haemo, design_matrix)
    df = glm_to_tidy(raw_haemo, glm_estimates.data, design_matrix)
    # Keep only the HbO estimates for condition A.
    df = df.query("Chroma in 'hbo'").query("Condition in 'A'")
    brain = plot_glm_surface_projection(raw_haemo.copy().pick("hbo"),
                                        df, clim='auto', view='dorsal',
                                        colorbar=True, size=(800, 700),
                                        value="theta", surface='white',
                                        subjects_dir=subjects_dir)
    assert type(brain) == mne.viz._brain.Brain
@requires_mne_1
@pytest.mark.parametrize('fname_raw, to_1020, ch_names', [
    (raw_path, False, None),
    (raw_path, True, 'numbered'),
    (raw_path, True, defaultdict(lambda: '')),
])
def test_plot_3d_montage(requires_pyvista, fname_raw, to_1020, ch_names):
    """Render a 3D montage, checking plotter cleanup and logged mapping info."""
    import pyvista
    pyvista.close_all()
    assert len(pyvista.plotting._ALL_PLOTTERS) == 0
    raw = mne.io.read_raw_nirx(fname_raw)
    if to_1020:
        # Collect the unique optode names (presumably 'S#'/'D#' tokens from
        # names like 'S1_D1 760' — verify against the NIRx naming) and remap
        # the standard 10-20 montage onto them.
        need = set(sum(
            (ch_name.split()[0].split('_') for ch_name in raw.ch_names),
            list()))
        mon = mne.channels.make_standard_montage('standard_1020')
        mon.rename_channels({h: n for h, n in zip(mon.ch_names, need)})
        raw.set_montage(mon)
    n_labels = len(raw.ch_names) // 2
    # Split the channel indices between two views.
    view_map = {'left-lat': np.arange(1, n_labels // 2),
                'caudal': np.arange(n_labels // 2, n_labels + 1)}
    # We use "sample" here even though it's wrong so that we can have a head
    # surface
    with catch_logging() as log:
        mne_nirs.viz.plot_3d_montage(
            raw.info, view_map, subject='sample', surface='white',
            subjects_dir=subjects_dir, ch_names=ch_names, verbose=True)
    # The plotter must be closed again after plotting.
    assert len(pyvista.plotting._ALL_PLOTTERS) == 0
    log = log.getvalue().lower()
    if to_1020:
        assert 'automatically mapped' in log
    else:
        assert 'could not' in log
| [
"mne.channels.make_standard_montage",
"mne_nirs.visualisation.plot_glm_contrast_topo",
"mne.preprocessing.nirs.beer_lambert_law",
"collections.defaultdict",
"pyvista.close_all",
"numpy.arange",
"mne_nirs.visualisation.plot_glm_topo",
"mne.utils.catch_logging",
"pytest.warns",
"mne_nirs.viz.plot_3d... | [((578, 611), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {'download': '(False)'}), '(download=False)\n', (595, 611), False, 'from mne.datasets import testing\n'), ((1572, 1647), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:"plot_glm_topo" has been deprecated.*:"""'], {}), '(\'ignore:"plot_glm_topo" has been deprecated.*:\')\n', (1598, 1647), False, 'import pytest\n'), ((936, 966), 'mne.io.read_raw_nirx', 'mne.io.read_raw_nirx', (['raw_path'], {}), '(raw_path)\n', (956, 966), False, 'import mne\n'), ((1698, 1713), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (1711, 1713), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((1790, 1881), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (1820, 1881), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((1992, 2045), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (2030, 2045), False, 'import mne\n'), ((2062, 2118), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (2101, 2118), False, 'import mne\n'), ((2139, 2172), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (2146, 2172), False, 'from mne_nirs.statistics import run_glm\n'), ((2183, 2242), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {}), '(raw_haemo, glm_estimates.data, design_matrix)\n', (2196, 2242), False, 'from mne_nirs.visualisation import plot_glm_topo, 
plot_glm_surface_projection\n'), ((2402, 2498), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {'requested_conditions': "['A', 'B']"}), "(raw_haemo, glm_estimates.data, design_matrix,\n requested_conditions=['A', 'B'])\n", (2415, 2498), False, 'from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection\n'), ((2946, 3037), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {'requested_conditions': "['A']"}), "(raw_haemo, glm_estimates.data, design_matrix,\n requested_conditions=['A'])\n", (2959, 3037), False, 'from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection\n'), ((4162, 4177), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (4175, 4177), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((4254, 4345), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (4284, 4345), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((4456, 4509), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (4494, 4509), False, 'import mne\n'), ((4526, 4582), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (4565, 4582), False, 'import mne\n'), ((4597, 4630), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (4604, 4630), False, 'from mne_nirs.statistics import run_glm\n'), ((4653, 4683), 'numpy.eye', 'np.eye', (['design_matrix.shape[1]'], {}), 
'(design_matrix.shape[1])\n', (4659, 4683), True, 'import numpy as np\n'), ((5284, 5299), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (5297, 5299), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((5376, 5467), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (5406, 5467), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((5578, 5631), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (5616, 5631), False, 'import mne\n'), ((5648, 5704), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (5687, 5704), False, 'import mne\n'), ((5763, 5796), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (5770, 5796), False, 'from mne_nirs.statistics import run_glm\n'), ((5819, 5849), 'numpy.eye', 'np.eye', (['design_matrix.shape[1]'], {}), '(design_matrix.shape[1])\n', (5825, 5849), True, 'import numpy as np\n'), ((6690, 6705), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (6703, 6705), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((6782, 6873), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (6812, 6873), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((6984, 7037), 
'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (7022, 7037), False, 'import mne\n'), ((7054, 7110), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (7093, 7110), False, 'import mne\n'), ((7131, 7164), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (7138, 7164), False, 'from mne_nirs.statistics import run_glm\n'), ((7174, 7231), 'mne_nirs.utils.glm_to_tidy', 'glm_to_tidy', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {}), '(raw_haemo, glm_estimates.data, design_matrix)\n', (7185, 7231), False, 'from mne_nirs.utils import glm_to_tidy\n'), ((7995, 8014), 'pyvista.close_all', 'pyvista.close_all', ([], {}), '()\n', (8012, 8014), False, 'import pyvista\n'), ((8077, 8108), 'mne.io.read_raw_nirx', 'mne.io.read_raw_nirx', (['fname_raw'], {}), '(fname_raw)\n', (8097, 8108), False, 'import mne\n'), ((766, 793), 'mne.utils.check_version', 'check_version', (['"""mne"""', '"""1.0"""'], {}), "('mne', '1.0')\n", (779, 793), False, 'from mne.utils import catch_logging, check_version\n'), ((2608, 2666), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Reducing GLM results"""'}), "(RuntimeWarning, match='Reducing GLM results')\n", (2620, 2666), False, 'import pytest\n'), ((3147, 3205), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Reducing GLM results"""'}), "(RuntimeWarning, match='Reducing GLM results')\n", (3159, 3205), False, 'import pytest\n'), ((3479, 3537), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Reducing GLM results"""'}), "(RuntimeWarning, match='Reducing GLM results')\n", (3491, 3537), False, 'import pytest\n'), ((3967, 4029), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""does not match regression"""'}), "(RuntimeError, match='does not match 
regression')\n", (3980, 4029), False, 'import pytest\n'), ((4039, 4100), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates_subset', 'design_matrix'], {}), '(raw_haemo, glm_estimates_subset, design_matrix)\n', (4052, 4100), False, 'from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection\n'), ((4877, 4926), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (4899, 4926), False, 'import pytest\n'), ((4947, 5011), 'mne_nirs.statistics.compute_contrast', 'mne_nirs.statistics.compute_contrast', (['glm_est.data', 'contrast_LvR'], {}), '(glm_est.data, contrast_LvR)\n', (4983, 5011), False, 'import mne_nirs\n'), ((5034, 5083), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (5056, 5083), False, 'import pytest\n'), ((5099, 5165), 'mne_nirs.visualisation.plot_glm_contrast_topo', 'mne_nirs.visualisation.plot_glm_contrast_topo', (['raw_haemo', 'contrast'], {}), '(raw_haemo, contrast)\n', (5144, 5165), False, 'import mne_nirs\n'), ((6043, 6092), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (6065, 6092), False, 'import pytest\n'), ((6113, 6177), 'mne_nirs.statistics.compute_contrast', 'mne_nirs.statistics.compute_contrast', (['glm_est.data', 'contrast_LvR'], {}), '(glm_est.data, contrast_LvR)\n', (6149, 6177), False, 'import mne_nirs\n'), ((6200, 6249), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (6222, 6249), False, 'import pytest\n'), ((6265, 6331), 'mne_nirs.visualisation.plot_glm_contrast_topo', 'mne_nirs.visualisation.plot_glm_contrast_topo', (['raw_haemo', 'contrast'], {}), '(raw_haemo, contrast)\n', (6310, 6331), False, 'import mne_nirs\n'), ((6485, 6546), 'pytest.raises', 'pytest.raises', 
(['RuntimeError'], {'match': '"""Unable to extract figure"""'}), "(RuntimeError, match='Unable to extract figure')\n", (6498, 6546), False, 'import pytest\n'), ((6556, 6585), 'mne_nirs.visualisation._plot_GLM_topo._get_fig_from_axes', '_get_fig_from_axes', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (6574, 6585), False, 'from mne_nirs.visualisation._plot_GLM_topo import _get_fig_from_axes\n'), ((8257, 8308), 'mne.channels.make_standard_montage', 'mne.channels.make_standard_montage', (['"""standard_1020"""'], {}), "('standard_1020')\n", (8291, 8308), False, 'import mne\n'), ((8476, 8503), 'numpy.arange', 'np.arange', (['(1)', '(n_labels // 2)'], {}), '(1, n_labels // 2)\n', (8485, 8503), True, 'import numpy as np\n'), ((8531, 8569), 'numpy.arange', 'np.arange', (['(n_labels // 2)', '(n_labels + 1)'], {}), '(n_labels // 2, n_labels + 1)\n', (8540, 8569), True, 'import numpy as np\n'), ((8671, 8686), 'mne.utils.catch_logging', 'catch_logging', ([], {}), '()\n', (8684, 8686), False, 'from mne.utils import catch_logging, check_version\n'), ((8703, 8851), 'mne_nirs.viz.plot_3d_montage', 'mne_nirs.viz.plot_3d_montage', (['raw.info', 'view_map'], {'subject': '"""sample"""', 'surface': '"""white"""', 'subjects_dir': 'subjects_dir', 'ch_names': 'ch_names', 'verbose': '(True)'}), "(raw.info, view_map, subject='sample', surface=\n 'white', subjects_dir=subjects_dir, ch_names=ch_names, verbose=True)\n", (8731, 8851), False, 'import mne_nirs\n'), ((7869, 7893), 'collections.defaultdict', 'defaultdict', (["(lambda : '')"], {}), "(lambda : '')\n", (7880, 7893), False, 'from collections import defaultdict\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import json
import random
import os
import string
import numpy as np
from parlai.core.agents import create_agent
from parlai.core.message import Message
from parlai.core.worlds import DialogPartnerWorld, validate
from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld
from parlai.utils.misc import warn_once
from .agents import _path, load_from_path
class InteractiveWorld(DialogPartnerWorld):
  """
  Interactive world for Airdialogue.
  Pairs a human agent (self.agents[0]) with a model agent (self.agents[1]).
  At the start of each episode the human picks an intent from a small menu
  (_get_new_intent); the matching expected action, flights and reservation
  from the knowledge base are shown, and the two agents then alternate turns
  in parley() until the episode ends.
  NOTE(review): the previous docstring here was copy-pasted from the
  wizard_of_wikipedia task and did not describe this world.
  """
  @staticmethod
  def add_cmdline_args(argparser):
    """Add world-specific command-line arguments to ``argparser``."""
    group = argparser.add_argument_group('Air Interactive World Args')
    group.add_argument(
        '--intent-choice',
        type=int,
        default=3,
        help='number of intent choices in dialogue (default: 3)')
  def __init__(self, opt, agents, shared=None):
    """Set up the two agents, parse agent type and data size from the task
    string, and load the airdialogue data (or reuse it from ``shared``)."""
    super().__init__(opt, agents, shared)
    print('[ loading airdialogue.. ]')
    self.opt = opt
    self.cnt = 0  # turn counter within the current episode
    self.human_agent = self.agents[0]
    self.model_agent = self.agents[1]
    defaultagent = 'agent'
    defaultsize = -1
    # task string is expected to look like 'airdialogue:<agenttype>:<datasize>'
    task = opt.get('task', f'airdialogue:{defaultagent}:{defaultsize}')
    self.agenttype = task.split(':')[1] if len(
        task.split(':')) > 1 else defaultagent
    self.datasize = int(
        task.split(':')[2]) if len(task.split(':')) > 2 else defaultsize
    if shared is not None:
      # reuse the data already loaded by another copy of this world
      self.messages = shared['messages']
      self.actions = shared['actions']
      self.expected_actions = shared['expected_actions']
      self.num_ex = shared['num_ex']
      self.intents = shared['intents']
      self.intent_objs = shared['intent_objs']
      self.kbs = shared['kbs']
    else:
      self.messages = []
      self.actions = []
      self.expected_actions = []
      self.intents = []
      self.intent_objs = []
      self.kbs = []
      self.num_ex = 0
      jsons_path = _path(opt)
      self._setup_data(jsons_path)
    self.num_intent_choice = opt.get('intent_choice', 3)
  def _setup_data(self, jsons_path):
    """Load the dev data/kb json files; ``load_from_path`` populates the
    per-episode lists (messages, actions, intents, kbs, ...) on self."""
    data_path = os.path.join(jsons_path, 'dev_data.json')
    kb_path = os.path.join(jsons_path, 'dev_kb.json')
    size = self.datasize
    load_from_path(
        self,
        data_path,
        kb_path,
        size,
        load_intent=True,
        load_kb=True,
        load_expected_action=True)
  def _get_new_intent(self):
    """Show the human a random menu of intents (plus '[OTHER INTENT]' to
    re-roll), loop until a valid letter is chosen, print the expected
    action/flights/reservation for the choice, and return its episode id."""
    random.seed()
    intent_ids = random.sample(
        range(len(self.intents)), self.num_intent_choice - 1)
    intents = [self.intents[i] for i in intent_ids]
    intents.append('[OTHER INTENT]')
    letters = list(string.ascii_uppercase)[:self.num_intent_choice]
    intent_list = {x: y for x, y in zip(letters, intents)}
    intent_text = '\n'.join(
        ['{}: {}'.format(k, v) for k, v in intent_list.items()])
    intent_id_list = {x: y for x, y in zip(letters[:-1], intent_ids)}
    done = False
    while not done:
      self.human_agent.observe({
          'text':
              'Your role is {}\nPlease choose one of the following intents by typing '
              'A, B, C, ..., etc. : \n\n{}\n'.format(self.agenttype,
                                                     intent_text)
      })
      intent_act = self.human_agent.act()
      # only the first typed character is used as the menu choice
      choice = intent_act['text'][0].upper()
      if choice in intent_list:
        if intent_list[choice] == '[OTHER INTENT]':
          # re-sample a fresh menu and ask again
          intent_ids = random.sample(
              range(len(self.intents)), self.num_intent_choice - 1)
          intents = [self.intents[i] for i in intent_ids]
          intents.append('[OTHER INTENT]')
          letters = list(string.ascii_uppercase)[:self.num_intent_choice]
          intent_list = {x: y for x, y in zip(letters, intents)}
          intent_text = '\n'.join(
              ['{}: {}'.format(k, v) for k, v in intent_list.items()])
          intent_id_list = {x: y for x, y in zip(letters[:-1], intent_ids)}
        else:
          done = True
      else:
        self.human_agent.observe(
            {'text': 'Invalid response, please try again.'})
    self.human_agent.observe(
        {'text': f'[Your chosen intent is: {intent_list[choice]}]'})
    chosen_id = intent_id_list[choice]
    expected_action = self.expected_actions[chosen_id]
    self.human_agent.observe(
        {'text': f'[expected action is: {expected_action}]'})
    for flight in expected_action['flight']:
      # flight numbers are offset by 1000 relative to kb indices
      expected_flight = flight - 1000
      # import ipdb; ipdb.set_trace()
      expected_flight = self.kbs[chosen_id]['kb'][expected_flight]
      self.human_agent.observe(
          {'text': f'[expected flight is: {expected_flight}]'})
    if len(expected_action['flight']) == 0:
      self.human_agent.observe({'text': f'[expected flight is: None]'})
    reservation = self.kbs[chosen_id]['reservation']
    self.human_agent.observe(
        {'text': f'[reservation flight is: {reservation}]'})
    return chosen_id
  def _add_context(self, action):
    """Attach the episode context (tickets/reservation for the agent role,
    intent for the customer role) to ``action`` in place and return it."""
    entrys = self.messages[self.context_id][0].split('\n')
    entrys[-1] = action['text']
    if self.agenttype == 'agent':
      action['tickets'] = entrys[:-2]
      action['reservation'] = entrys[-2]
      # the following are actually not used in eval just for calculate loss
      # need to remove in the future
      action['action_name'] = self.actions[self.context_id]['name']
      action['action_flight'] = self.actions[self.context_id]['flight']
      action['action_status'] = self.actions[self.context_id]['status']
      action['action_intent'] = self.actions[self.context_id]['intent']
    elif self.agenttype == 'customer':
      action['intent'] = entrys[0]
      assert len(entrys) == 2
    action['return_encoder_state'] = True
    return action
  def reset(self):
    """Reset both agents and per-episode state for a fresh conversation."""
    super().reset()
    self.cnt = 0
    self.context_id = None
    self.model_agent.reset()
    self.human_agent.reset()
    self.acts = [None, None]
  def get_air_score(self):
    """Score the model's final prediction against the expected action,
    show the breakdown (and chosen flights) to the human, and return it."""
    score_obj = self.model_agent.get_air_score(
        self.acts[1]['encoder_states'], self.expected_actions[self.context_id],
        self.kbs[self.context_id])
    score_text = '\n'.join([f" - {k}: {v}" for k, v in score_obj.items()])
    for flight in score_obj['flight']:
      # flight numbers are offset by 1000 relative to kb indices
      chosen_flight = self.kbs[self.context_id]['kb'][flight - 1000]
      score_text += f'\nChosen Flight: {chosen_flight}'
    self.human_agent.observe({
        'id': 'Final Agent Prediction',
        'text': '\n' + score_text
    })
    return score_obj
  def parley(self):
    """
    Loop between model and human.
    One call is one exchange: get the human act (or a silence placeholder
    on the first turn when the model goes first), add context, let the
    model respond, and show the response to the human. The human ends an
    episode by raising StopIteration (e.g. EOF from the input agent).
    """
    if self.cnt == 0:
      self.context_id = self._get_new_intent()
      self.acts = [None, None]
      self.human_first = random.choice([0, 1])
    # possibly get human act first
    if self.cnt == 0 and not self.human_first:
      self.acts[0] = Message({'text': '__SILENCE__', 'episode_done': False})
    else:
      try:
        self.acts[0] = self.human_agent.act()
      except StopIteration:
        # human ended the chat; score the agent role before resetting
        if self.agenttype != 'customer':
          self.get_air_score()
        print('[ CHAT DONE ]')
        print('\n[ Preparing new chat... ]\n')
        self.reset()
        return
    act = deepcopy(self.acts[0])
    # add context to the model observation
    act = self._add_context(act)
    # model observes context and human (apprentice) act
    self.model_agent.observe(validate(act))
    # model agent act
    self.acts[1] = self.model_agent.act()
    # human (apprentice) agent observes model act
    # remove encoder_states to prevent output
    act = deepcopy(self.acts[1])
    if 'encoder_states' in act:
      del act['encoder_states']
    self.human_agent.observe(validate(act))
    self.update_counters()
    self.cnt += 1
    if self.episode_done():
      print('[ CHAT DONE ]')
      print('\n[ Preparing new chat... ]\n')
      self.cnt = 0
      self.model_agent.reset()
class InteractiveCustomerWorld(InteractiveWorld):
  """Role-specific alias of InteractiveWorld (no behavior change);
  presumably selected by task/world name — verify against agents.py."""
  pass
class InteractiveAgentWorld(InteractiveWorld):
  """Role-specific alias of InteractiveWorld (no behavior change);
  presumably selected by task/world name — verify against agents.py."""
  pass
class SelfChatBothWorld(InteractiveWorld):
  """
  Self-chat world where a customer agent talks to an airline agent.
  Requires agenttype 'both'. When the model file is 'human:...' the
  customer side is played by a human ('human eval' mode) and template
  responses are offered to speed up typing. Episode scoring compares the
  agent's predicted action against the expected action for the context.
  """
  def __init__(self, opt, agents, shared=None):
    """Set up the customer/agent pair, human-eval mode, and reward tallies."""
    super().__init__(opt, agents, shared)
    assert self.agenttype == 'both', 'agenttype must be both for selfplay'
    if opt['model_file'].split(':')[0] == 'human':
      print('[Human Evaluation]')
      self.human_eval = True
    else:
      self.human_eval = False
    self.customer_agent = self.agents[0]
    self.agent_agent = self.agents[1]
    self.max_turn_cnt = self.opt.get('selfchat_max_turns', 10)
    self.episode_cnt = 0
    self.agent_encoder_states = None
    self.score = None
    # running per-metric history across episodes, printed in report()
    self.gather_rewards = {
        'reward': [],
        'flight_score': [],
        'name_score': [],
        'status_score': [],
    }
    self.start_cid = self.opt.get('start_cid', 0)
  @staticmethod
  def add_cmdline_args(argparser):
    """Add self-chat-specific command-line arguments."""
    group = argparser.add_argument_group('Air SelfChat World Args')
    group.add_argument(
        '--start-cid', type=int, default=0, help='offset of contextid')
  def display(self):
    """Render the exchange, with a separator at the start of each episode."""
    s = super().display()
    if self.cnt == 0:
      s += '\n==============================\n'
    return s
  def _add_context(self, action, agenttype):
    """Attach the episode context for the given role to ``action`` in place.
    Unlike the parent, the role is passed explicitly since both sides are
    models here, and the entry layout differs (tickets end at -3)."""
    entrys = self.messages[self.context_id][0].split('\n')
    entrys[-1] = action['text']
    if agenttype == 'agent':
      action['tickets'] = entrys[:-3]
      action['reservation'] = entrys[-3]
      # the following are actually not used in eval just for calculate loss
      # need to remove in the future
      action['action_name'] = self.actions[self.context_id]['name']
      action['action_flight'] = self.actions[self.context_id]['flight']
      action['action_status'] = self.actions[self.context_id]['status']
      action['action_intent'] = self.actions[self.context_id]['intent']
    elif agenttype == 'customer':
      action['intent'] = entrys[-2]
    return action
  def episode_done(self):
    """Heuristic end-of-episode detection based on closing phrases,
    the human explicitly ending, or hitting the turn limit."""
    # add a heuristic for episode_done
    # this one will break the current parlai selfplay script
    if self.acts[0] is not None and self.acts[1] is not None:
      if 'thank you' in self.acts[0]['text'].lower(
      ) and 'thank you' in self.acts[1]['text'].lower():
        return True
      if 'have a nice day' in self.acts[0]['text'].lower(
      ) or 'have a nice day' in self.acts[1]['text'].lower():
        return True
      if 'thank you' in self.acts[0]['text'].lower(
      ) and 'welcome' in self.acts[1]['text'].lower():
        return True
      if 'welcome' in self.acts[0]['text'].lower(
      ) and 'thank you' in self.acts[1]['text'].lower():
        return True
      if self.human_done:
        return True
    return self.cnt >= self.max_turn_cnt
  def get_air_score(self):
    """Score the agent's prediction (from its saved encoder states) against
    the expected action; return (score dict, printable breakdown)."""
    score_obj = self.model_agent.get_air_score(
        self.agent_encoder_states, self.expected_actions[self.context_id],
        self.kbs[self.context_id])
    score_text = '\n'.join([f" - {k}: {v}" for k, v in score_obj.items()])
    for flight in score_obj['flight']:
      # flight numbers are offset by 1000 relative to kb indices
      chosen_flight = self.kbs[self.context_id]['kb'][flight - 1000]
      score_text += f'\nChosen Flight: {chosen_flight}'
    return score_obj, score_text
  def write(self, logger, reports, outdir):
    """Dump logged conversations plus per-episode reports and context to
    ``outdir``/log.jsonl, one json object per line."""
    os.makedirs(outdir, exist_ok=True)
    outfile = os.path.join(outdir, 'log.jsonl')
    conversations = logger.get_logs()
    # dont really how it works
    # hack to remove empty logs
    conversations = [i for i in conversations if len(i) > 0]
    def format_conv(conv):
      # flatten (customer, agent) turn pairs into speaker-tagged dicts
      new_conv = []
      for i in conv:
        new_conv.append({'speaker': 'customer', 'text': i[0]['text']})
        new_conv.append({'speaker': 'agent', 'text': i[1]['text']})
      return new_conv
    if len(conversations) != len(reports):
      print('WARNING! length difference')
      # NOTE(review): leftover interactive debugger; drops into ipdb on
      # mismatch — consider removing before unattended runs
      import ipdb
      ipdb.set_trace()
    with open(outfile, 'w') as fout:
      #import ipdb; ipdb.set_trace()
      for conv, re in zip(conversations, reports):
        r = {}
        r['conversation'] = format_conv(conv)
        r['report'] = re
        context_id = re['id']
        r['expected_action'] = self.expected_actions[context_id]
        r['intent'] = self.intent_objs[context_id]
        r['kb'] = self.kbs[context_id]
        fout.write(json.dumps(r) + '\n')
  def report(self):
    """Append this episode's scores to the running tallies, print the
    running means, and return the episode score tagged with its context id."""
    for k, v in self.gather_rewards.items():
      v.append(self.score[k])
      # rebinds v to the mean for printing; the list in gather_rewards
      # keeps the appended value
      v = np.array(v).mean()
      print(f"Gather {k} : {v}")
    r = deepcopy(self.score)
    r['id'] = self.context_id
    return r
  def reset(self):
    """Reset both agents and advance to the next episode/context."""
    #self.reset()
    self.customer_agent.reset()
    self.agent_agent.reset()
    self.episode_cnt += 1
    self.cnt = 0
    self.acts = [None, None]
  def customer_obs(self, act):
    """Deliver the agent's act to the customer. In human-eval mode, also
    build a list of template responses (self.predefine_acts) from the
    episode intent, reorder them to match what the agent just asked, and
    attach them to the observation as 'Act -i' fields."""
    _act = act
    self.predefine_acts = []
    if self.human_eval:
      _act = {}
      _act['text'] = act['text']
      _act['id'] = act['id']
      if self.cnt == 0:
        _act['intent'] = act['intent']
      # define some template reponses to ease human eval
      intent = self.intent_objs[self.context_id]
      if self.cnt == 0:
        print(intent)
        if intent['goal'] == 'book':
          self.predefine_acts.append('Hi, I want to book a ticket.')
        else:
          self.predefine_acts.append(
              f"Hi, I want to {intent['goal']} a reservation.")
      else:
        self.predefine_acts.append(f"My name is {intent['name']}")
        if intent['goal'] in ['book', 'change']:
          self.predefine_acts.append(
              f"My origin is {intent['departure_airport']} and destination is {intent['return_airport']}."
          )
          # Add dates
          MONTH_DICT = {
              'Jan': '01',
              'Feb': '02',
              'Mar': '03',
              'Apr': '04',
              'May': '05',
              'Jun': '06',
              'Jul': '07',
              'Aug': '08',
              'Sep': '09',
              'Oct': '10',
              'Nov': '11',
              'Dec': '12'
          }
          m1 = MONTH_DICT[intent['departure_month'][:3]]
          m2 = MONTH_DICT[intent['return_month'][:3]]
          d1 = m1 + '/' + intent['departure_day']
          if 'departure_time' in intent and intent['departure_time'] != 'None':
            d1 += ' ' + intent['departure_time']
          d2 = m2 + '/' + intent['return_day']
          if 'return_time' in intent and intent['return_time'] != 'None':
            d2 += ' ' + intent['return_time']
          self.predefine_acts.append(f"Start on {d1} and return on {d2}.")
          # Add specification
          spec = ''
          if 'max_connections' in intent:
            spec += f"The connection limit is {intent['max_connections']} . "
          if 'max_price' in intent:
            spec += f"The price limit is {intent['max_price']} . "
          pref = []
          if 'class' in intent and intent['class'] != 'None':
            pref.append(f"{intent['class']} class")
          if 'airline' in intent:
            pref.append(f"{intent['airline']} airline")
          if len(pref) == 1:
            spec += f"And I prefer {pref[0]} ."
          # FIX: was 'elif len(pref) == 1' (duplicate condition), making the
          # two-preference branch unreachable (and it indexes pref[1])
          elif len(pref) == 2:
            spec += f"And I prefer {pref[0]} and {pref[1]} ."
          self.predefine_acts.append(spec)
        self.predefine_acts.extend(
            ['Yes.', 'Ok.', 'Thank you.', "That's fine, thank you."])
        # move the most likely reply to the front based on keywords in the
        # agent's last utterance ('scource' looks like a typo for 'source' —
        # left unchanged since it matches model output text)
        if 'sorry' in _act['text'] or 'no reservation' in _act['text']:
          # say that's fine
          self.predefine_acts = [self.predefine_acts[-1]
                                ] + self.predefine_acts[:-1]
        elif 'airport' in _act['text'] or 'scource' in _act['text'] or 'destination' in _act['text'] \
            or 'details' in _act['text'] or 'codes' in _act['text']:
          # say airport
          self.predefine_acts = [
              self.predefine_acts[1]
          ] + self.predefine_acts[0:1] + self.predefine_acts[2:]
        elif 'dates' in _act['text']:
          # say dates
          self.predefine_acts = [
              self.predefine_acts[2]
          ] + self.predefine_acts[0:2] + self.predefine_acts[3:]
        elif 'proceed for booking' in _act['text'] or 'shall' in _act['text'] or 'are you ok with' in _act['text'] \
            or 'would you like' in _act['text'] or 'can i' in _act['text']:
          # say yes
          self.predefine_acts = [
              self.predefine_acts[-4]
          ] + self.predefine_acts[:-4] + self.predefine_acts[-3:]
        elif 'wait' in _act['text']:
          # say ok
          self.predefine_acts = [
              self.predefine_acts[-3]
          ] + self.predefine_acts[:-3] + self.predefine_acts[-2:]
        elif 'booked' in _act['text'] or 'has been' in _act['text'] or \
            'is done' in _act['text'] or 'is confirmed' in _act['text']:
          # say thank you
          self.predefine_acts = [
              self.predefine_acts[-2]
          ] + self.predefine_acts[:-2] + self.predefine_acts[-1:]
      try:
        # best-effort: prepend the reference utterance when reference data
        # is attached to the customer agent
        if self.customer_agent.ref_data is not None:
          ref_text = self.customer_agent.ref_data[self.context_id][self.cnt * 2
                                                                  + 2]['text']
          self.predefine_acts = [ref_text] + self.predefine_acts
      except Exception:  # was a bare except; keep best-effort but don't
        pass             # swallow KeyboardInterrupt/SystemExit
      for i, t in enumerate(self.predefine_acts):
        _act[f"Act -{i}"] = t
    self.customer_agent.observe(validate(_act))
  def customer_act(self):
    """Get the customer's act. In human-eval mode, '-<i>' picks template
    response i; 'thank you' from the human marks the episode done."""
    if not self.human_eval or len(self.predefine_acts) == 0:
      return self.customer_agent.act()
    else:
      act = self.customer_agent.act()
      text = act['text']
      if len(text) == 2 and text[0] == '-' and text[1:].isdigit():
        text = text[1:]
        if int(text) < len(self.predefine_acts):
          act.force_set('text', self.predefine_acts[int(text)])
          act.force_set('id', 'customer')
          print(act['text'])
      if 'thank you' in act['text'].lower():
        self.human_done = True
      return act
  def parley(self):
    """
    Loop between model and human.
    One call is one customer/agent exchange. At episode end the agent's
    prediction is scored and True is returned; otherwise False.
    """
    self.human_done = False
    if self.cnt == 0:
      self.context_id = self.episode_cnt + self.start_cid
      self.acts = [None, None]
      self.agent_first = False
    # possibly get customer act first
    if self.cnt == 0 and not self.agent_first:
      self.acts[0] = Message({
          'id': 'customer',
          'text': '__SILENCE__',
          'episode_done': False
      })
    else:
      if self.cnt == 0:
        preact = Message({'text': '__SILENCE__', 'episode_done': False})
        preact = self._add_context(preact, 'customer')
        self.customer_obs(preact)
      act = self.customer_act()
      self.acts[0] = act
    # add context to the model observation
    act = deepcopy(self.acts[0])
    act = self._add_context(act, 'agent')
    act['return_encoder_state'] = True
    # agent observes context and human (apprentice) act
    self.agent_agent.observe(validate(act))
    # agent agent act
    act = self.agent_agent.act()
    # keep encoder states for final action scoring, but strip them from
    # the act that is shown/observed downstream
    self.agent_encoder_states = act.pop('encoder_states')
    self.acts[1] = act
    # customer agent observes model act
    act = deepcopy(self.acts[1])
    act = self._add_context(act, 'customer')
    self.customer_obs(act)
    self.update_counters()
    self.cnt += 1
    if self.episode_done():
      score_obj, score_text = self.get_air_score()
      self.score = score_obj
      #print(score_text)
      return True
    return False
| [
"copy.deepcopy",
"os.makedirs",
"ipdb.set_trace",
"random.choice",
"json.dumps",
"parlai.core.worlds.validate",
"random.seed",
"numpy.array",
"parlai.core.message.Message",
"os.path.join"
] | [((2913, 2954), 'os.path.join', 'os.path.join', (['jsons_path', '"""dev_data.json"""'], {}), "(jsons_path, 'dev_data.json')\n", (2925, 2954), False, 'import os\n'), ((2969, 3008), 'os.path.join', 'os.path.join', (['jsons_path', '"""dev_kb.json"""'], {}), "(jsons_path, 'dev_kb.json')\n", (2981, 3008), False, 'import os\n'), ((3235, 3248), 'random.seed', 'random.seed', ([], {}), '()\n', (3246, 3248), False, 'import random\n'), ((7951, 7973), 'copy.deepcopy', 'deepcopy', (['self.acts[0]'], {}), '(self.acts[0])\n', (7959, 7973), False, 'from copy import deepcopy\n'), ((8324, 8346), 'copy.deepcopy', 'deepcopy', (['self.acts[1]'], {}), '(self.acts[1])\n', (8332, 8346), False, 'from copy import deepcopy\n'), ((11936, 11970), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (11947, 11970), False, 'import os\n'), ((11985, 12018), 'os.path.join', 'os.path.join', (['outdir', '"""log.jsonl"""'], {}), "(outdir, 'log.jsonl')\n", (11997, 12018), False, 'import os\n'), ((13141, 13161), 'copy.deepcopy', 'deepcopy', (['self.score'], {}), '(self.score)\n', (13149, 13161), False, 'from copy import deepcopy\n'), ((19402, 19424), 'copy.deepcopy', 'deepcopy', (['self.acts[0]'], {}), '(self.acts[0])\n', (19410, 19424), False, 'from copy import deepcopy\n'), ((19795, 19817), 'copy.deepcopy', 'deepcopy', (['self.acts[1]'], {}), '(self.acts[1])\n', (19803, 19817), False, 'from copy import deepcopy\n'), ((7477, 7498), 'random.choice', 'random.choice', (['[0, 1]'], {}), '([0, 1])\n', (7490, 7498), False, 'import random\n'), ((7603, 7658), 'parlai.core.message.Message', 'Message', (["{'text': '__SILENCE__', 'episode_done': False}"], {}), "({'text': '__SILENCE__', 'episode_done': False})\n", (7610, 7658), False, 'from parlai.core.message import Message\n'), ((8137, 8150), 'parlai.core.worlds.validate', 'validate', (['act'], {}), '(act)\n', (8145, 8150), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((8440, 8453), 
'parlai.core.worlds.validate', 'validate', (['act'], {}), '(act)\n', (8448, 8453), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((12521, 12537), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (12535, 12537), False, 'import ipdb\n'), ((18043, 18057), 'parlai.core.worlds.validate', 'validate', (['_act'], {}), '(_act)\n', (18051, 18057), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((18983, 19056), 'parlai.core.message.Message', 'Message', (["{'id': 'customer', 'text': '__SILENCE__', 'episode_done': False}"], {}), "({'id': 'customer', 'text': '__SILENCE__', 'episode_done': False})\n", (18990, 19056), False, 'from parlai.core.message import Message\n'), ((19592, 19605), 'parlai.core.worlds.validate', 'validate', (['act'], {}), '(act)\n', (19600, 19605), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((19146, 19201), 'parlai.core.message.Message', 'Message', (["{'text': '__SILENCE__', 'episode_done': False}"], {}), "({'text': '__SILENCE__', 'episode_done': False})\n", (19153, 19201), False, 'from parlai.core.message import Message\n'), ((13081, 13092), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (13089, 13092), True, 'import numpy as np\n'), ((12953, 12966), 'json.dumps', 'json.dumps', (['r'], {}), '(r)\n', (12963, 12966), False, 'import json\n')] |
"""
General Setup and Imports
"""
get_ipython().run_line_magic('matplotlib', 'tk')
import matplotlib.pyplot as plt
from bluesky import RunEngine
from bluesky.callbacks.best_effort import BestEffortCallback
from bluesky.plans import *
from bluesky.preprocessors import run_wrapper
from bluesky.utils import install_nb_kicker
from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe
from functools import partial
from ophyd import Device, Component as Cpt
from ophyd.sim import SynAxis, SynSignal
from ophyd.signal import EpicsSignalRO
from bluesky.callbacks import LivePlot
from pswalker.plans import walk_to_pixel
import pcdsdevices
import numpy as np
import random
from bluesky.simulators import summarize_plan
from pcdsdevices.device_types import Newport
import argparse
def centroid_from_motor_cross(motor, motor2, size=640., scale=1., noise_scale = 1, cross_scale = .1):
    """
    Simulate a camera centroid driven by two motors.

    The centroid is ``motor.position * scale + motor2.position * cross_scale``
    plus Gaussian noise of standard deviation ``noise_scale``. A centroid
    falling outside the screen ``[0, size]`` reads back as 0.
    """
    # Draw the noise first so RNG consumption matches regardless of bounds.
    jitter = np.random.normal(scale=noise_scale)
    primary = motor.position
    cross = motor2.position
    reading = primary * scale + cross * cross_scale
    # Off-screen centroids read as 0.
    if not 0. <= reading <= size:
        return 0.
    return reading + jitter
def plan_simultaneously(x_centroid, y_centroid, x, y, x_target=None, y_target= None):
    """
    Bluesky plan that walks a beam centroid to a target pixel on both axes.

    Uses ``walk_to_pixel`` from pswalker iteratively, alternating between the
    x and y axes until both centroids are within tolerance (atol=3) of their
    targets. Three live plots show the paths taken: y vs x centroid, both
    centroids vs the y motor, and both centroids vs the x motor.

    Parameters
    ----------
    x_centroid, y_centroid :
        Readable signals for the beam centroid on each axis.
    x, y :
        Motors that move the beam along the two axes.
    x_target, y_target : int, optional
        Target pixel on each axis; prompted for interactively (``input``)
        when not given.
    """
    #Create a figure
    fig = plt.figure(figsize=(15,10))
    fig.subplots_adjust(hspace=0.3, wspace=0.4)
    #The first subplot, which plots the y_centroid vs x_centroid
    ax1 = fig.add_subplot(2, 2, 1)
    ax1.invert_yaxis()
    x_centroid_y_centroid = LivePlot(y_centroid.name, x_centroid.name, ax = ax1, marker='x', markersize=7, color='orange')
    #The second subplot, which plots the y_centroid and x_centroid with same x-axis (y_motor)
    ax2 = fig.add_subplot(2, 2, 3)
    ax2.set_ylabel(y_centroid.name, color='red')
    ax3 = ax2.twinx()  # second y-axis sharing the same x-axis
    # ax2.invert_yaxis()
    # ax3.invert_yaxis()
    ax3.set_ylabel(x_centroid.name, color='blue')
    y_plot_y_centroid = LivePlot(y_centroid.name, y.name, ax = ax2, marker='x', markersize=6, color='red')
    y_plot_x_centroid = LivePlot(x_centroid.name, y.name, ax = ax3, marker='o', markersize=6, color='blue')
    #The third subplot, which plots the y_centroid and x_centroid with same x-axis (x_motor)
    ax4 = fig.add_subplot(2, 2, 4)
    ax4.set_ylabel(y_centroid.name, color='green')
    ax5 = ax4.twinx()
    ax5.set_ylabel(x_centroid.name, color='purple')
    x_plot_y_centroid = LivePlot(y_centroid.name, x.name, ax = ax4, marker='x', markersize=6, color='green')
    x_plot_x_centroid = LivePlot(x_centroid.name, x.name, ax = ax5, marker='o', markersize=6, color='purple')
    #Subscribe the plots (tokens are kept so they can be unsubscribed below)
    token_x_centroid_y_centroid = yield from subscribe('all', x_centroid_y_centroid)
    token_y_plot_x_centroid = yield from subscribe('all', y_plot_x_centroid)
    token_y_plot_y_centroid = yield from subscribe('all', y_plot_y_centroid)
    token_x_plot_x_centroid = yield from subscribe('all', x_plot_x_centroid)
    token_x_plot_y_centroid = yield from subscribe('all', x_plot_y_centroid)
    #Start a new run
    yield from open_run(md={'detectors': [(x_centroid.name), (y_centroid.name)],
                          'motors': [(x.name), (y.name)],
                          'hints': {'dimensions': [(x.hints['fields'], 'primary'),
                                                   (y.hints['fields'], 'primary')]}})
    #Ask for the target values
    if x_target is None:
        x_target = int(input('Enter the x value: '))
    if y_target is None:
        y_target = int(input('Enter the y value: '))
    #Iteratively move until x_target and x-centroid are within a certain threshold of each other
    # (x first; y only once x is within tolerance — re-checked each pass since
    # the axes are coupled through the cross term)
    while True:
        if not np.isclose(x_target, x_centroid.get(), atol=3):
            yield from walk_to_pixel(x_centroid, x, x_target, first_step=0.1,
                          target_fields=[x_centroid.name, x.name], tolerance = 3, average = 5,
                          system=[y, y_centroid])
        elif not np.isclose(y_target, y_centroid.get(), atol = 3):
            yield from walk_to_pixel(y_centroid, y, y_target, first_step=0.1, tolerance = 3, average = 5,
                          target_fields=[y_centroid.name, y.name],
                          system=[x, x_centroid])
        else:
            break
    # plt.show(block=True)
    #Close the run
    yield from close_run()
    #Unsubscribe the plots
    yield from unsubscribe(token_x_centroid_y_centroid)
    yield from unsubscribe(token_y_plot_x_centroid)
    yield from unsubscribe(token_y_plot_y_centroid)
    yield from unsubscribe(token_x_plot_x_centroid)
    yield from unsubscribe(token_x_plot_y_centroid)
if __name__ == '__main__':
    """
    This creates multiple dependencies that users can use when running the Spatial Overlap Scan
    """
    # --sim runs against synthetic ophyd devices instead of the beamline
    parser = argparse.ArgumentParser(description='Spatial overlap of timetool')
    parser.add_argument('--sim', action='store_true', default=False, help='Do a simulated scan')
    args = parser.parse_args()
    # Interactive matplotlib mode
    plt.ion()
    # Create a RunEngine
    RE = RunEngine()
    # Use BestEffortCallback for nice vizualizations during scans
    # NOTE(review): bec is created but never subscribed to RE — confirm intent
    bec = BestEffortCallback()
    # Install our notebook kicker to have plots update during a scan
    install_nb_kicker()
    if args.sim:
        # Create our motors
        x_motor = SynAxis(name='x')
        y_motor = SynAxis(name='y')
        #Defines relationships between centroids and motors
        # (each centroid depends mostly on its own motor plus a small cross term)
        x_centroid = SynSignal(func=partial(centroid_from_motor_cross, x_motor,y_motor, noise_scale = 1), name='x_syn')
        y_centroid = SynSignal(func=partial(centroid_from_motor_cross, y_motor,x_motor), name='y_syn')
        print('Running Simulated Scan')
    else:
        #The Newport motors
        x_motor = Newport('XPP:LAS:MMN:13', name = 'real_x')
        y_motor = Newport('XPP:LAS:MMN:14', name = 'real_y')
        #Readback from actual beamline devices
        x_centroid = EpicsSignalRO('XPP:OPAL1K:01:Stats2:CentroidX_RBV', name = 'x_readback')
        y_centroid = EpicsSignalRO('XPP:OPAL1K:01:Stats2:CentroidY_RBV', name = 'y_readback')
        print('Running Real Scan')
    #Executes the plan
    RE(plan_simultaneously(x_centroid, y_centroid, x_motor, y_motor), md={'plan_name': 'special'})
    print('Spatial Overlap Scan is complete')
"""
Things to fix/consider:
Lose ipython dependency
User can set tolerance(Look at Spatial_Overlap_Scan_Annotated_Dependecoes.py)
Solve edge case:
Limits of the motor motion
""" | [
"bluesky.utils.install_nb_kicker",
"bluesky.plan_stubs.close_run",
"ophyd.signal.EpicsSignalRO",
"functools.partial",
"argparse.ArgumentParser",
"bluesky.plan_stubs.unsubscribe",
"bluesky.callbacks.LivePlot",
"bluesky.callbacks.best_effort.BestEffortCallback",
"pswalker.plans.walk_to_pixel",
"matp... | [((997, 1032), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'noise_scale'}), '(scale=noise_scale)\n', (1013, 1032), True, 'import numpy as np\n'), ((2125, 2153), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (2135, 2153), True, 'import matplotlib.pyplot as plt\n'), ((2357, 2453), 'bluesky.callbacks.LivePlot', 'LivePlot', (['y_centroid.name', 'x_centroid.name'], {'ax': 'ax1', 'marker': '"""x"""', 'markersize': '(7)', 'color': '"""orange"""'}), "(y_centroid.name, x_centroid.name, ax=ax1, marker='x', markersize=7,\n color='orange')\n", (2365, 2453), False, 'from bluesky.callbacks import LivePlot\n'), ((2783, 2868), 'bluesky.callbacks.LivePlot', 'LivePlot', (['y_centroid.name', 'y.name'], {'ax': 'ax2', 'marker': '"""x"""', 'markersize': '(6)', 'color': '"""red"""'}), "(y_centroid.name, y.name, ax=ax2, marker='x', markersize=6, color='red'\n )\n", (2791, 2868), False, 'from bluesky.callbacks import LivePlot\n'), ((2890, 2976), 'bluesky.callbacks.LivePlot', 'LivePlot', (['x_centroid.name', 'y.name'], {'ax': 'ax3', 'marker': '"""o"""', 'markersize': '(6)', 'color': '"""blue"""'}), "(x_centroid.name, y.name, ax=ax3, marker='o', markersize=6, color=\n 'blue')\n", (2898, 2976), False, 'from bluesky.callbacks import LivePlot\n'), ((3257, 3344), 'bluesky.callbacks.LivePlot', 'LivePlot', (['y_centroid.name', 'x.name'], {'ax': 'ax4', 'marker': '"""x"""', 'markersize': '(6)', 'color': '"""green"""'}), "(y_centroid.name, x.name, ax=ax4, marker='x', markersize=6, color=\n 'green')\n", (3265, 3344), False, 'from bluesky.callbacks import LivePlot\n'), ((3367, 3455), 'bluesky.callbacks.LivePlot', 'LivePlot', (['x_centroid.name', 'x.name'], {'ax': 'ax5', 'marker': '"""o"""', 'markersize': '(6)', 'color': '"""purple"""'}), "(x_centroid.name, x.name, ax=ax5, marker='o', markersize=6, color=\n 'purple')\n", (3375, 3455), False, 'from bluesky.callbacks import LivePlot\n'), ((5710, 5776), 'argparse.ArgumentParser', 
'argparse.ArgumentParser', ([], {'description': '"""Spatial overlap of timetool"""'}), "(description='Spatial overlap of timetool')\n", (5733, 5776), False, 'import argparse\n'), ((5952, 5961), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (5959, 5961), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6007), 'bluesky.RunEngine', 'RunEngine', ([], {}), '()\n', (6005, 6007), False, 'from bluesky import RunEngine\n'), ((6084, 6104), 'bluesky.callbacks.best_effort.BestEffortCallback', 'BestEffortCallback', ([], {}), '()\n', (6102, 6104), False, 'from bluesky.callbacks.best_effort import BestEffortCallback\n'), ((6178, 6197), 'bluesky.utils.install_nb_kicker', 'install_nb_kicker', ([], {}), '()\n', (6195, 6197), False, 'from bluesky.utils import install_nb_kicker\n'), ((3530, 3569), 'bluesky.plan_stubs.subscribe', 'subscribe', (['"""all"""', 'x_centroid_y_centroid'], {}), "('all', x_centroid_y_centroid)\n", (3539, 3569), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((3611, 3646), 'bluesky.plan_stubs.subscribe', 'subscribe', (['"""all"""', 'y_plot_x_centroid'], {}), "('all', y_plot_x_centroid)\n", (3620, 3646), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((3688, 3723), 'bluesky.plan_stubs.subscribe', 'subscribe', (['"""all"""', 'y_plot_y_centroid'], {}), "('all', y_plot_y_centroid)\n", (3697, 3723), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((3765, 3800), 'bluesky.plan_stubs.subscribe', 'subscribe', (['"""all"""', 'x_plot_x_centroid'], {}), "('all', x_plot_x_centroid)\n", (3774, 3800), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((3842, 3877), 'bluesky.plan_stubs.subscribe', 'subscribe', (['"""all"""', 'x_plot_y_centroid'], {}), "('all', x_plot_y_centroid)\n", (3851, 3877), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((3919, 4109), 
'bluesky.plan_stubs.open_run', 'open_run', ([], {'md': "{'detectors': [x_centroid.name, y_centroid.name], 'motors': [x.name, y.name\n ], 'hints': {'dimensions': [(x.hints['fields'], 'primary'), (y.hints[\n 'fields'], 'primary')]}}"}), "(md={'detectors': [x_centroid.name, y_centroid.name], 'motors': [x.\n name, y.name], 'hints': {'dimensions': [(x.hints['fields'], 'primary'),\n (y.hints['fields'], 'primary')]}})\n", (3927, 4109), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((5230, 5241), 'bluesky.plan_stubs.close_run', 'close_run', ([], {}), '()\n', (5239, 5241), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((5284, 5324), 'bluesky.plan_stubs.unsubscribe', 'unsubscribe', (['token_x_centroid_y_centroid'], {}), '(token_x_centroid_y_centroid)\n', (5295, 5324), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((5340, 5376), 'bluesky.plan_stubs.unsubscribe', 'unsubscribe', (['token_y_plot_x_centroid'], {}), '(token_y_plot_x_centroid)\n', (5351, 5376), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((5392, 5428), 'bluesky.plan_stubs.unsubscribe', 'unsubscribe', (['token_y_plot_y_centroid'], {}), '(token_y_plot_y_centroid)\n', (5403, 5428), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((5444, 5480), 'bluesky.plan_stubs.unsubscribe', 'unsubscribe', (['token_x_plot_x_centroid'], {}), '(token_x_plot_x_centroid)\n', (5455, 5480), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((5496, 5532), 'bluesky.plan_stubs.unsubscribe', 'unsubscribe', (['token_x_plot_y_centroid'], {}), '(token_x_plot_y_centroid)\n', (5507, 5532), False, 'from bluesky.plan_stubs import open_run, close_run, subscribe, unsubscribe\n'), ((6262, 6279), 'ophyd.sim.SynAxis', 'SynAxis', ([], {'name': '"""x"""'}), "(name='x')\n", (6269, 6279), False, 'from ophyd.sim 
import SynAxis, SynSignal\n'), ((6298, 6315), 'ophyd.sim.SynAxis', 'SynAxis', ([], {'name': '"""y"""'}), "(name='y')\n", (6305, 6315), False, 'from ophyd.sim import SynAxis, SynSignal\n'), ((6695, 6735), 'pcdsdevices.device_types.Newport', 'Newport', (['"""XPP:LAS:MMN:13"""'], {'name': '"""real_x"""'}), "('XPP:LAS:MMN:13', name='real_x')\n", (6702, 6735), False, 'from pcdsdevices.device_types import Newport\n'), ((6756, 6796), 'pcdsdevices.device_types.Newport', 'Newport', (['"""XPP:LAS:MMN:14"""'], {'name': '"""real_y"""'}), "('XPP:LAS:MMN:14', name='real_y')\n", (6763, 6796), False, 'from pcdsdevices.device_types import Newport\n'), ((6867, 6937), 'ophyd.signal.EpicsSignalRO', 'EpicsSignalRO', (['"""XPP:OPAL1K:01:Stats2:CentroidX_RBV"""'], {'name': '"""x_readback"""'}), "('XPP:OPAL1K:01:Stats2:CentroidX_RBV', name='x_readback')\n", (6880, 6937), False, 'from ophyd.signal import EpicsSignalRO\n'), ((6961, 7031), 'ophyd.signal.EpicsSignalRO', 'EpicsSignalRO', (['"""XPP:OPAL1K:01:Stats2:CentroidY_RBV"""'], {'name': '"""y_readback"""'}), "('XPP:OPAL1K:01:Stats2:CentroidY_RBV', name='y_readback')\n", (6974, 7031), False, 'from ophyd.signal import EpicsSignalRO\n'), ((4602, 4750), 'pswalker.plans.walk_to_pixel', 'walk_to_pixel', (['x_centroid', 'x', 'x_target'], {'first_step': '(0.1)', 'target_fields': '[x_centroid.name, x.name]', 'tolerance': '(3)', 'average': '(5)', 'system': '[y, y_centroid]'}), '(x_centroid, x, x_target, first_step=0.1, target_fields=[\n x_centroid.name, x.name], tolerance=3, average=5, system=[y, y_centroid])\n', (4615, 4750), False, 'from pswalker.plans import walk_to_pixel\n'), ((6412, 6479), 'functools.partial', 'partial', (['centroid_from_motor_cross', 'x_motor', 'y_motor'], {'noise_scale': '(1)'}), '(centroid_from_motor_cross, x_motor, y_motor, noise_scale=1)\n', (6419, 6479), False, 'from functools import partial\n'), ((6532, 6584), 'functools.partial', 'partial', (['centroid_from_motor_cross', 'y_motor', 'x_motor'], {}), 
'(centroid_from_motor_cross, y_motor, x_motor)\n', (6539, 6584), False, 'from functools import partial\n'), ((4910, 5058), 'pswalker.plans.walk_to_pixel', 'walk_to_pixel', (['y_centroid', 'y', 'y_target'], {'first_step': '(0.1)', 'tolerance': '(3)', 'average': '(5)', 'target_fields': '[y_centroid.name, y.name]', 'system': '[x, x_centroid]'}), '(y_centroid, y, y_target, first_step=0.1, tolerance=3, average\n =5, target_fields=[y_centroid.name, y.name], system=[x, x_centroid])\n', (4923, 5058), False, 'from pswalker.plans import walk_to_pixel\n')] |
import emcee
import numpy as np
from astropy.io import fits
from pylinear.utilities import indices,pool
def mp_mcmcUncertainty(A, bi, func, conf):
    """Estimate 1-sigma uncertainties for a single source via emcee MCMC.

    Samples the posterior of a one-parameter linear model and summarizes
    the post-burn-in chain with percentile- and standard-deviation-based
    error bars.

    Parameters
    ----------
    A : matrix-like
        Operator exposing a ``matvec`` method (the residual design matrix).
    bi : array-like
        Data vector matched to ``A``.
    func : float
        Scale for the walker starting positions; walkers start at
        ``2 * func * N(0, 1)`` draws.
    conf : dict
        MCMC configuration with keys ``'nwalkers'``, ``'nstep'`` and
        ``'burn'`` (burn-in fraction of ``nstep``).

    Returns
    -------
    (lo, hi, sig) : tuple of float
        Lower (median - 31.7th percentile) and upper (68.3rd percentile
        - median) uncertainties and the sample standard deviation, or
        ``(None, None, None)`` when ``A`` or ``bi`` is missing.
    """
    if A is None or bi is None:
        return None, None, None

    ndim = 1
    nwalkers = conf['nwalkers']
    # Initial walker positions: a Gaussian ball scaled by the input flux.
    # (Replaces the original append loop; also drops the unused `cindex`
    # local and the dead commented-out MHSampler line.)
    p0 = [np.array([func * 2. * np.random.randn()]) for _ in range(nwalkers)]

    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, args=(A, bi))
    sampler.run_mcmc(p0, conf['nstep'])

    # Discard the burn-in fraction of every chain, then flatten.
    nburn = int(conf['burn'] * conf['nstep'])
    samples = sampler.chain[:, nburn:, :].reshape((-1, 1))

    ss = np.std(samples, axis=0)
    # NOTE(review): 31.7/68.3 are two-sided 1-sigma tail levels; for
    # percentile error bars 15.85/84.15 would be the usual choice.
    # Kept as-is to preserve existing behavior -- verify intent.
    ll = np.percentile(samples, 31.7, axis=0)
    aa = np.percentile(samples, 50.0, axis=0)
    hh = np.percentile(samples, 68.3, axis=0)

    lo = aa[0] - ll[0]
    hi = hh[0] - aa[0]
    sig = ss[0]
    return lo, hi, sig
def lnlike(x, A, bi):
    """Gaussian log-likelihood of parameter vector ``x``.

    Computes ``-0.5 * ||bi - A @ x||^2`` using the operator's ``matvec``.
    """
    diff = bi - A.matvec(x)
    return -0.5 * np.sum(np.square(diff))
def mcmcStart(data, mat, resid, conf):
    """Worker entry point for one source's MCMC uncertainty estimate.

    ``data`` is a ``(source_index, best_fit_value)`` pair; the per-source
    residual system comes from ``mat.residualMatrix``.
    """
    A, bi = mat.residualMatrix(data[0], resid)
    return mp_mcmcUncertainty(A, bi, data[1], conf)
def mcmcUncertainties(conf, mat, result):
    """Replace ``result.lo``/``result.hi`` with MCMC-derived error bars.

    No-op when ``conf['perform']`` is falsy. Work is fanned out over a
    process pool with one task per source.
    """
    if not conf['perform']:
        return result

    print('[info]Computing MCMC uncertainties')

    # residuals of the current best-fit solution
    resid = mat.bi - mat.A.matvec(result.x)

    # one task per source: (index, best-fit value)
    tasks = list(enumerate(result.lo))

    workers = pool.Pool(ncpu=conf['cpu']['ncpu'])
    out = workers(mcmcStart, tasks, mat, resid, conf, prefix='Running MCMC')

    # transpose [(lo, hi, sig), ...] into per-quantity sequences;
    # only lo and hi are stored back on the result
    columns = list(zip(*out))
    result.lo = np.array(columns[0])
    result.hi = np.array(columns[1])
    return result
| [
"numpy.sum",
"numpy.random.randn",
"emcee.EnsembleSampler",
"numpy.std",
"numpy.percentile",
"pylinear.utilities.pool.Pool",
"numpy.array"
] | [((388, 447), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnlike'], {'args': '(A, bi)'}), '(nwalkers, ndim, lnlike, args=(A, bi))\n', (409, 447), False, 'import emcee\n'), ((662, 685), 'numpy.std', 'np.std', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (668, 685), True, 'import numpy as np\n'), ((692, 728), 'numpy.percentile', 'np.percentile', (['samples', '(31.7)'], {'axis': '(0)'}), '(samples, 31.7, axis=0)\n', (705, 728), True, 'import numpy as np\n'), ((734, 770), 'numpy.percentile', 'np.percentile', (['samples', '(50.0)'], {'axis': '(0)'}), '(samples, 50.0, axis=0)\n', (747, 770), True, 'import numpy as np\n'), ((776, 812), 'numpy.percentile', 'np.percentile', (['samples', '(68.3)'], {'axis': '(0)'}), '(samples, 68.3, axis=0)\n', (789, 812), True, 'import numpy as np\n'), ((1432, 1467), 'pylinear.utilities.pool.Pool', 'pool.Pool', ([], {'ncpu': "conf['cpu']['ncpu']"}), "(ncpu=conf['cpu']['ncpu'])\n", (1441, 1467), False, 'from pylinear.utilities import indices, pool\n'), ((1597, 1613), 'numpy.array', 'np.array', (['unc[0]'], {}), '(unc[0])\n', (1605, 1613), True, 'import numpy as np\n'), ((1628, 1644), 'numpy.array', 'np.array', (['unc[1]'], {}), '(unc[1])\n', (1636, 1644), True, 'import numpy as np\n'), ((949, 970), 'numpy.sum', 'np.sum', (['(resid * resid)'], {}), '(resid * resid)\n', (955, 970), True, 'import numpy as np\n'), ((333, 350), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (348, 350), True, 'import numpy as np\n')] |
import numpy as np
import heapq
import tensorflow as tf
from sklearn.metrics import roc_auc_score
from layers import Dense, CrossCompressUnit
import metrics
def test_one_user(x, train_items, test_items, item_num, Ks):
    """Rank all unseen items for one user and score the ranking.

    ``x`` is a ``(rating_vector, user_id)`` pair. Items the user already
    interacted with during training are excluded from the candidate set
    before ranking.
    """
    rating, u = x[0], x[1]
    user_pos_test = test_items[u]
    seen = train_items[u] if u in train_items else []
    # candidates: every item id except those seen during training
    candidates = list(set(range(item_num)) - set(seen))
    r, auc = ranklist_by_sorted(user_pos_test, candidates, rating, Ks)
    return get_performance(user_pos_test, r, auc, Ks)
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks):
    """Binary relevance list for the top-``max(Ks)`` ranked items.

    Returns the relevance list ``r`` (1 where a ranked item is a true
    positive, else 0) and the AUC over all candidate items.
    """
    item_score = {item: rating[item] for item in test_items}
    top_items = heapq.nlargest(max(Ks), item_score, key=item_score.get)
    r = [1 if item in user_pos_test else 0 for item in top_items]
    auc = get_auc(item_score, user_pos_test)
    return r, auc
def get_performance(user_pos_test, r, auc, Ks):
    """Collect precision/recall/NDCG/hit-ratio at every cutoff in ``Ks``."""
    n_pos = len(user_pos_test)
    precision = [metrics.precision_at_k(r, K) for K in Ks]
    recall = [metrics.recall_at_k(r, K, n_pos) for K in Ks]
    ndcg = [metrics.ndcg_at_k(r, K) for K in Ks]
    hit_ratio = [metrics.hit_at_k(r, K) for K in Ks]
    return {'recall': np.array(recall),
            'precision': np.array(precision),
            'ndcg': np.array(ndcg),
            'hit_ratio': np.array(hit_ratio),
            'auc': auc}
def get_auc(item_score, user_pos_test):
    """AUC over all candidate items, ranked from highest score down."""
    # sort ascending by score, then flip -- this reproduces the original
    # sort-then-reverse tie ordering exactly
    ranked = sorted(item_score.items(), key=lambda kv: kv[1])[::-1]
    item_sort = [item for item, _ in ranked]
    posterior = [score for _, score in ranked]
    r = [1 if item in user_pos_test else 0 for item in item_sort]
    return metrics.auc(ground_truth=r, prediction=posterior)
class MKR(object):
    """Multi-task Knowledge-graph Recommendation model (RS side only).

    Builds a TF1 static graph at construction time: embedding lookups
    followed by ``args.L`` MLP layers, an inner-product (or optional MLP)
    scoring head, a sigmoid cross-entropy loss with L2 regularization,
    and an Adam optimizer. The KGE branch is not built here -- only
    ``self.vars_kge`` is initialized.
    """
    def __init__(self, args, n_users, n_items, n_entities, n_relations):
        self._parse_args(n_users, n_items, n_entities, n_relations)
        self._build_inputs()
        self._build_model(args)
        self._build_loss(args)
        self._build_train(args)

    def _parse_args(self, n_users, n_items, n_entities, n_relations):
        # dataset cardinalities
        self.n_user = n_users
        self.n_item = n_items
        self.n_entity = n_entities
        self.n_relation = n_relations
        # for computing l2 loss
        self.vars_rs = []
        self.vars_kge = []

    def _build_inputs(self):
        # NOTE(review): calc_ndcg also feeds ``model.head_indices``, which
        # is never created here -- presumably defined in a KGE-enabled
        # variant of this class; verify before using calc_ndcg.
        self.user_indices = tf.placeholder(tf.int32, [None], 'user_indices')
        self.item_indices = tf.placeholder(tf.int32, [None], 'item_indices')
        self.labels = tf.placeholder(tf.float32, [None], 'labels')

    def _build_model(self, args):
        self._build_low_layers(args)
        self._build_high_layers(args)

    def _build_low_layers(self, args):
        self.user_emb_matrix = tf.get_variable('user_emb_matrix', [self.n_user, args.dim])
        self.item_emb_matrix = tf.get_variable('item_emb_matrix', [self.n_item, args.dim])
        # [batch_size, dim]
        self.user_embeddings = tf.nn.embedding_lookup(self.user_emb_matrix, self.user_indices)
        self.item_embeddings = tf.nn.embedding_lookup(self.item_emb_matrix, self.item_indices)
        # stack L dense layers on both towers; collect their weights for L2
        for _ in range(args.L):
            user_mlp = Dense(input_dim=args.dim, output_dim=args.dim)
            item_mlp = Dense(input_dim=args.dim, output_dim=args.dim)
            self.user_embeddings = user_mlp(self.user_embeddings)
            self.item_embeddings = item_mlp(self.item_embeddings)
            self.vars_rs.extend(user_mlp.vars)
            self.vars_rs.extend(item_mlp.vars)

    def _build_high_layers(self, args):
        # RS
        # hard-coded choice: inner-product scoring head (the MLP head
        # below is kept as an alternative but is currently unreachable)
        use_inner_product = True
        if use_inner_product:
            # [batch_size]
            self.scores = tf.reduce_sum(self.user_embeddings * self.item_embeddings, axis=1)
        else:
            # [batch_size, dim * 2]
            self.user_item_concat = tf.concat([self.user_embeddings, self.item_embeddings], axis=1)
            for _ in range(args.H - 1):
                rs_mlp = Dense(input_dim=args.dim * 2, output_dim=args.dim * 2)
                # [batch_size, dim * 2]
                self.user_item_concat = rs_mlp(self.user_item_concat)
                self.vars_rs.extend(rs_mlp.vars)
            rs_pred_mlp = Dense(input_dim=args.dim * 2, output_dim=1)
            # [batch_size]
            self.scores = tf.squeeze(rs_pred_mlp(self.user_item_concat))
            self.vars_rs.extend(rs_pred_mlp.vars)
        self.scores_normalized = tf.nn.sigmoid(self.scores)

    def _build_loss(self, args):
        # RS
        self.base_loss_rs = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=self.labels, logits=self.scores))
        # L2 over the batch embeddings plus every collected dense weight
        self.l2_loss_rs = tf.nn.l2_loss(self.user_embeddings) + tf.nn.l2_loss(self.item_embeddings)
        for var in self.vars_rs:
            self.l2_loss_rs += tf.nn.l2_loss(var)
        self.loss_rs = self.base_loss_rs + self.l2_loss_rs * args.l2_weight

    def _build_train(self, args):
        # TODO: better optimizer?
        self.optimizer_rs = tf.train.AdamOptimizer(args.lr_rs).minimize(self.loss_rs)

    def train_rs(self, sess, feed_dict):
        """Run one optimizer step; returns [optimizer op result, loss]."""
        return sess.run([self.optimizer_rs, self.loss_rs], feed_dict)

    def eval(self, sess, feed_dict):
        """Evaluate one batch: returns (auc, acc, precision, recall, f1).

        Predictions use a fixed 0.5 threshold on the sigmoid scores.
        NOTE(review): divides by ``sum(predictions)``/``sum(labels)``, so a
        batch with no predicted or no true positives raises
        ZeroDivisionError -- verify callers never pass such batches.
        """
        labels, scores = sess.run([self.labels, self.scores_normalized], feed_dict)
        auc = roc_auc_score(y_true=labels, y_score=scores)
        predictions = [1 if i >= 0.5 else 0 for i in scores]
        true_positives = sum([1 if p == 1 and l == 1 else 0 for p, l in zip(predictions, labels)])
        precision = true_positives / sum(predictions)
        recall = true_positives / sum(labels)
        f1 = 2 * precision * recall / (precision + recall)
        acc = np.mean(np.equal(predictions, labels))
        return auc, acc, precision, recall, f1

    def calc_ndcg(self, sess, model, train_data, test_data, batch_size):
        """Full-ranking evaluation (precision/recall/NDCG/hit/AUC at Ks).

        ``train_data``/``test_data`` are arrays of (uid, iid, label) rows;
        only label==1 rows define positives. Scores every item for every
        test user and averages per-user metrics from ``test_one_user``.
        """
        Ks = [20, 40, 60, 80, 100]
        result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
                  'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
        # NOTE(review): this is the largest item *id*, used as the item
        # count below -- assumes ids are dense from 0; verify.
        item_num = max(np.max(train_data[:, 1]), np.max(test_data[:, 1]))
        test_users = []
        train_items, test_items = {}, {}
        # group positive interactions per user
        for uid, iid, label in train_data:
            if label == 1:
                if uid not in train_items:
                    train_items[uid] = []
                train_items[uid].append(iid)
        for uid, iid, label in test_data:
            if label == 1:
                if uid not in test_items:
                    test_items[uid] = []
                test_items[uid].append(iid)
                if uid not in test_users:
                    test_users.append(uid)
        n_test_users = len(test_users)
        # computed but unused below -- items are scored in one shot per user
        n_item_batchs = item_num // batch_size + 1
        for i, uid in enumerate(test_users):
            if (i + 1) % 500 == 0:
                print("user:::", i, '/', len(test_users))
            item_batch = range(item_num)
            feed_dict = {model.user_indices: [uid] * item_num,
                         model.item_indices: item_batch,
                         model.labels: [1] * item_num,
                         model.head_indices: item_batch}
            rate_batch = sess.run(self.scores_normalized, feed_dict)
            re = test_one_user([rate_batch, uid], train_items, test_items, item_num, Ks)
            # accumulate per-user metrics as running averages
            result['precision'] += re['precision']/n_test_users
            result['recall'] += re['recall']/n_test_users
            result['ndcg'] += re['ndcg']/n_test_users
            result['hit_ratio'] += re['hit_ratio']/n_test_users
            result['auc'] += re['auc']/n_test_users
        return result

    def get_scores(self, sess, feed_dict):
        """Return [item indices, normalized scores] for the fed batch."""
        return sess.run([self.item_indices, self.scores_normalized], feed_dict)
| [
"tensorflow.reduce_sum",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.get_variable",
"metrics.ndcg_at_k",
"metrics.auc",
"tensorflow.concat",
"heapq.nlargest",
"numpy.equal",
"tensorflow.placeholder",
"numpy.max",
"tensorflow.nn.embedding_lookup",
"sklearn.metrics.roc_auc_sco... | [((749, 802), 'heapq.nlargest', 'heapq.nlargest', (['K_max', 'item_score'], {'key': 'item_score.get'}), '(K_max, item_score, key=item_score.get)\n', (763, 802), False, 'import heapq\n'), ((1857, 1906), 'metrics.auc', 'metrics.auc', ([], {'ground_truth': 'r', 'prediction': 'posterior'}), '(ground_truth=r, prediction=posterior)\n', (1868, 1906), False, 'import metrics\n'), ((1366, 1382), 'numpy.array', 'np.array', (['recall'], {}), '(recall)\n', (1374, 1382), True, 'import numpy as np\n'), ((1397, 1416), 'numpy.array', 'np.array', (['precision'], {}), '(precision)\n', (1405, 1416), True, 'import numpy as np\n'), ((1438, 1452), 'numpy.array', 'np.array', (['ndcg'], {}), '(ndcg)\n', (1446, 1452), True, 'import numpy as np\n'), ((1467, 1486), 'numpy.array', 'np.array', (['hit_ratio'], {}), '(hit_ratio)\n', (1475, 1486), True, 'import numpy as np\n'), ((2555, 2603), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""user_indices"""'], {}), "(tf.int32, [None], 'user_indices')\n", (2569, 2603), True, 'import tensorflow as tf\n'), ((2632, 2680), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""item_indices"""'], {}), "(tf.int32, [None], 'item_indices')\n", (2646, 2680), True, 'import tensorflow as tf\n'), ((2703, 2747), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]', '"""labels"""'], {}), "(tf.float32, [None], 'labels')\n", (2717, 2747), True, 'import tensorflow as tf\n'), ((2929, 2988), 'tensorflow.get_variable', 'tf.get_variable', (['"""user_emb_matrix"""', '[self.n_user, args.dim]'], {}), "('user_emb_matrix', [self.n_user, args.dim])\n", (2944, 2988), True, 'import tensorflow as tf\n'), ((3020, 3079), 'tensorflow.get_variable', 'tf.get_variable', (['"""item_emb_matrix"""', '[self.n_item, args.dim]'], {}), "('item_emb_matrix', [self.n_item, args.dim])\n", (3035, 3079), True, 'import tensorflow as tf\n'), ((3140, 3203), 'tensorflow.nn.embedding_lookup', 
'tf.nn.embedding_lookup', (['self.user_emb_matrix', 'self.user_indices'], {}), '(self.user_emb_matrix, self.user_indices)\n', (3162, 3203), True, 'import tensorflow as tf\n'), ((3235, 3298), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.item_emb_matrix', 'self.item_indices'], {}), '(self.item_emb_matrix, self.item_indices)\n', (3257, 3298), True, 'import tensorflow as tf\n'), ((4619, 4645), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['self.scores'], {}), '(self.scores)\n', (4632, 4645), True, 'import tensorflow as tf\n'), ((5492, 5536), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', ([], {'y_true': 'labels', 'y_score': 'scores'}), '(y_true=labels, y_score=scores)\n', (5505, 5536), False, 'from sklearn.metrics import roc_auc_score\n'), ((1150, 1178), 'metrics.precision_at_k', 'metrics.precision_at_k', (['r', 'K'], {}), '(r, K)\n', (1172, 1178), False, 'import metrics\n'), ((1269, 1292), 'metrics.ndcg_at_k', 'metrics.ndcg_at_k', (['r', 'K'], {}), '(r, K)\n', (1286, 1292), False, 'import metrics\n'), ((1319, 1341), 'metrics.hit_at_k', 'metrics.hit_at_k', (['r', 'K'], {}), '(r, K)\n', (1335, 1341), False, 'import metrics\n'), ((3355, 3401), 'layers.Dense', 'Dense', ([], {'input_dim': 'args.dim', 'output_dim': 'args.dim'}), '(input_dim=args.dim, output_dim=args.dim)\n', (3360, 3401), False, 'from layers import Dense, CrossCompressUnit\n'), ((3425, 3471), 'layers.Dense', 'Dense', ([], {'input_dim': 'args.dim', 'output_dim': 'args.dim'}), '(input_dim=args.dim, output_dim=args.dim)\n', (3430, 3471), False, 'from layers import Dense, CrossCompressUnit\n'), ((3869, 3935), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.user_embeddings * self.item_embeddings)'], {'axis': '(1)'}), '(self.user_embeddings * self.item_embeddings, axis=1)\n', (3882, 3935), True, 'import tensorflow as tf\n'), ((4022, 4085), 'tensorflow.concat', 'tf.concat', (['[self.user_embeddings, self.item_embeddings]'], {'axis': '(1)'}), '([self.user_embeddings, 
self.item_embeddings], axis=1)\n', (4031, 4085), True, 'import tensorflow as tf\n'), ((4392, 4435), 'layers.Dense', 'Dense', ([], {'input_dim': '(args.dim * 2)', 'output_dim': '(1)'}), '(input_dim=args.dim * 2, output_dim=1)\n', (4397, 4435), False, 'from layers import Dense, CrossCompressUnit\n'), ((4749, 4828), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'self.labels', 'logits': 'self.scores'}), '(labels=self.labels, logits=self.scores)\n', (4788, 4828), True, 'import tensorflow as tf\n'), ((4856, 4891), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.user_embeddings'], {}), '(self.user_embeddings)\n', (4869, 4891), True, 'import tensorflow as tf\n'), ((4894, 4929), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.item_embeddings'], {}), '(self.item_embeddings)\n', (4907, 4929), True, 'import tensorflow as tf\n'), ((4994, 5012), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (5007, 5012), True, 'import tensorflow as tf\n'), ((5878, 5907), 'numpy.equal', 'np.equal', (['predictions', 'labels'], {}), '(predictions, labels)\n', (5886, 5907), True, 'import numpy as np\n'), ((6252, 6276), 'numpy.max', 'np.max', (['train_data[:, 1]'], {}), '(train_data[:, 1])\n', (6258, 6276), True, 'import numpy as np\n'), ((6278, 6301), 'numpy.max', 'np.max', (['test_data[:, 1]'], {}), '(test_data[:, 1])\n', (6284, 6301), True, 'import numpy as np\n'), ((4151, 4205), 'layers.Dense', 'Dense', ([], {'input_dim': '(args.dim * 2)', 'output_dim': '(args.dim * 2)'}), '(input_dim=args.dim * 2, output_dim=args.dim * 2)\n', (4156, 4205), False, 'from layers import Dense, CrossCompressUnit\n'), ((5186, 5220), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['args.lr_rs'], {}), '(args.lr_rs)\n', (5208, 5220), True, 'import tensorflow as tf\n')] |
__author__ = "<NAME>"
__copyright__ = "Copyright, 2021, <NAME>"
__license__ = "3-Clause BSD License"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def polynomial_basis(theta: np.array, degree: int) -> np.array:
    """Calculates polynomial basis for the omnidirectional camera model.

    Parameters
    ----------
    theta : numpy.array
        theta-angles for which the polynomial basis will be calculated
    degree : int
        number of rows in the basis; row k holds theta**k for
        k = 0 .. degree-1. E.g. degree = 3 gives [1.0, theta, theta^2].

    Returns
    -------
    numpy.array
        Polynomial basis matrix of shape (degree, theta.size). If
        theta = [t1, t2, t3] and degree = 3, the basis is:
            [1.0,   1.0,   1.0;
             t1,    t2,    t3;
             t1^2,  t2^2,  t3^2]

    Raises
    ------
    ValueError
        If ``degree`` is less than 1.
    """
    # Minimum degree is 1
    if degree < 1:
        raise ValueError("Degree has to be 1 or greater!")
    # Fix: the original used np.float, which was deprecated in NumPy 1.20
    # and removed in 1.24 -- use the builtin float dtype instead.
    # Row k is theta**k, computed as a single vectorized outer power
    # instead of the original cumulative-product loops.
    exponents = np.arange(degree).reshape(degree, 1)
    return np.asarray(theta, dtype=float).reshape(1, -1) ** exponents
def perspective_lut(image_shape: tuple, principal_point: np.array, focal_length: float,
                    model_coefficients: np.array) -> tuple:
    """
    Calculates a look-up-table (LUT) for converting images captured with an
    omnidirectional camera, described by the model coefficients, into
    perspective camera images (i.e. pin-hole camera). The relation between
    the 3D half-ray emanating from the single point and the corresponding
    pixel, observed in the image plane, is described by a polynomial basis
    and the model coefficients. The look-up-table values can be used for
    converting images into perspective camera images, for example, by using
    OpenCV's remap function:
        cv2.remap(image, u, v, cv2.INTER_LINEAR)
    For more information, take a look at the paper:
    "A Toolbox for Easily Calibrating Omnidirectional Cameras".

    Parameters
    ----------
    image_shape : tuple of ints
        Shape of the image (rows, cols, channels)
    principal_point : (float, float)
        Principal point (i.e. optical centre of the camera) [px, py]
    focal_length : float
        Focal length
    model_coefficients :
        Coefficients of the omnidirectional lens model

    Returns
    -------
    (u, v)
        A tuple containing the look-up-table values (float32 arrays of
        shape (rows, cols)) for converting images into perspective camera
        images.
    """
    focal_length = np.abs(focal_length)
    # Create image coordinate mesh-grids (origin at the top-left corner).
    # Fix: np.float was removed in NumPy 1.24 -- use the builtin float.
    u, v = np.meshgrid(
        np.arange(image_shape[1], dtype=float),
        np.arange(image_shape[0], dtype=float)
    )
    # Convert the coordinates into sensor coordinates (origin at the
    # principal point; the sensor sits one focal length from the lens)
    u -= principal_point[0]
    v -= principal_point[1]
    sensor_coords = np.vstack((u.flatten(), v.flatten(), np.ones(u.size) * focal_length))
    # rho is the Euclidean distance of the sensor position from the
    # principal point.
    # NOTE(review): rho is 0 exactly at the principal point, so the r/rho
    # division below emits inf/nan there -- presumably handled downstream;
    # verify.
    rho = np.sqrt(np.square(sensor_coords[0, :]) + np.square(sensor_coords[1, :]))
    theta = np.arctan(np.divide(-sensor_coords[2,], rho))
    # evaluate the lens polynomial at each angle
    basis = polynomial_basis(theta, model_coefficients.size)
    r = np.multiply(model_coefficients.reshape((model_coefficients.size, -1)), basis)
    r = np.sum(r, axis=0)
    r /= rho
    x_result = principal_point[0] + sensor_coords[0,] * r
    y_result = principal_point[1] + sensor_coords[1,] * r
    x_result = x_result.reshape((image_shape[0], image_shape[1]))
    y_result = y_result.reshape((image_shape[0], image_shape[1]))
    return x_result.astype(np.float32), y_result.astype(np.float32)
| [
"numpy.divide",
"numpy.sum",
"numpy.abs",
"numpy.empty",
"numpy.square",
"numpy.ones",
"numpy.arange"
] | [((1709, 1755), 'numpy.empty', 'np.empty', (['(degree, theta.size)'], {'dtype': 'np.float'}), '((degree, theta.size), dtype=np.float)\n', (1717, 1755), True, 'import numpy as np\n'), ((1772, 1796), 'numpy.ones', 'np.ones', (['(1, theta.size)'], {}), '((1, theta.size))\n', (1779, 1796), True, 'import numpy as np\n'), ((3493, 3513), 'numpy.abs', 'np.abs', (['focal_length'], {}), '(focal_length)\n', (3499, 3513), True, 'import numpy as np\n'), ((4616, 4633), 'numpy.sum', 'np.sum', (['r'], {'axis': '(0)'}), '(r, axis=0)\n', (4622, 4633), True, 'import numpy as np\n'), ((3696, 3737), 'numpy.arange', 'np.arange', (['image_shape[1]'], {'dtype': 'np.float'}), '(image_shape[1], dtype=np.float)\n', (3705, 3737), True, 'import numpy as np\n'), ((3747, 3788), 'numpy.arange', 'np.arange', (['image_shape[0]'], {'dtype': 'np.float'}), '(image_shape[0], dtype=np.float)\n', (3756, 3788), True, 'import numpy as np\n'), ((4367, 4401), 'numpy.divide', 'np.divide', (['(-sensor_coords[2,])', 'rho'], {}), '(-sensor_coords[2,], rho)\n', (4376, 4401), True, 'import numpy as np\n'), ((4280, 4310), 'numpy.square', 'np.square', (['sensor_coords[0, :]'], {}), '(sensor_coords[0, :])\n', (4289, 4310), True, 'import numpy as np\n'), ((4313, 4343), 'numpy.square', 'np.square', (['sensor_coords[1, :]'], {}), '(sensor_coords[1, :])\n', (4322, 4343), True, 'import numpy as np\n'), ((4081, 4096), 'numpy.ones', 'np.ones', (['u.size'], {}), '(u.size)\n', (4088, 4096), True, 'import numpy as np\n')] |
from __future__ import absolute_import, print_function, division
import os
import shutil
import unittest
from tempfile import mkdtemp
import numpy as np
import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.misc.pkl_utils import dump, load, StripPickler
class T_dump_load(unittest.TestCase):
    """Round-trip tests for theano's pickle ``dump``/``load`` helpers."""

    def setUp(self):
        # Run inside a scratch directory so artifacts never land in the repo
        self.start_dir = os.getcwd()
        self.work_dir = mkdtemp()
        os.chdir(self.work_dir)

    def tearDown(self):
        # Restore the working directory, then remove the scratch space
        os.chdir(self.start_dir)
        if self.work_dir is not None:
            shutil.rmtree(self.work_dir)

    def test_dump_load_mrg(self):
        stream = MRG_RandomStreams()
        with open('test', 'wb') as fh:
            dump(stream, fh)
        with open('test', 'rb') as fh:
            stream = load(fh)
        assert type(stream) == MRG_RandomStreams

    def test_dump_zip_names(self):
        shared_vars = [theano.shared(i, name='foo') for i in range(3)]
        with open('model.zip', 'wb') as fh:
            dump((shared_vars[0], shared_vars[1], shared_vars[2], np.array(3)), fh)
        # duplicate names get disambiguating suffixes inside the archive
        keys = list(np.load('model.zip').keys())
        assert keys == ['foo', 'foo_2', 'foo_3', 'array_0', 'pkl']
        assert np.load('model.zip')['foo_3'] == np.array(2)
        with open('model.zip', 'rb') as fh:
            foo_1, foo_2, foo_3, array = load(fh)
        assert array == np.array(3)
foo_1, foo_2, foo_3, array = load(f)
assert array == np.array(3)
class TestStripPickler(unittest.TestCase):
    """Smoke test for :class:`StripPickler`."""

    def setUp(self):
        # Work in a temporary directory to avoid cluttering the repository
        self.origdir = os.getcwd()
        self.tmpdir = mkdtemp()
        os.chdir(self.tmpdir)

    def tearDown(self):
        # Get back to the original dir, and delete the temporary one
        os.chdir(self.origdir)
        if self.tmpdir is not None:
            shutil.rmtree(self.tmpdir)

    def test0(self):
        # Fix: the original opened 'test.pkl' only to immediately shadow the
        # handle with a second open() and never closed the destination file.
        # Open the dump target in a context manager so it is always closed.
        m = theano.tensor.matrix()
        dest_pkl = 'my_test.pkl'
        with open(dest_pkl, 'wb') as f:
            strip_pickler = StripPickler(f, protocol=-1)
            strip_pickler.dump(m)
| [
"numpy.load",
"os.getcwd",
"theano.misc.pkl_utils.dump",
"theano.misc.pkl_utils.load",
"theano.sandbox.rng_mrg.MRG_RandomStreams",
"tempfile.mkdtemp",
"theano.shared",
"numpy.array",
"theano.misc.pkl_utils.StripPickler",
"shutil.rmtree",
"os.chdir",
"theano.tensor.matrix"
] | [((441, 452), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (450, 452), False, 'import os\n'), ((475, 484), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (482, 484), False, 'from tempfile import mkdtemp\n'), ((493, 514), 'os.chdir', 'os.chdir', (['self.tmpdir'], {}), '(self.tmpdir)\n', (501, 514), False, 'import os\n'), ((617, 639), 'os.chdir', 'os.chdir', (['self.origdir'], {}), '(self.origdir)\n', (625, 639), False, 'import os\n'), ((764, 783), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'MRG_RandomStreams', ([], {}), '()\n', (781, 783), False, 'from theano.sandbox.rng_mrg import MRG_RandomStreams\n'), ((1012, 1040), 'theano.shared', 'theano.shared', (['(0)'], {'name': '"""foo"""'}), "(0, name='foo')\n", (1025, 1040), False, 'import theano\n'), ((1057, 1085), 'theano.shared', 'theano.shared', (['(1)'], {'name': '"""foo"""'}), "(1, name='foo')\n", (1070, 1085), False, 'import theano\n'), ((1102, 1130), 'theano.shared', 'theano.shared', (['(2)'], {'name': '"""foo"""'}), "(2, name='foo')\n", (1115, 1130), False, 'import theano\n'), ((1720, 1731), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1729, 1731), False, 'import os\n'), ((1754, 1763), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (1761, 1763), False, 'from tempfile import mkdtemp\n'), ((1772, 1793), 'os.chdir', 'os.chdir', (['self.tmpdir'], {}), '(self.tmpdir)\n', (1780, 1793), False, 'import os\n'), ((1896, 1918), 'os.chdir', 'os.chdir', (['self.origdir'], {}), '(self.origdir)\n', (1904, 1918), False, 'import os\n'), ((688, 714), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (701, 714), False, 'import shutil\n'), ((835, 847), 'theano.misc.pkl_utils.dump', 'dump', (['rng', 'f'], {}), '(rng, f)\n', (839, 847), False, 'from theano.misc.pkl_utils import dump, load, StripPickler\n'), ((905, 912), 'theano.misc.pkl_utils.load', 'load', (['f'], {}), '(f)\n', (909, 912), False, 'from theano.misc.pkl_utils import dump, load, StripPickler\n'), ((1362, 1382), 'numpy.load', 'np.load', 
(['"""model.zip"""'], {}), "('model.zip')\n", (1369, 1382), True, 'import numpy as np\n'), ((1416, 1427), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1424, 1427), True, 'import numpy as np\n'), ((1512, 1519), 'theano.misc.pkl_utils.load', 'load', (['f'], {}), '(f)\n', (1516, 1519), False, 'from theano.misc.pkl_utils import dump, load, StripPickler\n'), ((1544, 1555), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1552, 1555), True, 'import numpy as np\n'), ((1967, 1993), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (1980, 1993), False, 'import shutil\n'), ((2074, 2096), 'theano.tensor.matrix', 'theano.tensor.matrix', ([], {}), '()\n', (2094, 2096), False, 'import theano\n'), ((2199, 2227), 'theano.misc.pkl_utils.StripPickler', 'StripPickler', (['f'], {'protocol': '(-1)'}), '(f, protocol=-1)\n', (2211, 2227), False, 'from theano.misc.pkl_utils import dump, load, StripPickler\n'), ((1213, 1224), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1221, 1224), True, 'import numpy as np\n'), ((1250, 1270), 'numpy.load', 'np.load', (['"""model.zip"""'], {}), "('model.zip')\n", (1257, 1270), True, 'import numpy as np\n')] |
from __future__ import (print_function, absolute_import)
import numpy as np
from .profiles import get_profiles
def interpolate_profile(nlower, nupper, nelec, temp, with_doppler=False):
    """
    Interpolate profile tables of Lemke 1997 to get a Stark broadened line profile

    Performs bilinear interpolation in (log10 ne, log10 T) over the
    tabulated profiles returned by ``get_profiles``.

    Parameters
    ----------
    nlower : int
        lower level of transition
    nupper : int
        upper level of transition
    nelec : float
        number density of electrons in cm**-3
    temp : float
        temperature in K
    with_doppler : bool, optional
        passed through to ``get_profiles`` to select Doppler-convolved
        tables (default False)

    Returns
    -------
    alpha : `np.ndarray`
        log10(alpha) values of the tabulated grid
    profile : `np.ndarray`
        interpolated Stark profile at those alpha values
    f0 : float
        normal field strength, converting delta-alpha to delta-lambda

    Raises
    ------
    ValueError
        if ``nelec`` or ``temp`` falls outside the tabulated range
    """
    meta, flags, data = get_profiles(nlower, nupper, with_doppler)
    f0 = 1.25e-9*nelec**(2./3.) # normal field strength f0 (in esu)
    # work in log space -- the tables are gridded in log10(ne), log10(T)
    log_ne = np.log10(nelec)
    log_t = np.log10(temp)
    # fractional grid indices of the requested point
    log_ne_index = (log_ne - meta.log_ne_min) / meta.log_ne_increment
    log_t_index = (log_t - meta.log_t_min) / meta.log_t_increment
    low_ne_index = int(np.floor(log_ne_index))
    high_ne_index = int(np.ceil(log_ne_index))
    low_t_index = int(np.floor(log_t_index))
    high_t_index = int(np.ceil(log_t_index))
    # check we are within bounds
    # NOTE(review): the upper-bound checks use "> num" rather than
    # ">= num"; if num_ne/num_temp are grid *counts* with 0-based indices
    # this is off by one -- verify against the table dimensions.
    if low_ne_index < 0 or high_ne_index > meta.num_ne:
        raise ValueError("electron density outside allowed range 10**10 to 10**18 cm**-3")
    if low_t_index < 0 or high_t_index > meta.num_temp:
        raise ValueError("temperature outside allowed range 2500 to 160000 K")
    # points bracketing requested values
    ne1 = meta.log_ne_min + low_ne_index*meta.log_ne_increment
    ne2 = meta.log_ne_min + high_ne_index*meta.log_ne_increment
    t1 = meta.log_t_min + low_t_index*meta.log_t_increment
    t2 = meta.log_t_min + high_t_index*meta.log_t_increment
    # profiles at these points
    p1 = data[low_ne_index, low_t_index]
    p2 = data[high_ne_index, low_t_index]
    p3 = data[low_ne_index, high_t_index]
    p4 = data[high_ne_index, high_t_index]
    # degenerate cases (request lies exactly on a grid line) are handled
    # separately to avoid 0/0 in the interpolation weights
    if ne1 == ne2 and t1 == t2:
        # no interpolation needed
        profile = p1
    elif ne1 == ne2:
        # interpolate in temp
        profile = p1 + (p3 - p1) * (log_t - t1) / (t2 - t1)
    elif t1 == t2:
        # interpolate in nelec
        profile = p1 + (p2 - p1) * (log_ne - ne1) / (ne2 - ne1)
    else:
        # otherwise do the full bilinear interpolation
        # interpolate in temp at low_ne
        r1 = p1 + (p3 - p1) * (log_t - t1) / (t2 - t1)
        # interpolate in temp at high_ne
        r3 = p2 + (p4 - p2) * (log_t - t1) / (t2 - t1)
        # interpolate in ne
        profile = r1 + (r3-r1) * (log_ne - ne1) / (ne2 - ne1)
    # OK - now find matching alpha values
    alpha = meta.log_alpha_min + np.arange(meta.num_alpha) * meta.log_alpha_increment
    return alpha, profile, f0
| [
"numpy.log10",
"numpy.arange",
"numpy.ceil",
"numpy.floor"
] | [((871, 886), 'numpy.log10', 'np.log10', (['nelec'], {}), '(nelec)\n', (879, 886), True, 'import numpy as np\n'), ((899, 913), 'numpy.log10', 'np.log10', (['temp'], {}), '(temp)\n', (907, 913), True, 'import numpy as np\n'), ((1074, 1096), 'numpy.floor', 'np.floor', (['log_ne_index'], {}), '(log_ne_index)\n', (1082, 1096), True, 'import numpy as np\n'), ((1122, 1143), 'numpy.ceil', 'np.ceil', (['log_ne_index'], {}), '(log_ne_index)\n', (1129, 1143), True, 'import numpy as np\n'), ((1167, 1188), 'numpy.floor', 'np.floor', (['log_t_index'], {}), '(log_t_index)\n', (1175, 1188), True, 'import numpy as np\n'), ((1213, 1233), 'numpy.ceil', 'np.ceil', (['log_t_index'], {}), '(log_t_index)\n', (1220, 1233), True, 'import numpy as np\n'), ((2774, 2799), 'numpy.arange', 'np.arange', (['meta.num_alpha'], {}), '(meta.num_alpha)\n', (2783, 2799), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import mmcv
import numpy as np
import torch
from torch.utils.data import Dataset
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_root_logger
from mmdeploy.utils.config_utils import get_input_shape
from .mmclassification import MMCLS_TASK
def process_model_config(model_cfg: mmcv.Config,
                         imgs: Union[str, np.ndarray],
                         input_shape: Optional[Sequence[int]] = None):
    """Process the model config so its test pipeline matches the input type.

    Args:
        model_cfg (mmcv.Config): The model config.
        imgs (str | np.ndarray): Input image(s), accepted data type are `str`,
            `np.ndarray`.
        input_shape (list[int]): A list of two integer in (width, height)
            format specifying input shape. Default: None.

    Returns:
        mmcv.Config: the model config after processing.
    """
    cfg = model_cfg.deepcopy()
    if isinstance(imgs, str):
        # A file path needs a loading step at the head of the pipeline.
        if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
            cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
    else:
        # An in-memory array is already decoded; drop the loading step.
        if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
            cfg.data.test.pipeline.pop(0)
    # check whether input_shape is valid
    if input_shape is not None:
        # NOTE(review): assumes the crop transform sits at pipeline index 2
        # — TODO confirm for configs with a different pipeline layout.
        if 'crop_size' in cfg.data.test.pipeline[2]:
            crop_size = cfg.data.test.pipeline[2]['crop_size']
            if tuple(input_shape) != (crop_size, crop_size):
                logger = get_root_logger()
                # Fix: the original built this message with a backslash
                # continuation inside the f-string, which embedded the
                # source indentation into the logged text. Implicit
                # concatenation keeps the message clean.
                logger.warning(
                    f'`input shape` should be equal to `crop_size`: '
                    f'{crop_size}, but given: {input_shape}')
    return cfg
@MMCLS_TASK.register_module(Task.CLASSIFICATION.value)
class Classification(BaseTask):
    """Classification task class.

    Args:
        model_cfg (mmcv.Config): Original PyTorch model config file.
        deploy_cfg (mmcv.Config): Deployment config file or loaded Config
            object.
        device (str): A string represents device type.
    """

    def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
                 device: str):
        super(Classification, self).__init__(model_cfg, deploy_cfg, device)

    def init_backend_model(self,
                           model_files: Sequence[str] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize backend model.

        Args:
            model_files (Sequence[str]): Input model files.

        Returns:
            nn.Module: An initialized backend model.
        """
        from .classification_model import build_classification_model
        model = build_classification_model(
            model_files, self.model_cfg, self.deploy_cfg, device=self.device)
        return model.eval()

    def init_pytorch_model(self,
                           model_checkpoint: Optional[str] = None,
                           cfg_options: Optional[Dict] = None,
                           **kwargs) -> torch.nn.Module:
        """Initialize torch model.

        Args:
            model_checkpoint (str): The checkpoint file of torch model,
                Default: None.
            cfg_options (dict): Optional config key-pair parameters.

        Returns:
            nn.Module: An initialized torch model generated by OpenMMLab
                codebases.
        """
        from mmcls.apis import init_model
        model = init_model(self.model_cfg, model_checkpoint, self.device,
                           cfg_options)
        return model.eval()

    def create_input(self,
                     imgs: Union[str, np.ndarray],
                     input_shape: Optional[Sequence[int]] = None) \
            -> Tuple[Dict, torch.Tensor]:
        """Create input for classifier.

        Args:
            imgs (Any): Input image(s), accepted data type are `str`,
                `np.ndarray`, `torch.Tensor`.
            input_shape (list[int]): A list of two integer in (width, height)
                format specifying input shape. Default: None.

        Returns:
            tuple: (data, img), meta information for the input image and input.
        """
        from mmcls.datasets.pipelines import Compose
        from mmcv.parallel import collate, scatter
        cfg = process_model_config(self.model_cfg, imgs, input_shape)
        if isinstance(imgs, str):
            # Path input: let the pipeline's LoadImageFromFile do the reading.
            data = dict(img_info=dict(filename=imgs), img_prefix=None)
        else:
            data = dict(img=imgs)
        test_pipeline = Compose(cfg.data.test.pipeline)
        data = test_pipeline(data)
        data = collate([data], samples_per_gpu=1)
        # Wrap the batched tensor in a list to match the model's expected
        # input format.
        data['img'] = [data['img']]
        if self.device != 'cpu':
            # Move the collated batch onto the target device.
            data = scatter(data, [self.device])[0]
        return data, data['img']

    def visualize(self,
                  model: torch.nn.Module,
                  image: Union[str, np.ndarray],
                  result: list,
                  output_file: str,
                  window_name: str = '',
                  show_result: bool = False):
        """Visualize predictions of a model.

        Args:
            model (nn.Module): Input model.
            image (str | np.ndarray): Input image to draw predictions on.
            result (list): A list of predictions.
            output_file (str): Output file to save drawn image.
            window_name (str): The name of visualization window. Defaults to
                an empty string.
            show_result (bool): Whether to show result in windows.
                Default: False.
        """
        show_img = mmcv.imread(image) if isinstance(image, str) else image
        # When showing interactively, skip writing the image to disk.
        output_file = None if show_result else output_file
        pred_score = np.max(result)
        pred_label = np.argmax(result)
        result = {'pred_label': pred_label, 'pred_score': float(pred_score)}
        result['pred_class'] = model.CLASSES[result['pred_label']]
        return model.show_result(
            show_img,
            result,
            show=show_result,
            win_name=window_name,
            out_file=output_file)

    @staticmethod
    def run_inference(model: torch.nn.Module,
                      model_inputs: Dict[str, torch.Tensor]) -> list:
        """Run inference once for a classification model of mmcls.

        Args:
            model (nn.Module): Input model.
            model_inputs (dict): A dict containing model inputs tensor and
                meta info.

        Returns:
            list: The predictions of model inference.
        """
        return model(**model_inputs, return_loss=False)

    @staticmethod
    def get_partition_cfg(partition_type: str) -> Dict:
        """Get a certain partition config.

        Args:
            partition_type (str): A string specifying partition type.

        Returns:
            dict: A dictionary of partition config.

        Raises:
            NotImplementedError: Always; partitioning is not supported for
                classification yet.
        """
        raise NotImplementedError('Not supported yet.')

    @staticmethod
    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
        """Get input tensor from input data.

        Args:
            input_data (tuple): Input data containing meta info and image
                tensor.

        Returns:
            torch.Tensor: An image in `Tensor`.
        """
        return input_data['img']

    @staticmethod
    def evaluate_outputs(model_cfg: mmcv.Config,
                         outputs: list,
                         dataset: Dataset,
                         metrics: Optional[str] = None,
                         out: Optional[str] = None,
                         metric_options: Optional[dict] = None,
                         format_only: bool = False,
                         log_file: Optional[str] = None) -> None:
        """Perform post-processing to predictions of model.

        Args:
            model_cfg (mmcv.Config): The model config.
            outputs (list): A list of predictions of model inference.
            dataset (Dataset): Input dataset to run test.
            metrics (str): Evaluation metrics, which depends on
                the codebase and the dataset, e.g., "mAP" in mmcls.
            out (str): Output result file in pickle format, Default: None.
            metric_options (dict): Custom options for evaluation, will be
                kwargs for dataset.evaluate() function. Default: None.
            format_only (bool): Format the output results without perform
                evaluation. It is useful when you want to format the result
                to a specific format and submit it to the test server.
                Default: False.
            log_file (str | None): The file to write the evaluation results.
                Defaults to `None` and the results will only print on stdout.
        """
        import warnings
        from mmcv.utils import get_logger
        logger = get_logger('test', log_file=log_file, log_level=logging.INFO)
        if metrics:
            results = dataset.evaluate(outputs, metrics, metric_options)
            for k, v in results.items():
                logger.info(f'{k} : {v:.2f}')
        else:
            warnings.warn('Evaluation metrics are not specified.')
            # No metric requested: fall back to reporting raw predictions.
            scores = np.vstack(outputs)
            pred_score = np.max(scores, axis=1)
            pred_label = np.argmax(scores, axis=1)
            pred_class = [dataset.CLASSES[lb] for lb in pred_label]
            results = {
                'pred_score': pred_score,
                'pred_label': pred_label,
                'pred_class': pred_class
            }
            if not out:
                logger.info('the predicted result for the first element is '
                            f'pred_score = {pred_score[0]:.2f}, '
                            f'pred_label = {pred_label[0]} '
                            f'and pred_class = {pred_class[0]}. '
                            'Specify --out to save all results to files.')
        if out:
            logger.debug(f'writing results to {out}')
            mmcv.dump(results, out)

    def get_preprocess(self) -> Dict:
        """Get the preprocess information for SDK.

        Return:
            dict: Composed of the preprocess information.
        """
        input_shape = get_input_shape(self.deploy_cfg)
        cfg = process_model_config(self.model_cfg, '', input_shape)
        preprocess = cfg.data.test.pipeline
        return preprocess

    def get_postprocess(self) -> Dict:
        """Get the postprocess information for SDK.

        Return:
            dict: Composed of the postprocess information.
        """
        postprocess = self.model_cfg.model.head
        assert 'topk' in postprocess, 'model config lack topk'
        postprocess.topk = max(postprocess.topk)
        return postprocess

    def get_model_name(self) -> str:
        """Get the model name.

        Return:
            str: the name of the model.
        """
        # Fix: the original left the second half of each message as a bare
        # string-literal statement on the next line, so the assert messages
        # were truncated ('backbone not in model ' / 'backbone contains ').
        assert 'backbone' in self.model_cfg.model, \
            'backbone not in model config'
        assert 'type' in self.model_cfg.model.backbone, \
            'backbone contains no type'
        name = self.model_cfg.model.backbone.type.lower()
        return name
| [
"mmcv.utils.get_logger",
"numpy.argmax",
"mmcv.parallel.scatter",
"mmdeploy.utils.config_utils.get_input_shape",
"mmcls.datasets.pipelines.Compose",
"numpy.max",
"mmcv.dump",
"mmdeploy.utils.get_root_logger",
"warnings.warn",
"mmcls.apis.init_model",
"mmcv.parallel.collate",
"mmcv.imread",
"... | [((3527, 3597), 'mmcls.apis.init_model', 'init_model', (['self.model_cfg', 'model_checkpoint', 'self.device', 'cfg_options'], {}), '(self.model_cfg, model_checkpoint, self.device, cfg_options)\n', (3537, 3597), False, 'from mmcls.apis import init_model\n'), ((4615, 4646), 'mmcls.datasets.pipelines.Compose', 'Compose', (['cfg.data.test.pipeline'], {}), '(cfg.data.test.pipeline)\n', (4622, 4646), False, 'from mmcls.datasets.pipelines import Compose\n'), ((4697, 4731), 'mmcv.parallel.collate', 'collate', (['[data]'], {'samples_per_gpu': '(1)'}), '([data], samples_per_gpu=1)\n', (4704, 4731), False, 'from mmcv.parallel import collate, scatter\n'), ((5824, 5838), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (5830, 5838), True, 'import numpy as np\n'), ((5860, 5877), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (5869, 5877), True, 'import numpy as np\n'), ((8953, 9014), 'mmcv.utils.get_logger', 'get_logger', (['"""test"""'], {'log_file': 'log_file', 'log_level': 'logging.INFO'}), "('test', log_file=log_file, log_level=logging.INFO)\n", (8963, 9014), False, 'from mmcv.utils import get_logger\n'), ((10321, 10353), 'mmdeploy.utils.config_utils.get_input_shape', 'get_input_shape', (['self.deploy_cfg'], {}), '(self.deploy_cfg)\n', (10336, 10353), False, 'from mmdeploy.utils.config_utils import get_input_shape\n'), ((5688, 5706), 'mmcv.imread', 'mmcv.imread', (['image'], {}), '(image)\n', (5699, 5706), False, 'import mmcv\n'), ((9222, 9276), 'warnings.warn', 'warnings.warn', (['"""Evaluation metrics are not specified."""'], {}), "('Evaluation metrics are not specified.')\n", (9235, 9276), False, 'import warnings\n'), ((9298, 9316), 'numpy.vstack', 'np.vstack', (['outputs'], {}), '(outputs)\n', (9307, 9316), True, 'import numpy as np\n'), ((9342, 9364), 'numpy.max', 'np.max', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (9348, 9364), True, 'import numpy as np\n'), ((9390, 9415), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), 
'(scores, axis=1)\n', (9399, 9415), True, 'import numpy as np\n'), ((10098, 10121), 'mmcv.dump', 'mmcv.dump', (['results', 'out'], {}), '(results, out)\n', (10107, 10121), False, 'import mmcv\n'), ((1589, 1606), 'mmdeploy.utils.get_root_logger', 'get_root_logger', ([], {}), '()\n', (1604, 1606), False, 'from mmdeploy.utils import Task, get_root_logger\n'), ((4820, 4848), 'mmcv.parallel.scatter', 'scatter', (['data', '[self.device]'], {}), '(data, [self.device])\n', (4827, 4848), False, 'from mmcv.parallel import collate, scatter\n')] |
import argparse
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch
#from apex import amp
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import random
import re
import json
from transformers import BertTokenizer, AdamW, BertModel, BertPreTrainedModel, BertConfig, get_linear_schedule_with_warmup
def get_class_accuracy(logits, labels):
    """Return classification accuracy for a batch of logits.

    Args:
        logits: (batch, num_classes) torch tensor of raw class scores.
        labels: array-like of int gold labels, one per row of ``logits``.

    Returns:
        tuple: (accuracy as a float, number of samples).
    """
    predictions = np.argmax(F.softmax(logits, dim=1).cpu().data.numpy(), axis=1)
    # Bug fix: the original called np.sum(predictions=labels) -- a keyword
    # argument typo that raises TypeError -- instead of comparing the arrays.
    return np.float32(np.sum(predictions == labels)) / len(labels), len(labels)
def get_position_accuracy(logits, labels):
    """Accuracy over positions whose label is valid (>= 0).

    Labels below zero mark "no answer" positions and are excluded from
    both the numerator and the denominator.

    Returns:
        tuple: (accuracy as a float, number of valid positions).
    """
    predicted = np.argmax(F.softmax(logits, dim=1).cpu().data.numpy(), axis=1)
    valid = 0
    correct = 0
    for idx, label in enumerate(labels):
        if label < 0:
            continue
        valid += 1
        if predicted[idx] == label:
            correct += 1
    if valid == 0:
        # Avoid division by zero when the whole batch is unlabeled.
        valid = 1e-7
    return np.float32(correct) / valid, valid
class AverageMeter(object):
    """Tracks the most recent value and a count-weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.count += n
        self.sum += val * n
        self.val = val
        self.avg = self.sum / self.count
class TFQADataset(Dataset):
    """Minimal Dataset that serves example ids; the Collator does the work."""

    def __init__(self, id_list):
        # Only the ids are stored; features are built lazily at collate time.
        self.id_list = id_list

    def __len__(self):
        return len(self.id_list)

    def __getitem__(self, index):
        return self.id_list[index]
class Collator(object):
    """Builds padded BERT input batches from raw TF-QA examples.

    For every example id it emits TWO rows: an even-indexed positive row
    (the annotated long-answer candidate) and an odd-indexed negative row
    (a sampled non-answer candidate).
    """

    def __init__(self, data_dict, tokenizer, max_seq_len=384, max_question_len=64):
        # data_dict: example_id -> preprocessed fields built in main()
        #   (question_text, annotations, positive/negative text and spans).
        # tokenizer: a BERT wordpiece tokenizer (see transformers.BertTokenizer).
        self.data_dict = data_dict
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len
        self.max_question_len = max_question_len

    def _get_positive_input_ids(self, data, question_tokens):
        """Tokenize the positive candidate and map the short-answer span.

        Returns (candidate_tokens, start_position, end_position) where the
        positions are token indices into the final [CLS] question [SEP]
        answer [SEP] sequence, or -1 when no short answer falls inside the
        truncated candidate.
        """
        max_answer_tokens = self.max_seq_len-len(question_tokens)-3 # [CLS],[SEP],[SEP]
        candidate_start = data['positive_start']
        candidate_end = data['positive_end']
        candidate_words = data['positive_text']
        # words_to_tokens_index[i] = wordpiece offset of word i, used below
        # to translate word-level answer spans into token positions.
        words_to_tokens_index = []
        candidate_tokens = []
        for i, word in enumerate(candidate_words):
            words_to_tokens_index.append(len(candidate_tokens))
            if re.match(r'<.+>', word): # remove paragraph tag
                continue
            tokens = self.tokenizer.tokenize(word)  # wordpiece-split one word
            if len(candidate_tokens)+len(tokens) > max_answer_tokens:
                # Truncate: keep only as many wordpieces as fit the budget.
                break
            candidate_tokens += tokens
        start_position = -1
        end_position = -1
        if data['annotations'][0]['short_answers']:
            start_position1 = data['annotations'][0]['short_answers'][0]['start_token']
            end_position1 = data['annotations'][0]['short_answers'][0]['end_token']
            # Only map the span if it lies fully inside this candidate and
            # survived truncation; +2 accounts for [CLS] and [SEP] prefixes.
            if (start_position1 >= candidate_start and end_position1 <= candidate_end) and ((end_position1-candidate_start) < len(words_to_tokens_index)):
                start_position = words_to_tokens_index[start_position1-candidate_start]+len(question_tokens)+2
                end_position = words_to_tokens_index[end_position1-candidate_start]+len(question_tokens)+2
        return candidate_tokens, start_position, end_position

    def _get_negative_input_ids(self, data, question_tokens):
        """Tokenize the negative candidate; span labels are always (-1, -1)."""
        max_answer_tokens = self.max_seq_len-len(question_tokens)-3 # [CLS],[SEP],[SEP]
        candidate_start = data['negative_start']
        candidate_end = data['negative_end']
        candidate_words = data['negative_text']
        words_to_tokens_index = []
        candidate_tokens = []
        for i, word in enumerate(candidate_words):
            words_to_tokens_index.append(len(candidate_tokens))
            if re.match(r'<.+>', word): # remove paragraph tag
                continue
            tokens = self.tokenizer.tokenize(word)
            if len(candidate_tokens)+len(tokens) > max_answer_tokens:
                break
            candidate_tokens += tokens
        start_position = -1
        end_position = -1
        return candidate_tokens, start_position, end_position

    def __call__(self, batch_ids):
        """Collate a list of example ids into padded tensors.

        Returns:
            tuple of tensors: (input_ids, attention_mask, token_type_ids,
            y_start, y_end, y_class), each with 2*len(batch_ids) rows.
        """
        batch_size = 2*len(batch_ids)
        batch_input_ids = np.zeros((batch_size, self.max_seq_len), dtype=np.int64)
        batch_token_type_ids = np.ones((batch_size, self.max_seq_len), dtype=np.int64)
        batch_y_start = np.zeros((batch_size,), dtype=np.int64)
        batch_y_end = np.zeros((batch_size,), dtype=np.int64)
        batch_y = np.zeros((batch_size,), dtype=np.int64)
        for i, doc_id in enumerate(batch_ids):
            data = self.data_dict[doc_id]
            # Class label for the positive row: 4=YES, 3=NO, 2=short answer,
            # 1=long answer only, 0=no answer.
            annotations = data['annotations'][0]
            if annotations['yes_no_answer'] == 'YES':
                batch_y[i*2] = 4
            elif annotations['yes_no_answer'] == 'NO':
                batch_y[i*2] = 3
            elif annotations['short_answers']:
                batch_y[i*2] = 2
            elif annotations['long_answer']['candidate_index'] != -1:
                batch_y[i*2] = 1
            batch_y[i*2+1] = 0
            # get positive and negative samples
            question_tokens = self.tokenizer.tokenize(data['question_text'])[:self.max_question_len]
            # positive row at index i*2
            answer_tokens, start_position, end_position = self._get_positive_input_ids(data, question_tokens)
            input_tokens = ['[CLS]'] + question_tokens + ['[SEP]'] + answer_tokens + ['[SEP]']
            #if annotations['short_answers']:
            #    print(data['question_text'],"[AAA]",input_tokens[start_position:end_position])
            input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
            batch_input_ids[i*2, :len(input_ids)] = input_ids
            # Segment ids: 0 up to and including the first id 102 (assumed to
            # be [SEP] in the BERT vocab -- TODO confirm), then 1 after it.
            batch_token_type_ids[i*2, :len(input_ids)] = [0 if k<=input_ids.index(102) else 1 for k in range(len(input_ids))]
            batch_y_start[i*2] = start_position
            batch_y_end[i*2] = end_position
            # negative row at index i*2+1
            answer_tokens, start_position, end_position = self._get_negative_input_ids(data, question_tokens)
            input_tokens = ['[CLS]'] + question_tokens + ['[SEP]'] + answer_tokens + ['[SEP]']
            input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
            batch_token_type_ids[i*2+1, :len(input_ids)] = [0 if k<=input_ids.index(102) else 1 for k in range(len(input_ids))]
            batch_input_ids[i*2+1, :len(input_ids)] = input_ids
            batch_y_start[i*2+1] = start_position
            batch_y_end[i*2+1] = end_position
        # Attention mask: 1 wherever a real (non-padding) token id is present.
        batch_attention_mask = batch_input_ids > 0
        return torch.from_numpy(batch_input_ids), torch.from_numpy(batch_attention_mask), torch.from_numpy(batch_token_type_ids), torch.LongTensor(batch_y_start), torch.LongTensor(batch_y_end), torch.LongTensor(batch_y)
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for QA and classification tasks.

    Parameters
    ----------
    config : transformers.BertConfig. Configuration class for BERT.

    Returns
    -------
    start_logits : torch.Tensor with shape (batch_size, sequence_size).
        Starting scores of each tokens.
    end_logits : torch.Tensor with shape (batch_size, sequence_size).
        Ending scores of each tokens.
    classifier_logits : torch.Tensor with shape (batch_size, num_classes).
        Classification scores of each labels.
    """

    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        # Attribute names are part of the checkpoint layout (state_dict keys)
        # and must not change.
        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)  # start/end scores
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None,
                position_ids=None, head_mask=None):
        encoder_outputs = self.bert(input_ids,
                                    attention_mask=attention_mask,
                                    token_type_ids=token_type_ids,
                                    position_ids=position_ids,
                                    head_mask=head_mask)
        token_states = encoder_outputs[0]
        pooled = encoder_outputs[1]
        # Token-level head: one (start, end) score pair per position.
        span_scores = self.qa_outputs(token_states)
        start_logits = span_scores[..., 0]
        end_logits = span_scores[..., 1]
        # Sequence-level head on the pooled [CLS] representation.
        classifier_logits = self.classifier(self.dropout(pooled))
        return start_logits, end_logits, classifier_logits
def loss_fn(preds, labels):
    """Cross-entropy losses for the start, end and classification heads.

    Positions labelled -1 (no answer span) are ignored by the two span
    losses; the class loss uses every sample.
    """
    start_logits, end_logits, class_logits = preds
    start_targets, end_targets, class_targets = labels
    # One criterion instance can serve both span heads (it is stateless).
    span_criterion = nn.CrossEntropyLoss(ignore_index=-1)
    start_loss = span_criterion(start_logits, start_targets)
    end_loss = span_criterion(end_logits, end_targets)
    class_loss = nn.CrossEntropyLoss()(class_logits, class_targets)
    return start_loss, end_loss, class_loss
def random_sample_negative_candidates(distribution):
    """Draw one index from ``distribution``, treated as sampling weights.

    Walks the cumulative sum until it exceeds a uniform draw; if the
    weights sum to less than the draw, the last index is returned.
    """
    threshold = np.random.random()
    cumulative = 0.0
    for index, weight in enumerate(distribution):
        cumulative += weight
        if cumulative > threshold:
            break
    return index
def main():
    """Distributed (one process per GPU) fine-tuning of BERT on TF-QA.

    Reads simplified-nq-train.jsonl, builds positive/negative candidate
    pairs per answerable example, trains for one pass and saves the
    checkpoint from rank 0.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    args = parser.parse_args()
    # Bind this process to its GPU before initializing NCCL.
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    torch.distributed.init_process_group(backend="nccl")
    args.device = device
    # Seed every RNG source for reproducibility.
    seed = 1001
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # prepare input: stream the JSONL file and keep only answerable examples
    # that have more than one long-answer candidate.
    json_dir = '../../input/simplified-nq-train.jsonl'
    max_data = 9999999999
    id_list = []
    data_dict = {}
    with open(json_dir) as f:
        for n, line in tqdm(enumerate(f)):
            if n > max_data:
                break
            data = json.loads(line)
            # An example is "positive" if it has any form of gold answer.
            is_pos = False
            annotations = data['annotations'][0]
            if annotations['yes_no_answer'] == 'YES':
                is_pos = True
            elif annotations['yes_no_answer'] == 'NO':
                is_pos = True
            elif annotations['short_answers']:
                is_pos = True
            elif annotations['long_answer']['candidate_index'] != -1:
                is_pos = True
            if is_pos and len(data['long_answer_candidates'])>1:
                data_id = data['example_id']
                id_list.append(data_id)
                # Uniform sampling over candidates, excluding the gold one,
                # to pick a negative long-answer candidate.
                distribution = np.ones((len(data['long_answer_candidates']),),dtype=np.float32)
                if is_pos:
                    distribution[data['annotations'][0]['long_answer']['candidate_index']] = 0.
                distribution /= len(distribution)
                negative_candidate_index = random_sample_negative_candidates(distribution)
                #
                doc_words = data['document_text'].split()
                # negative candidate span (word-level)
                candidate = data['long_answer_candidates'][negative_candidate_index]
                negative_candidate_words = doc_words[candidate['start_token']:candidate['end_token']]
                negative_candidate_start = candidate['start_token']
                negative_candidate_end = candidate['end_token']
                # positive (gold) candidate span
                candidate = data['long_answer_candidates'][annotations['long_answer']['candidate_index']]
                positive_candidate_words = doc_words[candidate['start_token']:candidate['end_token']]
                positive_candidate_start = candidate['start_token']
                positive_candidate_end = candidate['end_token']
                # Cache everything the Collator needs, keyed by example id.
                data_dict[data_id] = {
                    'question_text': data['question_text'],
                    'annotations': data['annotations'],
                    'positive_text': positive_candidate_words,
                    'positive_start': positive_candidate_start,
                    'positive_end': positive_candidate_end,
                    'negative_text': negative_candidate_words,
                    'negative_start': negative_candidate_start,
                    'negative_end': negative_candidate_end,
                }
    print(len(id_list))
    random.shuffle(id_list)
    # hyperparameters
    max_seq_len = 384
    max_question_len = 64
    learning_rate = 0.00002
    batch_size = 4
    ep = 0  # epoch index, used only for logging
    # build model
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    model_path = '../../huggingface_pretrained/bert-base-uncased/'
    config = BertConfig.from_pretrained(model_path)
    config.num_labels = 5
    tokenizer = BertTokenizer.from_pretrained(model_path, do_lower_case=True)  # wordpiece tokenizer shared with the Collator
    model = BertForQuestionAnswering.from_pretrained(model_path, config=config)
    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    model.to(args.device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    # NOTE(review): `amp` is undefined here -- the `from apex import amp`
    # import at the top of the file is commented out, so this call raises
    # NameError at runtime. Restore the import or remove mixed precision.
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1",verbosity=0)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
    )
    # training
    # iterator for training: each rank sees its own shard via the sampler.
    train_datagen = TFQADataset(id_list=id_list)
    train_sampler = DistributedSampler(train_datagen)
    train_collate = Collator(data_dict=data_dict,
                             tokenizer=tokenizer,
                             max_seq_len=max_seq_len,
                             max_question_len=max_question_len)
    train_generator = DataLoader(dataset=train_datagen,
                                 sampler=train_sampler,
                                 collate_fn=train_collate,
                                 batch_size=batch_size,
                                 num_workers=3,
                                 pin_memory=True)
    # train
    losses1 = AverageMeter() # start
    losses2 = AverageMeter() # end
    losses3 = AverageMeter() # class
    accuracies1 = AverageMeter() # start
    accuracies2 = AverageMeter() # end
    accuracies3 = AverageMeter() # class
    model.train()
    for j,(batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_y_start, batch_y_end, batch_y) in enumerate(train_generator):
        batch_input_ids = batch_input_ids.cuda()
        batch_attention_mask = batch_attention_mask.cuda()
        batch_token_type_ids = batch_token_type_ids.cuda()
        labels1 = batch_y_start.cuda()
        labels2 = batch_y_end.cuda()
        labels3 = batch_y.cuda()
        logits1, logits2, logits3 = model(batch_input_ids, batch_attention_mask, batch_token_type_ids)
        y_true = (batch_y_start, batch_y_end, batch_y)  # NOTE(review): unused
        loss1, loss2, loss3 = loss_fn((logits1, logits2, logits3), (labels1, labels2, labels3))
        loss = loss1+loss2+loss3
        acc1, n_position1 = get_position_accuracy(logits1, labels1)
        acc2, n_position2 = get_position_accuracy(logits2, labels2)
        acc3, n_position3 = get_position_accuracy(logits3, labels3)
        losses1.update(loss1.item(), n_position1)
        losses2.update(loss2.item(), n_position2)
        losses3.update(loss3.item(), n_position3)
        accuracies1.update(acc1, n_position1)
        accuracies2.update(acc2, n_position2)
        # NOTE(review): this weights acc3 by n_position2 -- n_position3 was
        # probably intended; verify before relying on the logged accuracy.
        accuracies3.update(acc3, n_position2)
        optimizer.zero_grad()
        # Mixed-precision backward (requires apex's amp, see note above).
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
    if args.local_rank == 0:
        print('epoch: {}, train_loss1: {}, train_loss2: {}, train_loss3: {}, train_acc1: {}, train_acc2: {}, train_acc3: {}'.format(ep,losses1.avg,losses2.avg,losses3.avg,accuracies1.avg,accuracies2.avg,accuracies3.avg), flush=True)
        out_dir = 'weights/epoch0/'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # Save the unwrapped module so the checkpoint loads without DDP.
        torch.save(model.module.state_dict(), out_dir+'pytorch_model.bin')
# Entry point: launched once per process by the distributed launcher
# (e.g. torch.distributed.launch), which supplies --local_rank.
if __name__ == "__main__":
    main()
| [
"torch.nn.Dropout",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"random.shuffle",
"transformers.BertModel",
"numpy.ones",
"torch.device",
"json.loads",
"torch.utils.data.DataLoader",
"torch.nn.parallel.DistributedDataParallel",
"os.path.exists",
"torch.utils.data.distributed... | [((9488, 9506), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (9504, 9506), True, 'import numpy as np\n'), ((9689, 9714), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9712, 9714), False, 'import argparse\n'), ((9867, 9905), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (9888, 9905), False, 'import torch\n'), ((9919, 9956), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (9931, 9956), False, 'import torch\n'), ((9961, 10013), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (9997, 10013), False, 'import torch\n'), ((10060, 10077), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (10071, 10077), False, 'import random\n'), ((10082, 10102), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10096, 10102), True, 'import numpy as np\n'), ((10107, 10130), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (10124, 10130), False, 'import torch\n'), ((10135, 10163), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (10157, 10163), False, 'import torch\n'), ((13085, 13108), 'random.shuffle', 'random.shuffle', (['id_list'], {}), '(id_list)\n', (13099, 13108), False, 'import random\n'), ((13510, 13548), 'transformers.BertConfig.from_pretrained', 'BertConfig.from_pretrained', (['model_path'], {}), '(model_path)\n', (13536, 13548), False, 'from transformers import BertTokenizer, AdamW, BertModel, BertPreTrainedModel, BertConfig, get_linear_schedule_with_warmup\n'), ((13591, 13652), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_path'], {'do_lower_case': '(True)'}), '(model_path, do_lower_case=True)\n', (13620, 13652), False, 'from transformers import BertTokenizer, AdamW, BertModel, 
BertPreTrainedModel, BertConfig, get_linear_schedule_with_warmup\n'), ((14124, 14267), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.\n local_rank], output_device=args.local_rank, find_unused_parameters=True)\n', (14165, 14267), False, 'import torch\n'), ((14400, 14433), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_datagen'], {}), '(train_datagen)\n', (14418, 14433), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((14704, 14846), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_datagen', 'sampler': 'train_sampler', 'collate_fn': 'train_collate', 'batch_size': 'batch_size', 'num_workers': '(3)', 'pin_memory': '(True)'}), '(dataset=train_datagen, sampler=train_sampler, collate_fn=\n train_collate, batch_size=batch_size, num_workers=3, pin_memory=True)\n', (14714, 14846), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((4543, 4599), 'numpy.zeros', 'np.zeros', (['(batch_size, self.max_seq_len)'], {'dtype': 'np.int64'}), '((batch_size, self.max_seq_len), dtype=np.int64)\n', (4551, 4599), True, 'import numpy as np\n'), ((4631, 4686), 'numpy.ones', 'np.ones', (['(batch_size, self.max_seq_len)'], {'dtype': 'np.int64'}), '((batch_size, self.max_seq_len), dtype=np.int64)\n', (4638, 4686), True, 'import numpy as np\n'), ((4712, 4751), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {'dtype': 'np.int64'}), '((batch_size,), dtype=np.int64)\n', (4720, 4751), True, 'import numpy as np\n'), ((4774, 4813), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {'dtype': 'np.int64'}), '((batch_size,), dtype=np.int64)\n', (4782, 4813), True, 'import numpy as np\n'), ((4832, 4871), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {'dtype': 'np.int64'}), '((batch_size,), 
dtype=np.int64)\n', (4840, 4871), True, 'import numpy as np\n'), ((7871, 7888), 'transformers.BertModel', 'BertModel', (['config'], {}), '(config)\n', (7880, 7888), False, 'from transformers import BertTokenizer, AdamW, BertModel, BertPreTrainedModel, BertConfig, get_linear_schedule_with_warmup\n'), ((7915, 7947), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', '(2)'], {}), '(config.hidden_size, 2)\n', (7924, 7947), True, 'import torch.nn as nn\n'), ((7984, 8022), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (7994, 8022), True, 'import torch.nn as nn\n'), ((8049, 8097), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.num_labels'], {}), '(config.hidden_size, config.num_labels)\n', (8058, 8097), True, 'import torch.nn as nn\n'), ((9173, 9209), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (9192, 9209), True, 'import torch.nn as nn\n'), ((9252, 9288), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (9271, 9288), True, 'import torch.nn as nn\n'), ((9329, 9350), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (9348, 9350), True, 'import torch.nn as nn\n'), ((13401, 13428), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (13426, 13428), False, 'import torch\n'), ((13907, 13934), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (13932, 13934), False, 'import torch\n'), ((1126, 1149), 'numpy.float32', 'np.float32', (['sum_correct'], {}), '(sum_correct)\n', (1136, 1149), True, 'import numpy as np\n'), ((2552, 2574), 're.match', 're.match', (['"""<.+>"""', 'word'], {}), "('<.+>', word)\n", (2560, 2574), False, 'import re\n'), ((4053, 4075), 're.match', 're.match', (['"""<.+>"""', 'word'], {}), "('<.+>', word)\n", (4061, 4075), False, 'import re\n'), ((6959, 6992), 'torch.from_numpy', 
'torch.from_numpy', (['batch_input_ids'], {}), '(batch_input_ids)\n', (6975, 6992), False, 'import torch\n'), ((6994, 7032), 'torch.from_numpy', 'torch.from_numpy', (['batch_attention_mask'], {}), '(batch_attention_mask)\n', (7010, 7032), False, 'import torch\n'), ((7034, 7072), 'torch.from_numpy', 'torch.from_numpy', (['batch_token_type_ids'], {}), '(batch_token_type_ids)\n', (7050, 7072), False, 'import torch\n'), ((7074, 7105), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_start'], {}), '(batch_y_start)\n', (7090, 7105), False, 'import torch\n'), ((7107, 7136), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_end'], {}), '(batch_y_end)\n', (7123, 7136), False, 'import torch\n'), ((7138, 7163), 'torch.LongTensor', 'torch.LongTensor', (['batch_y'], {}), '(batch_y)\n', (7154, 7163), False, 'import torch\n'), ((10493, 10509), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (10503, 10509), False, 'import json\n'), ((16934, 16957), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (16948, 16957), False, 'import os\n'), ((16971, 16991), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (16982, 16991), False, 'import os\n'), ((686, 712), 'numpy.sum', 'np.sum', ([], {'predictions': 'labels'}), '(predictions=labels)\n', (692, 712), True, 'import numpy as np\n'), ((612, 636), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (621, 636), True, 'import torch.nn.functional as F\n'), ((813, 837), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (822, 837), True, 'import torch.nn.functional as F\n')] |
# Copyright (C) 2014 <NAME>
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from spglib import (
get_stabilized_reciprocal_mesh, relocate_BZ_grid_address,
get_symmetry_dataset, get_pointgroup)
from phonopy.structure.brillouin_zone import get_qpoints_in_Brillouin_zone
from phonopy.structure.symmetry import (
get_lattice_vector_equivalence, get_pointgroup_operations,
collect_unique_rotations)
from phonopy.structure.cells import (
get_primitive_matrix_by_centring, estimate_supercell_matrix,
estimate_supercell_matrix_from_pointgroup, determinant)
from phonopy.structure.snf import SNF3x3
from phonopy.harmonic.force_constants import similarity_transformation
def length2mesh(length, lattice, rotations=None):
    """Convert length to mesh for q-point sampling

    This conversion for each reciprocal axis follows VASP convention by
        N = max(1, int(l * |a|^* + 0.5))
    'int' means rounding down, not rounding to nearest integer.

    Parameters
    ----------
    length : float
        Length having the unit of direct space length.
    lattice : array_like
        Basis vectors of primitive cell in row vectors.
        dtype='double', shape=(3, 3)
    rotations: array_like, optional
        Rotation matrices in real space. When given, mesh numbers that are
        symmetrically reasonable are returned. Default is None.
        dtype='intc', shape=(rotations, 3, 3)

    Returns
    -------
    array_like
        dtype=int, shape=(3,)
    """
    rec_lattice = np.linalg.inv(lattice)
    # Lengths of reciprocal basis vectors (columns of rec_lattice).
    rec_lat_lengths = np.sqrt(np.diagonal(np.dot(rec_lattice.T, rec_lattice)))
    mesh_numbers = np.rint(rec_lat_lengths * length).astype(int)
    if rotations is not None:
        reclat_equiv = get_lattice_vector_equivalence(
            [r.T for r in np.array(rotations)])
        m = mesh_numbers
        mesh_equiv = [m[1] == m[2], m[2] == m[0], m[0] == m[1]]
        for i, pair in enumerate(([1, 2], [2, 0], [0, 1])):
            # Bug fix: was ``not mesh_equiv`` (truthiness of the whole
            # list, always False here), which made this branch dead code.
            # Each axis pair must be tested individually.
            if reclat_equiv[i] and not mesh_equiv[i]:
                # Symmetry-equivalent axes get the larger mesh number.
                m[pair] = max(m[pair])
    # Each direction samples at least one point.
    return np.maximum(mesh_numbers, [1, 1, 1])
def get_qpoints(mesh_numbers,
                reciprocal_lattice,  # column vectors
                q_mesh_shift=None,  # Monkhorst-Pack style grid shift
                is_gamma_center=True,
                is_time_reversal=True,
                fit_in_BZ=True,
                rotations=None,  # Point group operations in real space
                is_mesh_symmetry=True):
    """Return irreducible q-points and their geometric weights.

    Thin convenience wrapper around ``GridPoints``; see that class for
    the meaning of every argument.
    """
    grid = GridPoints(mesh_numbers,
                      reciprocal_lattice,
                      q_mesh_shift=q_mesh_shift,
                      is_gamma_center=is_gamma_center,
                      is_time_reversal=is_time_reversal,
                      fit_in_BZ=fit_in_BZ,
                      rotations=rotations,
                      is_mesh_symmetry=is_mesh_symmetry)
    return grid.qpoints, grid.weights
def extract_ir_grid_points(grid_mapping_table):
    """Extract irreducible grid point indices and their multiplicities.

    Parameters
    ----------
    grid_mapping_table : ndarray
        Index mapping from every grid point to its irreducible
        representative.

    Returns
    -------
    ir_grid_points : ndarray
        Sorted unique irreducible grid point indices.
        dtype is that of ``grid_mapping_table``.
    ir_weights : ndarray
        Number of grid points mapped onto each irreducible grid point.
        dtype='intc'
    """
    # np.unique with return_counts replaces the original per-element
    # Python loop that incremented a weights array (same values, vectorized).
    unique_gps, counts = np.unique(grid_mapping_table, return_counts=True)
    ir_grid_points = np.array(unique_gps, dtype=grid_mapping_table.dtype)
    ir_weights = np.array(counts, dtype='intc')
    return ir_grid_points, ir_weights
class GridPoints(object):
    """Class to generate irreducible grid points on uniform mesh grids
    Attributes
    ----------
    mesh_numbers: ndarray
        Mesh numbers along a, b, c axes.
        dtype='intc'
        shape=(3,)
    reciprocal_lattice: array_like
        Basis vectors in reciprocal space. a*, b*, c* are given in column
        vectors.
        dtype='double'
        shape=(3, 3)
    qpoints: ndarray
        q-points in reduced coordinates of reciprocal lattice
        dtype='double'
        shape=(ir-grid points, 3)
    weights: ndarray
        Geometric q-point weights. Its sum is the number of grid points.
        dtype='intc'
        shape=(ir-grid points,)
    grid_address: ndarray
        Addresses of all grid points represented by integers.
        dtype='intc'
        shape=(prod(mesh_numbers), 3)
    ir_grid_points: ndarray
        Indices of irreducible grid points in grid_address.
        dtype='uintp', shape=(ir-grid points,)
    grid_mapping_table: ndarray
        Index mapping table from all grid points to ir-grid points.
        dtype='uintp', shape=(prod(mesh_numbers),)
    """
    def __init__(self,
                 mesh_numbers,
                 reciprocal_lattice,
                 q_mesh_shift=None,  # Monkhorst-Pack style grid shift
                 is_gamma_center=True,
                 is_time_reversal=True,
                 fit_in_BZ=True,
                 rotations=None,  # Point group operations in real space
                 is_mesh_symmetry=True):  # Except for time reversal symmetry
        """
        Note
        ----
        Uniform mesh grids are made according to Monkhorst-Pack scheme, i.e.,
        for odd (even) numbers, centre are (are not) sampled. The Gamma-centre
        sampling is supported by ``is_gamma_center=True``.
        Parameters
        ----------
        mesh_numbers: array_like
            Mesh numbers along a, b, c axes.
            dtype='intc'
            shape=(3, )
        reciprocal_lattice: array_like
            Basis vectors in reciprocal space. a*, b*, c* are given in column
            vectors.
            dtype='double'
            shape=(3, 3)
        q_mesh_shift: array_like, optional, default None (no shift)
            Mesh shifts along a*, b*, c* axes with respect to neighboring grid
            points from the original mesh (Monkhorst-Pack or Gamma center).
            0.5 gives half grid shift. Normally 0 or 0.5 is given.
            Otherwise q-points symmetry search is not performed.
            dtype='double'
            shape=(3, )
        is_gamma_center: bool, default False
            Uniform mesh grids are generated centring at Gamma point but not
            the Monkhorst-Pack scheme.
        is_time_reversal: bool, optional, default True
            Time reversal symmetry is considered in symmetry search. By this,
            inversion symmetry is always included.
        fit_in_BZ: bool, optional, default True
        rotations: array_like, default None (only unitary operation)
            Rotation matrices in direct space. For each rotation matrix R,
            a point in crystallographic coordinates, x, is sent as x' = Rx.
            dtype='intc'
            shape=(rotations, 3, 3)
        is_mesh_symmetry: bool, optional, default True
            Whether symmetry search is done or not.
        """
        self._mesh = np.array(mesh_numbers, dtype='intc')
        self._rec_lat = reciprocal_lattice
        self._is_shift = self._shift2boolean(q_mesh_shift,
                                             is_gamma_center=is_gamma_center)
        self._is_time_reversal = is_time_reversal
        self._fit_in_BZ = fit_in_BZ
        self._rotations = rotations
        self._is_mesh_symmetry = is_mesh_symmetry
        self._ir_qpoints = None
        self._grid_address = None
        self._ir_grid_points = None
        self._ir_weights = None
        self._grid_mapping_table = None
        # _shift2boolean returns None for a general (neither zero nor half)
        # shift: then symmetry reduction is disabled, the unshifted grid is
        # generated, and the shift is applied afterwards by translation.
        if self._is_shift is None:
            self._is_mesh_symmetry = False
            self._is_shift = self._shift2boolean(None)
            self._set_grid_points()
            self._ir_qpoints += q_mesh_shift / self._mesh
            self._fit_qpoints_in_BZ()
        else:  # zero or half shift
            self._set_grid_points()
    @property
    def mesh_numbers(self):
        """Mesh numbers along a, b, c axes (dtype='intc', shape=(3,))."""
        return self._mesh
    @property
    def reciprocal_lattice(self):
        """Reciprocal basis vectors as passed in (column vectors)."""
        return self._rec_lat
    @property
    def grid_address(self):
        """Integer addresses of all grid points."""
        return self._grid_address
    def get_grid_address(self):
        warnings.warn("GridPoints.get_grid_address is deprecated."
                      "Use grid_address attribute.",
                      DeprecationWarning)
        return self.grid_address
    @property
    def ir_grid_points(self):
        """Indices of irreducible grid points in grid_address."""
        return self._ir_grid_points
    def get_ir_grid_points(self):
        warnings.warn("GridPoints.get_ir_grid_points is deprecated."
                      "Use ir_grid_points attribute.",
                      DeprecationWarning)
        return self.ir_grid_points
    @property
    def qpoints(self):
        """Irreducible q-points in reduced coordinates."""
        return self._ir_qpoints
    def get_ir_qpoints(self):
        warnings.warn("GridPoints.get_ir_qpoints is deprecated."
                      "Use points attribute.",
                      DeprecationWarning)
        return self.qpoints
    @property
    def weights(self):
        """Geometric weights of the irreducible q-points."""
        return self._ir_weights
    def get_ir_grid_weights(self):
        warnings.warn("GridPoints.get_ir_grid_weights is deprecated."
                      "Use weights attribute.",
                      DeprecationWarning)
        return self.weights
    @property
    def grid_mapping_table(self):
        """Mapping from every grid point to its irreducible representative."""
        return self._grid_mapping_table
    def get_grid_mapping_table(self):
        warnings.warn("GridPoints.get_grid_mapping_table is deprecated."
                      "Use grid_mapping_table attribute.",
                      DeprecationWarning)
        return self.grid_mapping_table
    def _set_grid_points(self):
        """Reduce grid points by symmetry, or by identity only."""
        # When the mesh is compatible with the point group, use the given
        # rotations; otherwise only identity (time reversal still optional).
        if self._is_mesh_symmetry and self._has_mesh_symmetry():
            self._set_ir_qpoints(self._rotations,
                                 is_time_reversal=self._is_time_reversal)
        else:
            self._set_ir_qpoints([np.eye(3, dtype='intc')],
                                 is_time_reversal=self._is_time_reversal)
    def _shift2boolean(self,
                       q_mesh_shift,
                       is_gamma_center=False,
                       tolerance=1e-5):
        """
        Tolerance is used to judge zero/half grid shift.
        This value is not necessary to be changed usually.
        """
        if q_mesh_shift is None:
            shift = np.zeros(3, dtype='double')
        else:
            shift = np.array(q_mesh_shift, dtype='double')
        # diffby2 ~ 0 iff each shift component is an integer multiple of 0.5.
        diffby2 = np.abs(shift * 2 - np.rint(shift * 2))
        if (diffby2 < 0.01).all():  # zero or half shift
            diff = np.abs(shift - np.rint(shift))
            if is_gamma_center:
                is_shift = list(diff > 0.1)
            else:  # Monkhorst-pack
                # For even mesh numbers the MP grid is already half-shifted,
                # so an explicit half shift cancels it (exclusive or).
                is_shift = list(np.logical_xor((diff > 0.1),
                                               (self._mesh % 2 == 0)) * 1)
        else:
            # General shift: caller must apply it by translating q-points.
            is_shift = None
        return is_shift
    def _has_mesh_symmetry(self):
        """Return True if mesh numbers are compatible with the rotations."""
        if self._rotations is None:
            return False
        m = self._mesh
        mesh_equiv = [m[1] == m[2], m[2] == m[0], m[0] == m[1]]
        lattice_equiv = get_lattice_vector_equivalence(
            [r.T for r in self._rotations])
        # Every pair of symmetry-equivalent axes must share a mesh number.
        return np.extract(lattice_equiv, mesh_equiv).all()
    def _fit_qpoints_in_BZ(self):
        """Move irreducible q-points into the first Brillouin zone."""
        qpoint_set_in_BZ = get_qpoints_in_Brillouin_zone(self._rec_lat,
                                                    self._ir_qpoints)
        # Each q-point may have several BZ images; take the first one.
        qpoints_in_BZ = np.array([q_set[0] for q_set in qpoint_set_in_BZ],
                                 dtype='double', order='C')
        self._ir_qpoints = qpoints_in_BZ
    def _set_ir_qpoints(self,
                        rotations,
                        is_time_reversal=True):
        """Run spglib mesh reduction and store ir-grid data."""
        grid_mapping_table, grid_address = get_stabilized_reciprocal_mesh(
            self._mesh,
            rotations,
            is_shift=self._is_shift,
            is_time_reversal=is_time_reversal)
        # is_shift entries of 1 mean half-grid shift (0.5 in grid units).
        shift = np.array(self._is_shift, dtype='intc') * 0.5
        if self._fit_in_BZ:
            grid_address, _ = relocate_BZ_grid_address(
                grid_address,
                self._mesh,
                self._rec_lat,
                is_shift=self._is_shift)
            # relocate_BZ_grid_address may append BZ-surface duplicates;
            # keep only the first prod(mesh) entries.
            self._grid_address = grid_address[:np.prod(self._mesh)]
        else:
            self._grid_address = grid_address
        (self._ir_grid_points,
         self._ir_weights) = extract_ir_grid_points(grid_mapping_table)
        self._ir_qpoints = np.array(
            (self._grid_address[self._ir_grid_points] + shift) / self._mesh,
            dtype='double', order='C')
        self._grid_mapping_table = grid_mapping_table
class GeneralizedRegularGridPoints(object):
    """Generalized regular grid points
    Method strategy in suggest mode
    -------------------------------
    1. Create conventional unit cell using spglib.
    2. Sample regular grid points for the conventional unit cell (mesh_numbers)
    3. Transformation matrix from primitive to conventional unit cell (inv_pmat)
    4. Get supercell multiplicities (mesh_numbers) from the conventional unit
       cell considering the lattice shape.
    5. mmat = (inv_pmat * mesh_numbers).T, which is related to the
       transformation from primitive cell to supercell.
    6. D = P.mmat.Q, where D = diag([n1, n2, n3])
    7. Grid points for primitive cell are
       [np.dot(Q, g) for g in ndindex((n1, n2, n3))].
    Method strategy in non-suggest mode
    -----------------------------------
    1. Find symmetry operations
    2. Determine point group and transformation matrix (tmat) from input cell
    3. Get supercell multiplicities (mesh_numbers) from the transformed cell
       considering the lattice shape.
    4. mmat = (tmat * mesh_numbers).T
    5. D = P.mmat.Q, where D = diag([n1, n2, n3])
    6. Grid points for primitive cell are
       [np.dot(Q, g) for g in ndindex((n1, n2, n3))].
    Attributes
    ----------
    grid_address : ndarray
        Grid addresses in integers.
        shape=(num_grid_points, 3), dtype='intc', order='C'
    qpoints : ndarray
        q-points with respect to basis vectors of input or standardized
        primitive cell.
        shape=(num_grid_points, 3), dtype='double', order='C'
    grid_matrix : ndarray
        Grid generating matrix.
        shape=(3,3), dtype='intc', order='C'
    matrix_to_primitive : ndarray or None
        None when ``suggest`` is False. Otherwise, transformation matrix from
        input cell to the suggested primitive cell.
        shape=(3,3), dtype='double', order='C'
    snf : SNF3x3
        SNF3x3 instance of grid generating matrix.
    """
    def __init__(self,
                 cell,
                 length,
                 suggest=True,
                 is_time_reversal=True,
                 x_fastest=True,
                 symprec=1e-5):
        """
        Parameters
        ----------
        cell : PhonopyAtoms
            Input cell.
        length : float
            Length having the unit of direct space length.
        suggest : bool, optional, default True
            With True, a standardized primitive cell is suggested and the grids
            are generated for it. With False, input cell is used.
        is_time_reversal: bool, optional, default True
            Time reversal symmetry is considered in symmetry search. By this,
            inversion symmetry is always included.
        x_fastest : bool, optional, default=True
            In grid generation, [[x, y, z], ...], x runs fastest when True,
            otherwise z runs fastest.
        symprec : float, optional, default=1e-5
            Symmetry tolerance passed to spglib's symmetry search.
        """
        self._cell = cell
        self._length = length
        self._suggest = suggest
        self._is_time_reversal = is_time_reversal
        self._x_fastest = x_fastest
        self._grid_address = None
        self._snf = None
        self._transformation_matrix = None
        self._grid_matrix = None
        self._reciprocal_operations = None
        # Build grid matrix + SNF, then addresses and q-points from them.
        self._prepare(cell, length, symprec)
        self._generate_grid_points()
        self._generate_q_points()
        self._reciprocal_operations = get_reciprocal_operations(
            self._sym_dataset['rotations'],
            self._transformation_matrix,
            self._snf.D,
            self._snf.Q,
            is_time_reversal=self._is_time_reversal)
    @property
    def grid_address(self):
        """Integer grid addresses, shape=(num_grid_points, 3)."""
        return self._grid_address
    @property
    def qpoints(self):
        """q-points in reduced coordinates, shape=(num_grid_points, 3)."""
        return self._qpoints
    @property
    def grid_matrix(self):
        """Grid generating matrix"""
        return self._grid_matrix
    @property
    def transformation_matrix(self):
        """Transformation matrix"""
        return self._transformation_matrix
    @property
    def snf(self):
        """SNF3x3 instance of grid generating matrix"""
        return self._snf
    @property
    def reciprocal_operations(self):
        """Rotation matrices acting on grid points in reciprocal space."""
        return self._reciprocal_operations
    def _prepare(self, cell, length, symprec):
        """Define grid generating matrix and run the SNF"""
        self._sym_dataset = get_symmetry_dataset(
            cell.totuple(), symprec=symprec)
        if self._suggest:
            self._set_grid_matrix_by_std_primitive_cell(cell, length)
        else:
            self._set_grid_matrix_by_input_cell(cell, length)
        self._snf = SNF3x3(self._grid_matrix)
        self._snf.run()
    def _set_grid_matrix_by_std_primitive_cell(self, cell, length):
        """Grid generating matrix based on standardized primitive cell"""
        tmat = self._sym_dataset['transformation_matrix']
        centring = self._sym_dataset['international'][0]
        pmat = get_primitive_matrix_by_centring(centring)
        # Conventional lattice reconstructed from the input cell.
        conv_lat = np.dot(np.linalg.inv(tmat).T, cell.cell)
        num_cells = np.prod(length2mesh(length, conv_lat))
        mesh_numbers = estimate_supercell_matrix(
            self._sym_dataset,
            max_num_atoms=num_cells * len(self._sym_dataset['std_types']))
        inv_pmat = np.linalg.inv(pmat)
        inv_pmat_int = np.rint(inv_pmat).astype(int)
        # inv_pmat must be an integer matrix within tolerance.
        assert (np.abs(inv_pmat - inv_pmat_int) < 1e-5).all()
        # transpose in reciprocal space
        self._grid_matrix = np.array(
            (inv_pmat_int * mesh_numbers).T, dtype='intc', order='C')
        # From input lattice to the primitive lattice in real space
        self._transformation_matrix = np.array(
            np.dot(np.linalg.inv(tmat), pmat), dtype='double', order='C')
    def _set_grid_matrix_by_input_cell(self, input_cell, length):
        """Grid generating matrix based on input cell"""
        pointgroup = get_pointgroup(self._sym_dataset['rotations'])
        # tmat: From input lattice to point group preserving lattice
        tmat = pointgroup[2]
        lattice = np.dot(input_cell.cell.T, tmat).T
        num_cells = np.prod(length2mesh(length, lattice))
        mesh_numbers = estimate_supercell_matrix_from_pointgroup(
            pointgroup[1], lattice, num_cells)
        # transpose in reciprocal space
        self._grid_matrix = np.array(
            np.multiply(tmat, mesh_numbers).T, dtype='intc', order='C')
        self._transformation_matrix = np.eye(3, dtype='double', order='C')
    def _generate_grid_points(self):
        """Enumerate integer grid addresses inside diag(D)."""
        d = np.diagonal(self._snf.D)
        if self._x_fastest:
            # x runs fastest.
            z, y, x = np.meshgrid(range(d[2]), range(d[1]), range(d[0]),
                                indexing='ij')
        else:
            # z runs fastest.
            x, y, z = np.meshgrid(range(d[0]), range(d[1]), range(d[2]),
                                indexing='ij')
        self._grid_address = np.array(np.c_[x.ravel(), y.ravel(), z.ravel()],
                                      dtype='intc', order='C')
    def _generate_q_points(self):
        """Convert grid addresses to q-points via Q.D^-1."""
        D_inv = np.linalg.inv(self._snf.D)
        qpoints = np.dot(
            self._grid_address, np.dot(self._snf.Q, D_inv).T)
        # Wrap into [-0.5, 0.5) style reduced coordinates.
        qpoints -= np.rint(qpoints)
        self._qpoints = qpoints
def get_reciprocal_operations(rotations,
                              transformation_matrix,
                              D,
                              Q,
                              is_time_reversal=True):
    """Generate reciprocal rotation matrices

    Collect unique real space rotation matrices and transpose them.
    When is_time_reversal=True, inversion is added if it is not in the
    list of the rotation matrices.

    Parameters
    ----------
    rotations : ndarray
        Rotation matrices in real space. x' = Rx.
        shape=(rotations, 3, 3), dtype='intc'
    transformation_matrix : array_like
        Transformation matrix of basis vectors in real space. The rotation
        matrices are transformed with this before use.
    D : array_like
        D of smith normal form 3x3.
        shape=(3, 3)
    Q : array_like
        Q of smith normal form 3x3.
        shape=(3, 3)
    is_time_reversal : bool
        When True, inversion operation is added.

    Returns
    -------
    rotations_for_Q : ndarray
        Rotation matrices in reciprocal space. Grid points are sent by the
        symmetrically equivalent grid points as follows:
            g' = (R_Q g) % diagonal(D)
        shape=(rotations, 3, 3), dtype='intc', order='C'
    """
    tmat_inv = np.linalg.inv(transformation_matrix)
    # Transform each unique real-space rotation into the target basis;
    # the result is required to stay integral within tolerance.
    transformed_rots = []
    for rot in collect_unique_rotations(rotations):
        rot_t = similarity_transformation(tmat_inv, rot)
        rot_int = np.rint(rot_t).astype(int)
        assert (np.abs(rot_t - rot_int) < 1e-5).all()
        transformed_rots.append(rot_int)
    ptg_ops, rec_ops = get_pointgroup_operations(
        transformed_rots, is_time_reversal=is_time_reversal)
    # Map each reciprocal operation onto SNF grid coordinates by two
    # successive similarity transformations (by Q^-1, then by D).
    Q_inv = np.linalg.inv(Q)
    rotations_for_Q = []
    for rec_rot in rec_ops:
        on_grid = similarity_transformation(
            D, similarity_transformation(Q_inv, rec_rot))
        grid_rot = np.rint(on_grid).astype(int)
        assert (np.abs(on_grid - grid_rot) < 1e-5).all()
        # Grid rotations must be unimodular.
        assert abs(determinant(grid_rot)) == 1
        rotations_for_Q.append(grid_rot)
    return np.array(rotations_for_Q, dtype='intc', order='C')
| [
"numpy.maximum",
"numpy.abs",
"phonopy.structure.snf.SNF3x3",
"spglib.relocate_BZ_grid_address",
"phonopy.structure.cells.determinant",
"numpy.unique",
"numpy.prod",
"phonopy.structure.symmetry.get_pointgroup_operations",
"numpy.zeros_like",
"numpy.multiply",
"numpy.extract",
"numpy.logical_xo... | [((3043, 3065), 'numpy.linalg.inv', 'np.linalg.inv', (['lattice'], {}), '(lattice)\n', (3056, 3065), True, 'import numpy as np\n'), ((3595, 3630), 'numpy.maximum', 'np.maximum', (['mesh_numbers', '[1, 1, 1]'], {}), '(mesh_numbers, [1, 1, 1])\n', (3605, 3630), True, 'import numpy as np\n'), ((4596, 4629), 'numpy.zeros_like', 'np.zeros_like', (['grid_mapping_table'], {}), '(grid_mapping_table)\n', (4609, 4629), True, 'import numpy as np\n'), ((4720, 4767), 'numpy.array', 'np.array', (['weights[ir_grid_points]'], {'dtype': '"""intc"""'}), "(weights[ir_grid_points], dtype='intc')\n", (4728, 4767), True, 'import numpy as np\n'), ((22423, 22459), 'numpy.linalg.inv', 'np.linalg.inv', (['transformation_matrix'], {}), '(transformation_matrix)\n', (22436, 22459), True, 'import numpy as np\n'), ((22473, 22508), 'phonopy.structure.symmetry.collect_unique_rotations', 'collect_unique_rotations', (['rotations'], {}), '(rotations)\n', (22497, 22508), False, 'from phonopy.structure.symmetry import get_lattice_vector_equivalence, get_pointgroup_operations, collect_unique_rotations\n'), ((22712, 22785), 'phonopy.structure.symmetry.get_pointgroup_operations', 'get_pointgroup_operations', (['unique_rots'], {'is_time_reversal': 'is_time_reversal'}), '(unique_rots, is_time_reversal=is_time_reversal)\n', (22737, 22785), False, 'from phonopy.structure.symmetry import get_lattice_vector_equivalence, get_pointgroup_operations, collect_unique_rotations\n'), ((22808, 22824), 'numpy.linalg.inv', 'np.linalg.inv', (['Q'], {}), '(Q)\n', (22821, 22824), True, 'import numpy as np\n'), ((23142, 23186), 'numpy.array', 'np.array', (['rec_ops_Q'], {'dtype': '"""intc"""', 'order': '"""C"""'}), "(rec_ops_Q, dtype='intc', order='C')\n", (23150, 23186), True, 'import numpy as np\n'), ((4489, 4518), 'numpy.unique', 'np.unique', (['grid_mapping_table'], {}), '(grid_mapping_table)\n', (4498, 4518), True, 'import numpy as np\n'), ((8191, 8227), 'numpy.array', 'np.array', 
(['mesh_numbers'], {'dtype': '"""intc"""'}), "(mesh_numbers, dtype='intc')\n", (8199, 8227), True, 'import numpy as np\n'), ((9358, 9473), 'warnings.warn', 'warnings.warn', (['"""GridPoints.get_grid_address is deprecated.Use grid_address attribute."""', 'DeprecationWarning'], {}), "(\n 'GridPoints.get_grid_address is deprecated.Use grid_address attribute.',\n DeprecationWarning)\n", (9371, 9473), False, 'import warnings\n'), ((9669, 9789), 'warnings.warn', 'warnings.warn', (['"""GridPoints.get_ir_grid_points is deprecated.Use ir_grid_points attribute."""', 'DeprecationWarning'], {}), "(\n 'GridPoints.get_ir_grid_points is deprecated.Use ir_grid_points attribute.'\n , DeprecationWarning)\n", (9682, 9789), False, 'import warnings\n'), ((9971, 10073), 'warnings.warn', 'warnings.warn', (['"""GridPoints.get_ir_qpoints is deprecated.Use points attribute."""', 'DeprecationWarning'], {}), "('GridPoints.get_ir_qpoints is deprecated.Use points attribute.',\n DeprecationWarning)\n", (9984, 10073), False, 'import warnings\n'), ((10259, 10372), 'warnings.warn', 'warnings.warn', (['"""GridPoints.get_ir_grid_weights is deprecated.Use weights attribute."""', 'DeprecationWarning'], {}), "(\n 'GridPoints.get_ir_grid_weights is deprecated.Use weights attribute.',\n DeprecationWarning)\n", (10272, 10372), False, 'import warnings\n'), ((10575, 10703), 'warnings.warn', 'warnings.warn', (['"""GridPoints.get_grid_mapping_table is deprecated.Use grid_mapping_table attribute."""', 'DeprecationWarning'], {}), "(\n 'GridPoints.get_grid_mapping_table is deprecated.Use grid_mapping_table attribute.'\n , DeprecationWarning)\n", (10588, 10703), False, 'import warnings\n'), ((12285, 12347), 'phonopy.structure.symmetry.get_lattice_vector_equivalence', 'get_lattice_vector_equivalence', (['[r.T for r in self._rotations]'], {}), '([r.T for r in self._rotations])\n', (12315, 12347), False, 'from phonopy.structure.symmetry import get_lattice_vector_equivalence, get_pointgroup_operations, 
collect_unique_rotations\n'), ((12482, 12544), 'phonopy.structure.brillouin_zone.get_qpoints_in_Brillouin_zone', 'get_qpoints_in_Brillouin_zone', (['self._rec_lat', 'self._ir_qpoints'], {}), '(self._rec_lat, self._ir_qpoints)\n', (12511, 12544), False, 'from phonopy.structure.brillouin_zone import get_qpoints_in_Brillouin_zone\n'), ((12626, 12703), 'numpy.array', 'np.array', (['[q_set[0] for q_set in qpoint_set_in_BZ]'], {'dtype': '"""double"""', 'order': '"""C"""'}), "([q_set[0] for q_set in qpoint_set_in_BZ], dtype='double', order='C')\n", (12634, 12703), True, 'import numpy as np\n'), ((12935, 13053), 'spglib.get_stabilized_reciprocal_mesh', 'get_stabilized_reciprocal_mesh', (['self._mesh', 'rotations'], {'is_shift': 'self._is_shift', 'is_time_reversal': 'is_time_reversal'}), '(self._mesh, rotations, is_shift=self.\n _is_shift, is_time_reversal=is_time_reversal)\n', (12965, 13053), False, 'from spglib import get_stabilized_reciprocal_mesh, relocate_BZ_grid_address, get_symmetry_dataset, get_pointgroup\n'), ((13635, 13739), 'numpy.array', 'np.array', (['((self._grid_address[self._ir_grid_points] + shift) / self._mesh)'], {'dtype': '"""double"""', 'order': '"""C"""'}), "((self._grid_address[self._ir_grid_points] + shift) / self._mesh,\n dtype='double', order='C')\n", (13643, 13739), True, 'import numpy as np\n'), ((18456, 18481), 'phonopy.structure.snf.SNF3x3', 'SNF3x3', (['self._grid_matrix'], {}), '(self._grid_matrix)\n', (18462, 18481), False, 'from phonopy.structure.snf import SNF3x3\n'), ((18781, 18823), 'phonopy.structure.cells.get_primitive_matrix_by_centring', 'get_primitive_matrix_by_centring', (['centring'], {}), '(centring)\n', (18813, 18823), False, 'from phonopy.structure.cells import get_primitive_matrix_by_centring, estimate_supercell_matrix, estimate_supercell_matrix_from_pointgroup, determinant\n'), ((19118, 19137), 'numpy.linalg.inv', 'np.linalg.inv', (['pmat'], {}), '(pmat)\n', (19131, 19137), True, 'import numpy as np\n'), ((19321, 19387), 
'numpy.array', 'np.array', (['(inv_pmat_int * mesh_numbers).T'], {'dtype': '"""intc"""', 'order': '"""C"""'}), "((inv_pmat_int * mesh_numbers).T, dtype='intc', order='C')\n", (19329, 19387), True, 'import numpy as np\n'), ((19737, 19783), 'spglib.get_pointgroup', 'get_pointgroup', (["self._sym_dataset['rotations']"], {}), "(self._sym_dataset['rotations'])\n", (19751, 19783), False, 'from spglib import get_stabilized_reciprocal_mesh, relocate_BZ_grid_address, get_symmetry_dataset, get_pointgroup\n'), ((20015, 20091), 'phonopy.structure.cells.estimate_supercell_matrix_from_pointgroup', 'estimate_supercell_matrix_from_pointgroup', (['pointgroup[1]', 'lattice', 'num_cells'], {}), '(pointgroup[1], lattice, num_cells)\n', (20056, 20091), False, 'from phonopy.structure.cells import get_primitive_matrix_by_centring, estimate_supercell_matrix, estimate_supercell_matrix_from_pointgroup, determinant\n'), ((20293, 20329), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': '"""double"""', 'order': '"""C"""'}), "(3, dtype='double', order='C')\n", (20299, 20329), True, 'import numpy as np\n'), ((20380, 20404), 'numpy.diagonal', 'np.diagonal', (['self._snf.D'], {}), '(self._snf.D)\n', (20391, 20404), True, 'import numpy as np\n'), ((20943, 20969), 'numpy.linalg.inv', 'np.linalg.inv', (['self._snf.D'], {}), '(self._snf.D)\n', (20956, 20969), True, 'import numpy as np\n'), ((21077, 21093), 'numpy.rint', 'np.rint', (['qpoints'], {}), '(qpoints)\n', (21084, 21093), True, 'import numpy as np\n'), ((22523, 22561), 'phonopy.harmonic.force_constants.similarity_transformation', 'similarity_transformation', (['tmat_inv', 'r'], {}), '(tmat_inv, r)\n', (22548, 22561), False, 'from phonopy.harmonic.force_constants import similarity_transformation\n'), ((22879, 22914), 'phonopy.harmonic.force_constants.similarity_transformation', 'similarity_transformation', (['Q_inv', 'r'], {}), '(Q_inv, r)\n', (22904, 22914), False, 'from phonopy.harmonic.force_constants import similarity_transformation\n'), ((22928, 
22960), 'phonopy.harmonic.force_constants.similarity_transformation', 'similarity_transformation', (['D', '_r'], {}), '(D, _r)\n', (22953, 22960), False, 'from phonopy.harmonic.force_constants import similarity_transformation\n'), ((3108, 3142), 'numpy.dot', 'np.dot', (['rec_lattice.T', 'rec_lattice'], {}), '(rec_lattice.T, rec_lattice)\n', (3114, 3142), True, 'import numpy as np\n'), ((3164, 3197), 'numpy.rint', 'np.rint', (['(rec_lat_lengths * length)'], {}), '(rec_lat_lengths * length)\n', (3171, 3197), True, 'import numpy as np\n'), ((11496, 11523), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': '"""double"""'}), "(3, dtype='double')\n", (11504, 11523), True, 'import numpy as np\n'), ((11558, 11596), 'numpy.array', 'np.array', (['q_mesh_shift'], {'dtype': '"""double"""'}), "(q_mesh_shift, dtype='double')\n", (11566, 11596), True, 'import numpy as np\n'), ((13115, 13153), 'numpy.array', 'np.array', (['self._is_shift'], {'dtype': '"""intc"""'}), "(self._is_shift, dtype='intc')\n", (13123, 13153), True, 'import numpy as np\n'), ((13219, 13314), 'spglib.relocate_BZ_grid_address', 'relocate_BZ_grid_address', (['grid_address', 'self._mesh', 'self._rec_lat'], {'is_shift': 'self._is_shift'}), '(grid_address, self._mesh, self._rec_lat, is_shift=\n self._is_shift)\n', (13243, 13314), False, 'from spglib import get_stabilized_reciprocal_mesh, relocate_BZ_grid_address, get_symmetry_dataset, get_pointgroup\n'), ((19900, 19931), 'numpy.dot', 'np.dot', (['input_cell.cell.T', 'tmat'], {}), '(input_cell.cell.T, tmat)\n', (19906, 19931), True, 'import numpy as np\n'), ((11635, 11653), 'numpy.rint', 'np.rint', (['(shift * 2)'], {}), '(shift * 2)\n', (11642, 11653), True, 'import numpy as np\n'), ((12376, 12413), 'numpy.extract', 'np.extract', (['lattice_equiv', 'mesh_equiv'], {}), '(lattice_equiv, mesh_equiv)\n', (12386, 12413), True, 'import numpy as np\n'), ((18850, 18869), 'numpy.linalg.inv', 'np.linalg.inv', (['tmat'], {}), '(tmat)\n', (18863, 18869), True, 'import numpy as 
np\n'), ((19161, 19178), 'numpy.rint', 'np.rint', (['inv_pmat'], {}), '(inv_pmat)\n', (19168, 19178), True, 'import numpy as np\n'), ((19536, 19555), 'numpy.linalg.inv', 'np.linalg.inv', (['tmat'], {}), '(tmat)\n', (19549, 19555), True, 'import numpy as np\n'), ((20195, 20226), 'numpy.multiply', 'np.multiply', (['tmat', 'mesh_numbers'], {}), '(tmat, mesh_numbers)\n', (20206, 20226), True, 'import numpy as np\n'), ((21028, 21054), 'numpy.dot', 'np.dot', (['self._snf.Q', 'D_inv'], {}), '(self._snf.Q, D_inv)\n', (21034, 21054), True, 'import numpy as np\n'), ((22579, 22590), 'numpy.rint', 'np.rint', (['_r'], {}), '(_r)\n', (22586, 22590), True, 'import numpy as np\n'), ((22978, 22989), 'numpy.rint', 'np.rint', (['_r'], {}), '(_r)\n', (22985, 22989), True, 'import numpy as np\n'), ((23071, 23090), 'phonopy.structure.cells.determinant', 'determinant', (['_r_int'], {}), '(_r_int)\n', (23082, 23090), False, 'from phonopy.structure.cells import get_primitive_matrix_by_centring, estimate_supercell_matrix, estimate_supercell_matrix_from_pointgroup, determinant\n'), ((3322, 3341), 'numpy.array', 'np.array', (['rotations'], {}), '(rotations)\n', (3330, 3341), True, 'import numpy as np\n'), ((11050, 11073), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': '"""intc"""'}), "(3, dtype='intc')\n", (11056, 11073), True, 'import numpy as np\n'), ((11747, 11761), 'numpy.rint', 'np.rint', (['shift'], {}), '(shift)\n', (11754, 11761), True, 'import numpy as np\n'), ((13422, 13441), 'numpy.prod', 'np.prod', (['self._mesh'], {}), '(self._mesh)\n', (13429, 13441), True, 'import numpy as np\n'), ((19207, 19238), 'numpy.abs', 'np.abs', (['(inv_pmat - inv_pmat_int)'], {}), '(inv_pmat - inv_pmat_int)\n', (19213, 19238), True, 'import numpy as np\n'), ((22619, 22638), 'numpy.abs', 'np.abs', (['(_r - _r_int)'], {}), '(_r - _r_int)\n', (22625, 22638), True, 'import numpy as np\n'), ((23018, 23037), 'numpy.abs', 'np.abs', (['(_r - _r_int)'], {}), '(_r - _r_int)\n', (23024, 23037), True, 'import numpy as 
np\n'), ((11907, 11954), 'numpy.logical_xor', 'np.logical_xor', (['(diff > 0.1)', '(self._mesh % 2 == 0)'], {}), '(diff > 0.1, self._mesh % 2 == 0)\n', (11921, 11954), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Distributed under the MIT License.
# See LICENSE.txt for details.
from spectre.Visualization.Render1D import (find_extrema_over_data_set,
render_single_time)
import unittest
import os
import numpy as np
import matplotlib as mpl
# Select the non-interactive Agg backend so the tests run without a display.
mpl.use('agg')
class TestRender1D(unittest.TestCase):
    """Unit tests for the Render1D visualization helpers."""

    def test_find_extrema_over_data_set(self):
        # The helper should report (min, max) over the whole array.
        samples = np.array([1.1, 6.45, 0.34, 2.3])
        self.assertEqual(find_extrema_over_data_set(samples), (0.34, 6.45))

    def test_render_single_time(self):
        # Rendering a single time slice should produce a pdf file.
        prefix = "TestRenderSingleTime"
        times = [0.0, 0.1]
        grids = [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]
        values = [[5.2, 4.5, 9.0, 2.0, 8.0], [1.1, 4.0, 6.0, 5.3, 3.0]]
        render_single_time("Variable Test", 1, prefix, times, grids, values)
        pdf_path = prefix + '.pdf'
        self.assertTrue(os.path.isfile(pdf_path))
        # Clean up the artifact so repeated runs start fresh.
        os.remove(pdf_path)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"unittest.main",
"os.remove",
"spectre.Visualization.Render1D.find_extrema_over_data_set",
"os.path.isfile",
"matplotlib.use",
"numpy.array",
"spectre.Visualization.Render1D.render_single_time"
] | [((299, 313), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {}), "('agg')\n", (306, 313), True, 'import matplotlib as mpl\n'), ((1168, 1194), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1181, 1194), False, 'import unittest\n'), ((423, 455), 'numpy.array', 'np.array', (['[1.1, 6.45, 0.34, 2.3]'], {}), '([1.1, 6.45, 0.34, 2.3])\n', (431, 455), True, 'import numpy as np\n'), ((926, 1001), 'spectre.Visualization.Render1D.render_single_time', 'render_single_time', (['var_name', 'time_slice', 'output_prefix', 'time', 'coords', 'data'], {}), '(var_name, time_slice, output_prefix, time, coords, data)\n', (944, 1001), False, 'from spectre.Visualization.Render1D import find_extrema_over_data_set, render_single_time\n'), ((1101, 1134), 'os.remove', 'os.remove', (["(output_prefix + '.pdf')"], {}), "(output_prefix + '.pdf')\n", (1110, 1134), False, 'import os\n'), ((518, 556), 'spectre.Visualization.Render1D.find_extrema_over_data_set', 'find_extrema_over_data_set', (['test_array'], {}), '(test_array)\n', (544, 556), False, 'from spectre.Visualization.Render1D import find_extrema_over_data_set, render_single_time\n'), ((1053, 1091), 'os.path.isfile', 'os.path.isfile', (["(output_prefix + '.pdf')"], {}), "(output_prefix + '.pdf')\n", (1067, 1091), False, 'import os\n')] |
import os
import sys
import numpy as np
import flopy
import matplotlib.pyplot as plt
# --modify default matplotlib settings
# Module-wide rcParams overrides: report-style fonts, uncompressed PDF
# streams, TrueType (type 42) fonts embedded in PDF output, and smaller
# legend/label/tick text for publication figures.
updates = {
    "font.family": ["Univers 57 Condensed", "Arial"],  # first available wins
    "mathtext.default": "regular",
    "pdf.compression": 0,
    "pdf.fonttype": 42,  # embed TrueType fonts so text stays editable
    "legend.fontsize": 7,
    "axes.labelsize": 8,
    "xtick.labelsize": 7,
    "ytick.labelsize": 7,
}
plt.rcParams.update(updates)
def MergeData(ndim, zdata, tb):
    """Merge per-layer interface (zeta) surfaces into one composite profile.

    For each of the ``ndim`` columns, takes the value from the first layer
    whose zeta lies strictly inside that layer's top/bottom interval
    (shrunk by a small tolerance); columns with no qualifying value stay NaN.

    Parameters
    ----------
    ndim : int
        Number of columns in each layer array.
    zdata : sequence of array-like
        One zeta array of length ``ndim`` per layer, ordered top to bottom.
    tb : sequence of (top, bottom) pairs
        Layer top/bottom elevations, one pair per entry of ``zdata``.

    Returns
    -------
    numpy.ndarray
        Length-``ndim`` float array of merged zeta values (NaN where unset).
    """
    sv = 0.05  # tolerance subtracted from the top / added to the bottom
    md = np.empty(ndim, float)
    md.fill(np.nan)
    found = np.empty(ndim, bool)
    found.fill(False)
    for idx, layer in enumerate(zdata):
        # Layer bounds are loop-invariant; hoist them out of the inner loop.
        t0 = tb[idx][0] - sv
        t1 = tb[idx][1] + sv
        for jdx, z in enumerate(layer):
            if found[jdx]:
                continue
            # Accept z only strictly between the (shrunk) layer bounds.
            if t1 < z < t0:
                md[jdx] = z
                found[jdx] = True
    return md
def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
    """Draw a hand-made legend: one short colored bar per entry of ``cc``,
    labeled with the elapsed time, stepping down by ``dy`` and advancing the
    time label by ``dt`` for each row."""
    label_x = x0 + 2.0 * dx  # label column sits just right of the bars
    for color in cc:
        ax.plot([x0, x0 + dx], [y0, y0], color=color, linewidth=4)
        ax.text(label_x, y0 + dy / 2.0, "{0:=3d} years".format(t0), size=5)
        y0 += dy
        t0 += dt
    return
def run():
    """Build, run, and post-process the SWI2 example-3 seawater-interface model.

    Command-line flags (read from ``sys.argv``):
        --clean  delete every non-``*.py`` file in the workspace and exit
        --pdf    save the final figure as a PDF instead of a PNG

    Returns:
        int: 0 on completion.
    """
    workspace = "swiex3"
    cleanFiles = False
    fext = "png"
    # --- parse command-line flags ---
    narg = len(sys.argv)
    iarg = 0
    if narg > 1:
        while iarg < narg - 1:
            iarg += 1
            basearg = sys.argv[iarg].lower()
            if basearg == "--clean":
                cleanFiles = True
            elif basearg == "--pdf":
                fext = "pdf"
    if cleanFiles:
        print("cleaning all files")
        print("excluding *.py files")
        files = os.listdir(workspace)
        for f in files:
            fpth = os.path.join(workspace, f)
            if os.path.isdir(fpth):
                continue
            if ".py" != os.path.splitext(f)[1].lower():
                print(" removing...{}".format(os.path.basename(f)))
                try:
                    os.remove(fpth)
                except OSError:
                    # Best-effort cleanup: only swallow filesystem errors.
                    # (The original bare ``except`` also hid interrupts/bugs.)
                    pass
        return 0
    # --- model dimensions and grid spacing ---
    modelname = "swiex3"
    exe_name = "mf2005"
    nrow = 1
    ncol = 200
    delr = 20.0
    delc = 1.0
    # well data: (layer, row, column, flux) per stress period; period-2
    # pumping is half the period-1 rate
    lrcQ1 = np.array([(0, 0, 199, 0.01), (2, 0, 199, 0.02)])
    lrcQ2 = np.array([(0, 0, 199, 0.01 * 0.5), (2, 0, 199, 0.02 * 0.5)])
    # ghb data: (layer, row, column, stage, conductance) for the 30
    # leftmost columns of the top layer
    lrchc = np.zeros((30, 5))
    lrchc[:, [0, 1, 3, 4]] = [0, 0, 0.0, 0.8 / 2.0]
    lrchc[:, 2] = np.arange(0, 30)
    # swi2 data: initial interface (zeta) surface and source/sink flags
    zini = np.hstack(
        (-9 * np.ones(24), np.arange(-9, -50, -0.5), -50 * np.ones(94))
    )[np.newaxis, :]
    iso = np.zeros((1, 200), dtype=int)
    iso[:, :30] = -2
    # --- build the MODFLOW model (packages register themselves on ml) ---
    ml = flopy.modflow.Modflow(
        modelname, version="mf2005", exe_name=exe_name, model_ws=workspace
    )
    discret = flopy.modflow.ModflowDis(
        ml,
        nrow=nrow,
        ncol=ncol,
        nlay=3,
        delr=delr,
        delc=delc,
        laycbd=[0, 0, 0],
        top=-9.0,
        botm=[-29, -30, -50],
        nper=2,
        perlen=[365 * 1000, 1000 * 365],
        nstp=[500, 500],
    )
    bas = flopy.modflow.ModflowBas(ml, ibound=1, strt=1.0)
    bcf = flopy.modflow.ModflowBcf(
        ml, laycon=[0, 0, 0], tran=[40.0, 1, 80.0], vcont=[0.005, 0.005]
    )
    wel = flopy.modflow.ModflowWel(ml, stress_period_data={0: lrcQ1, 1: lrcQ2})
    ghb = flopy.modflow.ModflowGhb(ml, stress_period_data={0: lrchc})
    swi = flopy.modflow.ModflowSwi2(
        ml,
        iswizt=55,
        nsrf=1,
        istrat=1,
        toeslope=0.01,
        tipslope=0.04,
        nu=[0, 0.025],
        zeta=[zini, zini, zini],
        ssz=0.2,
        isource=iso,
        nsolver=1,
    )
    oc = flopy.modflow.ModflowOc(ml, save_every=100, save_types=["save head"])
    pcg = flopy.modflow.ModflowPcg(ml)
    # write the model files
    ml.write_input()
    # run the model (success flag unused; the reads below fail loudly if
    # the run produced no output files)
    ml.run_model(silent=True)
    # --- read heads and interface (zeta) surfaces back from disk ---
    headfile = os.path.join(workspace, "{}.hds".format(modelname))
    hdobj = flopy.utils.HeadFile(headfile)
    head = hdobj.get_data(totim=3.65000e05)
    zetafile = os.path.join(workspace, "{}.zta".format(modelname))
    zobj = flopy.utils.CellBudgetFile(zetafile)
    zkstpkper = zobj.get_kstpkper()
    zeta = []
    for kk in zkstpkper:
        zeta.append(zobj.get_data(kstpkper=kk, text="ZETASRF 1")[0])
    zeta = np.array(zeta)
    # --- figure layout and colors ---
    fwid, fhgt = 7.00, 4.50
    flft, frgt, fbot, ftop = 0.125, 0.95, 0.125, 0.925
    colormap = plt.cm.plasma  # winter
    cc = []
    icolor = 11
    cr = np.linspace(0.0, 0.9, icolor)
    for idx in cr:
        cc.append(colormap(idx))
    lw = 0.5
    # cell-center x coordinates (origin shifted so column 30 sits at x=0)
    x = np.arange(-30 * delr + 0.5 * delr, (ncol - 30) * delr, delr)
    xedge = np.linspace(-30.0 * delr, (ncol - 30.0) * delr, len(x) + 1)
    zedge = [[-9.0, -29.0], [-29.0, -30.0], [-30.0, -50.0]]
    fig = plt.figure(figsize=(fwid, fhgt), facecolor="w")
    fig.subplots_adjust(
        wspace=0.25, hspace=0.25, left=flft, right=frgt, bottom=fbot, top=ftop
    )
    # --- panel A: first five saved interface snapshots ---
    ax = fig.add_subplot(311)
    ax.text(
        -0.075,
        1.05,
        "A",
        transform=ax.transAxes,
        va="center",
        ha="center",
        size="8",
    )
    # confining unit
    ax.fill(
        [-600, 3400, 3400, -600],
        [-29, -29, -30, -30],
        fc=[0.8, 0.8, 0.8],
        ec=[0.8, 0.8, 0.8],
    )
    # initial interface position (only the sloping part, inside the aquifer)
    z = np.copy(zini[0, :])
    zr = z.copy()
    p = (zr < -9.0) & (zr > -50.0)
    ax.plot(x[p], zr[p], color=cc[0], linewidth=lw, drawstyle="steps-mid")
    # merged interface for the first five saved snapshots
    for i in range(5):
        zt = MergeData(
            ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge
        )
        dr = zt.copy()
        ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle="steps-mid")
    # Manufacture a legend bar
    LegBar(ax, -200.0, -33.75, 0, 25, -2.5, 200, cc[0:6])
    # axes
    ax.set_ylim(-50, -9)
    ax.set_ylabel("Elevation, in meters")
    ax.set_xlim(-250.0, 2500.0)
    # --- panel B: later saved snapshots ---
    ax = fig.add_subplot(312)
    ax.text(
        -0.075,
        1.05,
        "B",
        transform=ax.transAxes,
        va="center",
        ha="center",
        size="8",
    )
    # confining unit
    ax.fill(
        [-600, 3400, 3400, -600],
        [-29, -29, -30, -30],
        fc=[0.8, 0.8, 0.8],
        ec=[0.8, 0.8, 0.8],
    )
    # merged interface for snapshots 4 through 9
    for i in range(4, 10):
        zt = MergeData(
            ncol, [zeta[i, 0, 0, :], zeta[i, 1, 0, :], zeta[i, 2, 0, :]], zedge
        )
        dr = zt.copy()
        ax.plot(x, dr, color=cc[i + 1], linewidth=lw, drawstyle="steps-mid")
    # Manufacture a legend bar
    LegBar(ax, -200.0, -33.75, 1000, 25, -2.5, 200, cc[5:11])
    # axes
    ax.set_ylim(-50, -9)
    ax.set_ylabel("Elevation, in meters")
    ax.set_xlim(-250.0, 2500.0)
    # --- panel C: SWI2 interface vs Ghyben-Herzberg approximation ---
    ax = fig.add_subplot(313)
    ax.text(
        -0.075,
        1.05,
        "C",
        transform=ax.transAxes,
        va="center",
        ha="center",
        size="8",
    )
    # confining unit
    ax.fill(
        [-600, 3400, 3400, -600],
        [-29, -29, -30, -30],
        fc=[0.8, 0.8, 0.8],
        ec=[0.8, 0.8, 0.8],
    )
    # SWI2 interface for snapshot 4, drawn as open markers
    zt = MergeData(
        ncol, [zeta[4, 0, 0, :], zeta[4, 1, 0, :], zeta[4, 2, 0, :]], zedge
    )
    ax.plot(
        x,
        zt,
        marker="o",
        markersize=3,
        linewidth=0.0,
        markeredgecolor="blue",
        markerfacecolor="None",
    )
    # Ghyben-Herzberg interface estimated from the freshwater head
    zeta1 = -9 - 40.0 * (head[0, 0, :])
    gbh = np.empty(len(zeta1), float)
    gbho = np.empty(len(zeta1), float)
    for idx, z1 in enumerate(zeta1):
        if z1 >= -9.0 or z1 <= -50.0:
            gbh[idx] = np.nan
            gbho[idx] = 0.0
        else:
            gbh[idx] = z1
            gbho[idx] = z1
    ax.plot(x, gbh, "r")
    np.savetxt(os.path.join(workspace, "Ghyben-Herzberg.out"), gbho)
    # fake figures (off-screen artists that only feed the legend)
    ax.plot([-100.0, -100], [-100.0, -100], "r", label="Ghyben-Herzberg")
    ax.plot(
        [-100.0, -100],
        [-100.0, -100],
        "bo",
        markersize=3,
        markeredgecolor="blue",
        markerfacecolor="None",
        label="SWI2",
    )
    # legend
    leg = ax.legend(loc="lower left", numpoints=1)
    leg._drawFrame = False
    # axes
    ax.set_ylim(-50, -9)
    ax.set_xlabel("Horizontal distance, in meters")
    ax.set_ylabel("Elevation, in meters")
    ax.set_xlim(-250.0, 2500.0)
    outfig = os.path.join(workspace, "Figure08_swi2ex3.{0}".format(fext))
    fig.savefig(outfig, dpi=300)
    print("created...", outfig)
    return 0
if __name__ == "__main__":
    success = run()
| [
"os.remove",
"flopy.modflow.ModflowOc",
"numpy.empty",
"numpy.ones",
"matplotlib.pyplot.figure",
"flopy.modflow.ModflowBcf",
"numpy.arange",
"flopy.modflow.ModflowSwi2",
"os.path.join",
"flopy.utils.CellBudgetFile",
"flopy.modflow.ModflowPcg",
"numpy.copy",
"flopy.modflow.ModflowGhb",
"mat... | [((384, 412), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['updates'], {}), '(updates)\n', (403, 412), True, 'import matplotlib.pyplot as plt\n'), ((470, 491), 'numpy.empty', 'np.empty', (['ndim', 'float'], {}), '(ndim, float)\n', (478, 491), True, 'import numpy as np\n'), ((526, 546), 'numpy.empty', 'np.empty', (['ndim', 'bool'], {}), '(ndim, bool)\n', (534, 546), True, 'import numpy as np\n'), ((2191, 2239), 'numpy.array', 'np.array', (['[(0, 0, 199, 0.01), (2, 0, 199, 0.02)]'], {}), '([(0, 0, 199, 0.01), (2, 0, 199, 0.02)])\n', (2199, 2239), True, 'import numpy as np\n'), ((2252, 2312), 'numpy.array', 'np.array', (['[(0, 0, 199, 0.01 * 0.5), (2, 0, 199, 0.02 * 0.5)]'], {}), '([(0, 0, 199, 0.01 * 0.5), (2, 0, 199, 0.02 * 0.5)])\n', (2260, 2312), True, 'import numpy as np\n'), ((2340, 2357), 'numpy.zeros', 'np.zeros', (['(30, 5)'], {}), '((30, 5))\n', (2348, 2357), True, 'import numpy as np\n'), ((2428, 2444), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {}), '(0, 30)\n', (2437, 2444), True, 'import numpy as np\n'), ((2586, 2615), 'numpy.zeros', 'np.zeros', (['(1, 200)'], {'dtype': 'int'}), '((1, 200), dtype=int)\n', (2594, 2615), True, 'import numpy as np\n'), ((2666, 2759), 'flopy.modflow.Modflow', 'flopy.modflow.Modflow', (['modelname'], {'version': '"""mf2005"""', 'exe_name': 'exe_name', 'model_ws': 'workspace'}), "(modelname, version='mf2005', exe_name=exe_name,\n model_ws=workspace)\n", (2687, 2759), False, 'import flopy\n'), ((2784, 2982), 'flopy.modflow.ModflowDis', 'flopy.modflow.ModflowDis', (['ml'], {'nrow': 'nrow', 'ncol': 'ncol', 'nlay': '(3)', 'delr': 'delr', 'delc': 'delc', 'laycbd': '[0, 0, 0]', 'top': '(-9.0)', 'botm': '[-29, -30, -50]', 'nper': '(2)', 'perlen': '[365 * 1000, 1000 * 365]', 'nstp': '[500, 500]'}), '(ml, nrow=nrow, ncol=ncol, nlay=3, delr=delr, delc=\n delc, laycbd=[0, 0, 0], top=-9.0, botm=[-29, -30, -50], nper=2, perlen=\n [365 * 1000, 1000 * 365], nstp=[500, 500])\n', (2808, 2982), False, 'import 
flopy\n'), ((3086, 3134), 'flopy.modflow.ModflowBas', 'flopy.modflow.ModflowBas', (['ml'], {'ibound': '(1)', 'strt': '(1.0)'}), '(ml, ibound=1, strt=1.0)\n', (3110, 3134), False, 'import flopy\n'), ((3145, 3240), 'flopy.modflow.ModflowBcf', 'flopy.modflow.ModflowBcf', (['ml'], {'laycon': '[0, 0, 0]', 'tran': '[40.0, 1, 80.0]', 'vcont': '[0.005, 0.005]'}), '(ml, laycon=[0, 0, 0], tran=[40.0, 1, 80.0], vcont=\n [0.005, 0.005])\n', (3169, 3240), False, 'import flopy\n'), ((3260, 3333), 'flopy.modflow.ModflowWel', 'flopy.modflow.ModflowWel', (['ml'], {'stress_period_data': '{(0): lrcQ1, (1): lrcQ2}'}), '(ml, stress_period_data={(0): lrcQ1, (1): lrcQ2})\n', (3284, 3333), False, 'import flopy\n'), ((3340, 3401), 'flopy.modflow.ModflowGhb', 'flopy.modflow.ModflowGhb', (['ml'], {'stress_period_data': '{(0): lrchc}'}), '(ml, stress_period_data={(0): lrchc})\n', (3364, 3401), False, 'import flopy\n'), ((3410, 3580), 'flopy.modflow.ModflowSwi2', 'flopy.modflow.ModflowSwi2', (['ml'], {'iswizt': '(55)', 'nsrf': '(1)', 'istrat': '(1)', 'toeslope': '(0.01)', 'tipslope': '(0.04)', 'nu': '[0, 0.025]', 'zeta': '[zini, zini, zini]', 'ssz': '(0.2)', 'isource': 'iso', 'nsolver': '(1)'}), '(ml, iswizt=55, nsrf=1, istrat=1, toeslope=0.01,\n tipslope=0.04, nu=[0, 0.025], zeta=[zini, zini, zini], ssz=0.2, isource\n =iso, nsolver=1)\n', (3435, 3580), False, 'import flopy\n'), ((3676, 3745), 'flopy.modflow.ModflowOc', 'flopy.modflow.ModflowOc', (['ml'], {'save_every': '(100)', 'save_types': "['save head']"}), "(ml, save_every=100, save_types=['save head'])\n", (3699, 3745), False, 'import flopy\n'), ((3756, 3784), 'flopy.modflow.ModflowPcg', 'flopy.modflow.ModflowPcg', (['ml'], {}), '(ml)\n', (3780, 3784), False, 'import flopy\n'), ((3968, 3998), 'flopy.utils.HeadFile', 'flopy.utils.HeadFile', (['headfile'], {}), '(headfile)\n', (3988, 3998), False, 'import flopy\n'), ((4122, 4158), 'flopy.utils.CellBudgetFile', 'flopy.utils.CellBudgetFile', (['zetafile'], {}), '(zetafile)\n', (4148, 4158), 
False, 'import flopy\n'), ((4315, 4329), 'numpy.array', 'np.array', (['zeta'], {}), '(zeta)\n', (4323, 4329), True, 'import numpy as np\n'), ((4491, 4520), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.9)', 'icolor'], {}), '(0.0, 0.9, icolor)\n', (4502, 4520), True, 'import numpy as np\n'), ((4595, 4655), 'numpy.arange', 'np.arange', (['(-30 * delr + 0.5 * delr)', '((ncol - 30) * delr)', 'delr'], {}), '(-30 * delr + 0.5 * delr, (ncol - 30) * delr, delr)\n', (4604, 4655), True, 'import numpy as np\n'), ((4799, 4846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fwid, fhgt)', 'facecolor': '"""w"""'}), "(figsize=(fwid, fhgt), facecolor='w')\n", (4809, 4846), True, 'import matplotlib.pyplot as plt\n'), ((5316, 5335), 'numpy.copy', 'np.copy', (['zini[0, :]'], {}), '(zini[0, :])\n', (5323, 5335), True, 'import numpy as np\n'), ((1639, 1660), 'os.listdir', 'os.listdir', (['workspace'], {}), '(workspace)\n', (1649, 1660), False, 'import os\n'), ((7695, 7741), 'os.path.join', 'os.path.join', (['workspace', '"""Ghyben-Herzberg.out"""'], {}), "(workspace, 'Ghyben-Herzberg.out')\n", (7707, 7741), False, 'import os\n'), ((1704, 1730), 'os.path.join', 'os.path.join', (['workspace', 'f'], {}), '(workspace, f)\n', (1716, 1730), False, 'import os\n'), ((1746, 1765), 'os.path.isdir', 'os.path.isdir', (['fpth'], {}), '(fpth)\n', (1759, 1765), False, 'import os\n'), ((2510, 2534), 'numpy.arange', 'np.arange', (['(-9)', '(-50)', '(-0.5)'], {}), '(-9, -50, -0.5)\n', (2519, 2534), True, 'import numpy as np\n'), ((1958, 1973), 'os.remove', 'os.remove', (['fpth'], {}), '(fpth)\n', (1967, 1973), False, 'import os\n'), ((2497, 2508), 'numpy.ones', 'np.ones', (['(24)'], {}), '(24)\n', (2504, 2508), True, 'import numpy as np\n'), ((2542, 2553), 'numpy.ones', 'np.ones', (['(94)'], {}), '(94)\n', (2549, 2553), True, 'import numpy as np\n'), ((1895, 1914), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (1911, 1914), False, 'import os\n'), ((1816, 1835), 
'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1832, 1835), False, 'import os\n')] |
#- Python 3 source code
#- qq-wait-times-dormant-bin5.py ~~
#
# This program creates a Quantile-Quantile plot (or more accurately here, a
# Percentile-Percentile plot) of the distributions for the wait times of
# bin 5 jobs submitted by non-CSC108 projects during the two weeks prior to
# the "dormant" period from July 21 through August 4, when CSC108 was not
# running ATLAS jobs, versus the wait times experienced during the dormant
# period itself.
#
# This program will not run correctly on OLCF machines unless the appropriate
# module has already been loaded:
#
# $ module load python_anaconda2
#
# ~~ (c) SRW, 24 Aug 2018
# ~~ last updated 04 Dec 2018
from datetime import datetime
import matplotlib
import matplotlib.pyplot as pyplot
import numpy
import os
import sqlite3
###
def analyze(connection):
    """Build the two wait-time samples and save the percentile-percentile plot.

    Queries the ``active`` table twice -- once for the two weeks before the
    CSC108 "dormant" period and once for the dormant period itself -- then
    plots matched percentiles of the two wait-time distributions against each
    other and writes the figure next to this script as a PNG.

    :param connection: open sqlite3 connection (row_factory must allow
        access by column name, as set up in main)
    """
    cursor = connection.cursor()
    # Wait times (StartTime - SubmissionTime; the exact time unit cancels
    # out of a percentile-percentile comparison) for non-CSC108 jobs small
    # enough for bin 5 (<= 125 nodes), submitted BEFORE the dormant period.
    query = """
        SELECT DISTINCT
            JobID,
            (StartTime - SubmissionTime) AS WaitTime
        FROM
            active
        WHERE
            (Account != "CSC108" OR User != "doleynik")
            AND SubmissionTime <= StartTime
            -- An estimate for July 7 through July 21
            AND (1530939600 < SampleTime AND SampleTime < 1532149200)
            AND ((ReqNodes IS NULL
                    AND (ReqProcs / 16) <= 125)
                OR (ReqNodes IS NOT NULL
                    AND ReqNodes <= 125))
        ;
        """
    with_csc108 = []
    for row in cursor.execute(query):
        with_csc108.append(row["WaitTime"])
    # Now we will change the query to find WaitTimes for jobs that ran while
    # CSC108 was "dormant".
    # NOTE(review): this second query uses a strict ``SubmissionTime <
    # StartTime`` where the first used ``<=`` -- confirm whether the
    # asymmetry is intentional.
    query = """
        SELECT DISTINCT JobID,
            (StartTime - SubmissionTime) AS WaitTime
        FROM
            active
        WHERE
            (Account != "CSC108" OR User != "doleynik")
            AND SubmissionTime < StartTime
            -- An estimate for July 21 through August 4
            AND (1532149200 < SampleTime AND SampleTime < 1533358800)
            AND ((ReqNodes IS NULL
                    AND (ReqProcs / 16) <= 125)
                OR (ReqNodes IS NOT NULL
                    AND ReqNodes <= 125))
        ;
        """
    wo_csc108 = []
    for row in cursor.execute(query):
        wo_csc108.append(row["WaitTime"])
    # Next, compute the percentiles or quantiles. It really doesn't matter which,
    # because we are only going to use those to relate the two distributions. I
    # will just call them "marks".
    marks_to_use = range(10, 90)
    marks_with = numpy.percentile(with_csc108, marks_to_use)
    marks_wo = numpy.percentile(wo_csc108, marks_to_use)
    # Create the QQ plot: matched percentiles of one sample against the other.
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    pyplot.plot(marks_with, marks_wo, 'bo')
    ax.set(xlabel = "Pre-Dormant (July 7 - 21)",
        ylabel = "Dormant (July 21 - August 4)",
        title = "QQ Plot: Wait Times for Bin 5 Jobs for Fixed Time Periods")
    ax.grid()
    # Name the output image after this script file.
    current_script = os.path.basename(__file__)
    fig.savefig(os.path.splitext(current_script)[0] + ".png", dpi = 300)
###
def main():
    """Locate the Moab database, open it read-only, and run the analysis.

    Raises:
        Exception: if neither the OLCF nor the local data directory exists.
    """
    # Store current working directory.
    cwd = os.getcwd()
    # Find the data directory, where this script is running remotely at OLCF and
    # locally on a personal laptop, for example.
    if os.path.isdir("/lustre/atlas/proj-shared/csc108/data/moab/"):
        data_dir = "/lustre/atlas/proj-shared/csc108/data/moab/"
    elif os.path.isdir(os.path.join(cwd, "moab")):
        data_dir = os.path.join(cwd, "moab")
    else:
        raise Exception("Data directory not found.")
    # Create string to represent path to database file.
    dbfilename = os.path.join(data_dir, "moab-data.sqlite")
    # Open connection to the database (file).
    connection = sqlite3.connect(dbfilename)
    try:
        # Enable users to access columns by name instead of by index.
        connection.row_factory = sqlite3.Row
        # Ensure read-only access to the database
        connection.execute("PRAGMA query_only = true;")
        # Run custom analysis code.
        analyze(connection)
        # Commit any changes (no-op for a read-only session).
        connection.commit()
    finally:
        # Close the connection even if the analysis raises, so the database
        # file handle is never leaked.
        connection.close()
###
if __name__ == "__main__":
    main()
#- vim:set syntax=python:
| [
"matplotlib.pyplot.plot",
"os.path.basename",
"os.getcwd",
"os.path.isdir",
"numpy.percentile",
"matplotlib.pyplot.figure",
"sqlite3.connect",
"os.path.splitext",
"os.path.join"
] | [((2761, 2804), 'numpy.percentile', 'numpy.percentile', (['with_csc108', 'marks_to_use'], {}), '(with_csc108, marks_to_use)\n', (2777, 2804), False, 'import numpy\n'), ((2820, 2861), 'numpy.percentile', 'numpy.percentile', (['wo_csc108', 'marks_to_use'], {}), '(wo_csc108, marks_to_use)\n', (2836, 2861), False, 'import numpy\n'), ((2898, 2913), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (2911, 2913), True, 'import matplotlib.pyplot as pyplot\n'), ((2949, 2988), 'matplotlib.pyplot.plot', 'pyplot.plot', (['marks_with', 'marks_wo', '"""bo"""'], {}), "(marks_with, marks_wo, 'bo')\n", (2960, 2988), True, 'import matplotlib.pyplot as pyplot\n'), ((3201, 3227), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (3217, 3227), False, 'import os\n'), ((3368, 3379), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3377, 3379), False, 'import os\n'), ((3515, 3575), 'os.path.isdir', 'os.path.isdir', (['"""/lustre/atlas/proj-shared/csc108/data/moab/"""'], {}), "('/lustre/atlas/proj-shared/csc108/data/moab/')\n", (3528, 3575), False, 'import os\n'), ((3874, 3916), 'os.path.join', 'os.path.join', (['data_dir', '"""moab-data.sqlite"""'], {}), "(data_dir, 'moab-data.sqlite')\n", (3886, 3916), False, 'import os\n'), ((3980, 4007), 'sqlite3.connect', 'sqlite3.connect', (['dbfilename'], {}), '(dbfilename)\n', (3995, 4007), False, 'import sqlite3\n'), ((3665, 3690), 'os.path.join', 'os.path.join', (['cwd', '"""moab"""'], {}), "(cwd, 'moab')\n", (3677, 3690), False, 'import os\n'), ((3712, 3737), 'os.path.join', 'os.path.join', (['cwd', '"""moab"""'], {}), "(cwd, 'moab')\n", (3724, 3737), False, 'import os\n'), ((3244, 3276), 'os.path.splitext', 'os.path.splitext', (['current_script'], {}), '(current_script)\n', (3260, 3276), False, 'import os\n')] |
import os
import ctypes
import numpy
from algopy.base_type import Ring
# Load the compiled libctps shared library that sits next to this module.
_ctps = numpy.ctypeslib.load_library('libctps', os.path.dirname(__file__))
# All four binary C routines take (int K, double *lhs, double *rhs,
# double *retval); their return value is not used by this module.
double_ptr = ctypes.POINTER(ctypes.c_double)
argtypes1 = [ctypes.c_int, double_ptr, double_ptr, double_ptr]
_ctps.ctps_add.argtypes = argtypes1
_ctps.ctps_sub.argtypes = argtypes1
_ctps.ctps_mul.argtypes = argtypes1
_ctps.ctps_div.argtypes = argtypes1
class CTPS(Ring):
    """Cross-derivative Taylor polynomial arithmetic backed by libctps.

    Elements of the factor ring R[t1,...,tK]/<t1^2,...,tK^2>; the four ring
    operations are delegated to the compiled C routines loaded above.
    """

    def __init__(self, data):
        """
        CTPS = Cross Derivative Taylor Polynomial
        Implements the factor ring R[t1,...,tK]/<t1^2,...,tK^2>
        Calls C functions internally, so the functionality should match the
        pure-Python CTPS implementation.
        """
        self.data = numpy.array(data)

    @staticmethod
    def _binary_op(cfunc, retval_data, lhs_data, rhs_data):
        # Shared marshalling for every libctps binary operation: pass the
        # coefficient count followed by raw double pointers into the arrays.
        cfunc(
            retval_data.size,
            lhs_data.ctypes.data_as(double_ptr),
            rhs_data.ctypes.data_as(double_ptr),
            retval_data.ctypes.data_as(double_ptr),
        )

    @classmethod
    def __scalar_to_data__(cls, xdata, x):
        # A plain scalar lives entirely in the constant coefficient.
        xdata[0] = x

    @classmethod
    def __zeros_like__(cls, data):
        return numpy.zeros_like(data)

    @classmethod
    def add(cls, retval_data, lhs_data, rhs_data):
        cls._binary_op(_ctps.ctps_add, retval_data, lhs_data, rhs_data)

    @classmethod
    def sub(cls, retval_data, lhs_data, rhs_data):
        cls._binary_op(_ctps.ctps_sub, retval_data, lhs_data, rhs_data)

    @classmethod
    def mul(cls, retval_data, lhs_data, rhs_data):
        cls._binary_op(_ctps.ctps_mul, retval_data, lhs_data, rhs_data)

    @classmethod
    def div(cls, retval_data, lhs_data, rhs_data):
        cls._binary_op(_ctps.ctps_div, retval_data, lhs_data, rhs_data)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.data)
| [
"os.path.dirname",
"numpy.zeros_like",
"numpy.array",
"ctypes.POINTER"
] | [((163, 194), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (177, 194), False, 'import ctypes\n'), ((121, 146), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (136, 146), False, 'import os\n'), ((721, 738), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (732, 738), False, 'import numpy\n'), ((905, 927), 'numpy.zeros_like', 'numpy.zeros_like', (['data'], {}), '(data)\n', (921, 927), False, 'import numpy\n')] |
from __future__ import print_function
import argparse
import os
import h5py
import numpy as np
import sys
from molecules.model import MoleculeVAE
from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, \
decode_smiles_from_indexes, load_dataset
from pylab import figure, axes, scatter, title, show
from rdkit import Chem
from rdkit.Chem import Draw
LATENT_DIM = 292
TARGET = 'autoencoder'
def get_arguments():
    """Define and parse this script's command-line interface."""
    arg_parser = argparse.ArgumentParser(description='Molecular autoencoder network')
    arg_parser.add_argument('data', type=str,
                            help='File of latent representation tensors for decoding.')
    arg_parser.add_argument('model', type=str, help='Trained Keras model to use.')
    arg_parser.add_argument('--save_h5', type=str,
                            help='Name of a file to write HDF5 output to.')
    arg_parser.add_argument('--target', type=str, default=TARGET,
                            help='What model to sample from: autoencoder, encoder, decoder.')
    arg_parser.add_argument('--latent_dim', type=int, metavar='N', default=LATENT_DIM,
                            help='Dimensionality of the latent representation.')
    return arg_parser.parse_args()
def read_latent_data(filename):
    """Read latent vectors and the character set from an HDF5 file.

    :param filename: path to an HDF5 file with 'latent_vectors' and
        'charset' datasets
    :return: (data, charset) tuple of in-memory arrays
    """
    # Context manager guarantees the file handle is released even if a
    # dataset read fails (the original closed it manually).
    with h5py.File(filename, 'r') as h5f:
        data = h5f['latent_vectors'][:]
        charset = h5f['charset'][:]
    return (data, charset)
def autoencoder(args, model):
    """Round-trip the first input sample through the full autoencoder and
    print both the original and the reconstructed SMILES string."""
    latent_dim = args.latent_dim
    data, charset = load_dataset(args.data, split=False)
    if not os.path.isfile(args.model):
        raise ValueError("Model file %s doesn't exist" % args.model)
    model.load(charset, args.model, latent_rep_size=latent_dim)
    one_hot_sample = data[0].reshape(1, 120, len(charset))
    sampled = model.autoencoder.predict(one_hot_sample).argmax(axis=2)[0]
    mol = decode_smiles_from_indexes(map(from_one_hot_array, data[0]), charset)
    sampled = decode_smiles_from_indexes(sampled, charset)
    print(mol)
    print(sampled)
def decoder(args, model):
    """Decode the first latent vector in the input file and print the
    resulting SMILES string."""
    latent_dim = args.latent_dim
    data, charset = read_latent_data(args.data)
    if not os.path.isfile(args.model):
        raise ValueError("Model file %s doesn't exist" % args.model)
    model.load(charset, args.model, latent_rep_size=latent_dim)
    indexes = model.decoder.predict(data[0].reshape(1, latent_dim)).argmax(axis=2)[0]
    print(decode_smiles_from_indexes(indexes, charset))
def encoder(args, model):
    """Encode the one-hot dataset into latent vectors.

    Writes the latent vectors plus the charset to an HDF5 file when
    ``--save_h5`` was given; otherwise dumps them tab-separated to stdout.

    :raises ValueError: if the model file does not exist
    """
    latent_dim = args.latent_dim
    data, charset = load_dataset(args.data, split=False)
    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size=latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)
    x_latent = model.encoder.predict(data)
    if args.save_h5:
        # Context manager guarantees the HDF5 file is closed even if a
        # dataset write fails (the original closed it manually).
        with h5py.File(args.save_h5, 'w') as h5f:
            h5f.create_dataset('charset', data=charset)
            h5f.create_dataset('latent_vectors', data=x_latent)
    else:
        np.savetxt(sys.stdout, x_latent, delimiter='\t')
def main():
    """Parse CLI arguments and dispatch to the requested sampling mode."""
    args = get_arguments()
    model = MoleculeVAE()
    # Map each valid --target value to its handler; unknown targets are a
    # silent no-op, exactly as in the original if/elif chain.
    handlers = {
        'autoencoder': autoencoder,
        'encoder': encoder,
        'decoder': decoder,
    }
    handler = handlers.get(args.target)
    if handler is not None:
        handler(args, model)

if __name__ == '__main__':
    main()
| [
"h5py.File",
"argparse.ArgumentParser",
"molecules.model.MoleculeVAE",
"numpy.savetxt",
"molecules.utils.decode_smiles_from_indexes",
"os.path.isfile",
"molecules.utils.load_dataset"
] | [((455, 523), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Molecular autoencoder network"""'}), "(description='Molecular autoencoder network')\n", (478, 523), False, 'import argparse\n'), ((1186, 1210), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1195, 1210), False, 'import h5py\n'), ((1407, 1443), 'molecules.utils.load_dataset', 'load_dataset', (['args.data'], {'split': '(False)'}), '(args.data, split=False)\n', (1419, 1443), False, 'from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, decode_smiles_from_indexes, load_dataset\n'), ((1454, 1480), 'os.path.isfile', 'os.path.isfile', (['args.model'], {}), '(args.model)\n', (1468, 1480), False, 'import os\n'), ((1823, 1867), 'molecules.utils.decode_smiles_from_indexes', 'decode_smiles_from_indexes', (['sampled', 'charset'], {}), '(sampled, charset)\n', (1849, 1867), False, 'from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, decode_smiles_from_indexes, load_dataset\n'), ((2018, 2044), 'os.path.isfile', 'os.path.isfile', (['args.model'], {}), '(args.model)\n', (2032, 2044), False, 'import os\n'), ((2296, 2340), 'molecules.utils.decode_smiles_from_indexes', 'decode_smiles_from_indexes', (['sampled', 'charset'], {}), '(sampled, charset)\n', (2322, 2340), False, 'from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, decode_smiles_from_indexes, load_dataset\n'), ((2440, 2476), 'molecules.utils.load_dataset', 'load_dataset', (['args.data'], {'split': '(False)'}), '(args.data, split=False)\n', (2452, 2476), False, 'from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, decode_smiles_from_indexes, load_dataset\n'), ((2487, 2513), 'os.path.isfile', 'os.path.isfile', (['args.model'], {}), '(args.model)\n', (2501, 2513), False, 'import os\n'), ((3029, 3042), 'molecules.model.MoleculeVAE', 'MoleculeVAE', ([], {}), '()\n', (3040, 3042), False, 'from 
molecules.model import MoleculeVAE\n'), ((2743, 2771), 'h5py.File', 'h5py.File', (['args.save_h5', '"""w"""'], {}), "(args.save_h5, 'w')\n", (2752, 2771), False, 'import h5py\n'), ((2926, 2974), 'numpy.savetxt', 'np.savetxt', (['sys.stdout', 'x_latent'], {'delimiter': '"""\t"""'}), "(sys.stdout, x_latent, delimiter='\\t')\n", (2936, 2974), True, 'import numpy as np\n')] |
import os
import numpy as np
from libyana.meshutils import meshio
def load_objects(obj_root):
    """Load the simplified object meshes found under *obj_root*.

    :param obj_root: directory with one sub-directory per object, each
        containing a ``textured_simple.obj`` mesh
    :return: dict mapping object name -> {"verts": ..., "faces": ...}
    """
    # Skip compressed archives that may sit next to the extracted folders.
    object_names = [obj_name for obj_name in os.listdir(obj_root) if ".tgz" not in obj_name]
    objects = {}
    for obj_name in object_names:
        # obj_path = os.path.join(obj_root, obj_name, "textured_simple_2000.obj")
        obj_path = os.path.join(obj_root, obj_name, "textured_simple.obj")  # TODO use full objects
        with open(obj_path) as m_f:
            # fast_load_obj returns a list; presumably one mesh per file,
            # of which only the first is used -- TODO confirm.
            mesh = meshio.fast_load_obj(m_f)[0]
        objects[obj_name] = {"verts": mesh["vertices"], "faces": mesh["faces"]}
    return objects
def load_corners(corner_root):
    """Load the per-object corner arrays stored under *corner_root*.

    :param corner_root: directory with one sub-directory per object, each
        containing a ``corners.npy`` file
    :return: dict mapping object name -> corners array
    """
    return {
        objname: np.load(os.path.join(corner_root, objname, "corners.npy"))
        for objname in os.listdir(corner_root)
    }
def lineParser(line, annoDict):
    """Parse one line of 'anno.txt' and store the annotation under its line id.

    Each line reads ``<lineid> <objID>`` followed by 130 floats laid out as
    21x3 hand joints, 48 hand pose parameters, 3 hand translation, 10 hand
    shape (beta), 3 object rotation and 3 object translation values.

    :param line: one whitespace-separated line from 'anno.txt'
    :param annoDict: dict the parsed entry is added to (mutated in place)
    :return: annoDict with the new entry added
    """
    lineList = line.split()
    lineid = lineList[0]
    objID = lineList[1]
    paramsList = list(map(float, lineList[2:]))
    assert lineid not in annoDict, "Something wrong with the annotation file..."
    # Named offsets into the flat parameter vector (replaces magic slices).
    nJoints = 63  # 21 joints * 3 coordinates
    i0 = nJoints            # end of hand joints / start of hand pose
    i1 = i0 + 48            # end of hand pose / start of hand translation
    i2 = i1 + 3             # end of hand translation / start of hand beta
    i3 = i2 + 10            # end of hand beta / start of object rotation
    i4 = i3 + 3             # end of object rotation / start of object translation
    annoDict[lineid] = {
        "objID": objID,
        "handJoints": np.reshape(np.array(paramsList[:nJoints]), [21, 3]),
        "handPose": np.array(paramsList[i0:i1]),
        "handTrans": np.array(paramsList[i1:i2]),
        "handBeta": np.array(paramsList[i2:i3]),
        "objRot": np.array(paramsList[i3:i4]),
        "objTrans": np.array(paramsList[i4:i4 + 3]),
    }
    return annoDict
def parseAnnoTxt(filename):
    """Parse the 'anno.txt' annotation file.

    :param filename: path to 'anno.txt'
    :return: dict with one entry per line, keyed by line id
    """
    annoDict = {}
    # Use a context manager so the file handle is closed even if parsing
    # raises (the original opened the file and never closed it).
    with open(filename, "r") as ftxt:
        for line in ftxt:
            lineParser(line, annoDict)
    return annoDict
def project3DPoints(camMat, pts3D, isOpenGLCoords=True):
    """Project (N, 3) camera-space points onto the image plane.

    :param camMat: 3x3 camera matrix
    :param pts3D: (N, 3) array of 3D points
    :param isOpenGLCoords: if True, points follow the OpenGL convention
        (hand/object along the negative z-axis) and are flipped into the
        camera frame first; if False they already lie along positive z
    :return: (N, 2) array of projected 2D points
    """
    assert pts3D.shape[-1] == 3
    assert len(pts3D.shape) == 2
    if isOpenGLCoords:
        # Negate y and z to convert OpenGL coordinates to camera coordinates.
        flip_yz = np.array(
            [[1.0, 0.0, 0.0], [0, -1.0, 0.0], [0.0, 0.0, -1.0]], dtype=np.float32
        )
        pts3D = pts3D.dot(flip_yz.T)
    homogeneous = pts3D.dot(camMat.T)
    # Perspective divide: (x/z, y/z) per point.
    projPts = np.stack(
        [homogeneous[:, 0] / homogeneous[:, 2],
         homogeneous[:, 1] / homogeneous[:, 2]],
        axis=1,
    )
    assert len(projPts.shape) == 2
    return projPts
| [
"numpy.stack",
"numpy.load",
"numpy.array",
"libyana.meshutils.meshio.fast_load_obj",
"os.path.join",
"os.listdir"
] | [((679, 702), 'os.listdir', 'os.listdir', (['corner_root'], {}), '(corner_root)\n', (689, 702), False, 'import os\n'), ((2559, 2638), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0, -1.0, 0.0], [0.0, 0.0, -1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 0.0], [0, -1.0, 0.0], [0.0, 0.0, -1.0]], dtype=np.float32)\n', (2567, 2638), True, 'import numpy as np\n'), ((2755, 2840), 'numpy.stack', 'np.stack', (['[projPts[:, 0] / projPts[:, 2], projPts[:, 1] / projPts[:, 2]]'], {'axis': '(1)'}), '([projPts[:, 0] / projPts[:, 2], projPts[:, 1] / projPts[:, 2]], axis=1\n )\n', (2763, 2840), True, 'import numpy as np\n'), ((342, 397), 'os.path.join', 'os.path.join', (['obj_root', 'obj_name', '"""textured_simple.obj"""'], {}), "(obj_root, obj_name, 'textured_simple.obj')\n", (354, 397), False, 'import os\n'), ((723, 772), 'os.path.join', 'os.path.join', (['corner_root', 'objname', '"""corners.npy"""'], {}), "(corner_root, objname, 'corners.npy')\n", (735, 772), False, 'import os\n'), ((791, 808), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (798, 808), True, 'import numpy as np\n'), ((1469, 1501), 'numpy.array', 'np.array', (['paramsList[63:63 + 48]'], {}), '(paramsList[63:63 + 48])\n', (1477, 1501), True, 'import numpy as np\n'), ((1526, 1567), 'numpy.array', 'np.array', (['paramsList[63 + 48:63 + 48 + 3]'], {}), '(paramsList[63 + 48:63 + 48 + 3])\n', (1534, 1567), True, 'import numpy as np\n'), ((1591, 1641), 'numpy.array', 'np.array', (['paramsList[63 + 48 + 3:63 + 48 + 3 + 10]'], {}), '(paramsList[63 + 48 + 3:63 + 48 + 3 + 10])\n', (1599, 1641), True, 'import numpy as np\n'), ((1663, 1722), 'numpy.array', 'np.array', (['paramsList[63 + 48 + 3 + 10:63 + 48 + 3 + 10 + 3]'], {}), '(paramsList[63 + 48 + 3 + 10:63 + 48 + 3 + 10 + 3])\n', (1671, 1722), True, 'import numpy as np\n'), ((1746, 1813), 'numpy.array', 'np.array', (['paramsList[63 + 48 + 3 + 10 + 3:63 + 48 + 3 + 10 + 3 + 3]'], {}), '(paramsList[63 + 48 + 3 + 10 + 3:63 + 48 + 3 + 10 + 3 + 3])\n', 
(1754, 1813), True, 'import numpy as np\n'), ((142, 162), 'os.listdir', 'os.listdir', (['obj_root'], {}), '(obj_root)\n', (152, 162), False, 'import os\n'), ((1412, 1437), 'numpy.array', 'np.array', (['paramsList[:63]'], {}), '(paramsList[:63])\n', (1420, 1437), True, 'import numpy as np\n'), ((478, 503), 'libyana.meshutils.meshio.fast_load_obj', 'meshio.fast_load_obj', (['m_f'], {}), '(m_f)\n', (498, 503), False, 'from libyana.meshutils import meshio\n')] |
"""
Create a flat plate of length 1.0 with aspect ratio 2.0 and a 40-degree
inclination.
The plate is discretized with spacing 0.04 in the x-y plane and with spacing
0.04 along the z-direction.
"""
import math
import pathlib
import numpy
# Flat-plate's parameters.
L = 1.0 # chord length
AR = 2.0 # aspect ratio
xc, yc, zc = 0.0, 0.0, 0.0 # center's coordinates
aoa = 40.0 # angle of inclination in degrees
ds = 0.04 # mesh spacing
simu_dir = pathlib.Path(__file__).absolute().parents[1]
# Generate coordinates of the flat plate.
n = math.ceil(L / ds)
s = numpy.linspace(xc - L / 2, xc + L / 2, num=n + 1)
x = xc + numpy.cos(numpy.radians(-aoa)) * s
y = yc + numpy.sin(numpy.radians(-aoa)) * s
nz = math.ceil(L * AR / ds)
z = numpy.linspace(zc - L * AR / 2, zc + L * AR / 2, num=nz + 1)
# Write coordinates into file.
filepath = simu_dir / 'flatplateAoA{}.body'.format(aoa)
with open(filepath, 'w') as outfile:
outfile.write('{}\n'.format(x.size * z.size))
for zi in z:
with open(filepath, 'ab') as outfile:
numpy.savetxt(outfile, numpy.c_[x, y, zi * numpy.ones(x.size)])
| [
"numpy.radians",
"math.ceil",
"numpy.ones",
"pathlib.Path",
"numpy.linspace"
] | [((544, 561), 'math.ceil', 'math.ceil', (['(L / ds)'], {}), '(L / ds)\n', (553, 561), False, 'import math\n'), ((566, 615), 'numpy.linspace', 'numpy.linspace', (['(xc - L / 2)', '(xc + L / 2)'], {'num': '(n + 1)'}), '(xc - L / 2, xc + L / 2, num=n + 1)\n', (580, 615), False, 'import numpy\n'), ((711, 733), 'math.ceil', 'math.ceil', (['(L * AR / ds)'], {}), '(L * AR / ds)\n', (720, 733), False, 'import math\n'), ((738, 798), 'numpy.linspace', 'numpy.linspace', (['(zc - L * AR / 2)', '(zc + L * AR / 2)'], {'num': '(nz + 1)'}), '(zc - L * AR / 2, zc + L * AR / 2, num=nz + 1)\n', (752, 798), False, 'import numpy\n'), ((636, 655), 'numpy.radians', 'numpy.radians', (['(-aoa)'], {}), '(-aoa)\n', (649, 655), False, 'import numpy\n'), ((680, 699), 'numpy.radians', 'numpy.radians', (['(-aoa)'], {}), '(-aoa)\n', (693, 699), False, 'import numpy\n'), ((452, 474), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (464, 474), False, 'import pathlib\n'), ((1080, 1098), 'numpy.ones', 'numpy.ones', (['x.size'], {}), '(x.size)\n', (1090, 1098), False, 'import numpy\n')] |
"""well_utils.py: functions used by the classes in resqpy.well"""
version = '10th November 2021'
# Nexus is a registered trademark of the Halliburton Company
# RMS and ROXAR are registered trademarks of Roxar Software Solutions AS, an Emerson company
import logging
log = logging.getLogger(__name__)
import numpy as np
import resqpy.olio.grid_functions as gf
import resqpy.olio.intersection as intersect
import resqpy.olio.keyword_files as kf
import resqpy.olio.xml_et as rqet
def load_hdf5_array(object, node, array_attribute, tag = 'Values', dtype = 'float', model = None):
    """Loads the property array data as an attribute of object, from the hdf5 referenced in xml node.

    :meta private:
    """
    supported_node_types = ('DoubleHdf5Array', 'IntegerHdf5Array', 'Point3dHdf5Array')
    assert rqet.node_type(node) in supported_node_types
    # default to the model held by the target object when none is supplied
    use_model = object.model if model is None else model
    h5_key_pair = use_model.h5_uuid_and_path_for_node(node, tag = tag)
    if h5_key_pair is None:
        return None
    # cache the array on the object under the requested attribute name
    return use_model.h5_array_element(h5_key_pair,
                                      index = None,
                                      cache_array = True,
                                      dtype = dtype,
                                      object = object,
                                      array_attribute = array_attribute)
def extract_xyz(xyz_node):
    """Extracts an x,y,z coordinate from a solitary point xml node.

    argument:
       xyz_node: the xml node representing the solitary point (in 3D space)

    returns:
       triple float: (x, y, z) coordinates as a tuple
    """
    if xyz_node is None:
        return None
    coords = np.zeros(3)
    # xml tags are 1-based: Coordinate1, Coordinate2, Coordinate3
    for axis_index, tag in enumerate(('Coordinate1', 'Coordinate2', 'Coordinate3')):
        coords[axis_index] = rqet.find_tag_float(xyz_node, tag, must_exist = True)
    return tuple(coords)
def well_names_in_cellio_file(cellio_file):
    """Returns a list of well names as found in the RMS blocked well export cell I/O file."""
    names = []
    with open(cellio_file, 'r') as fp:
        while True:
            kf.skip_blank_lines_and_comments(fp)
            version_line = fp.readline()  # file format version number?
            if version_line == '':
                # reached end of file
                break
            fp.readline()  # discard the 'Undefined' line
            header_words = fp.readline().split()
            assert len(header_words), 'missing header info (well name) in cell I/O file'
            names.append(header_words[0])
            # skip the rest of this well's data block, up to the next blank line
            while not kf.blank_line(fp):
                fp.readline()
    return names
# 'private' functions
def find_entry_and_exit(cp, entry_vector, exit_vector, well_name):
    """Returns (entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz).

    :meta private:
    """
    cell_centre = np.mean(cp, axis = (0, 1, 2))
    # 6 faces x 4 triangles each, flattened to 24 triangles of 3 corner points
    face_triangles = gf.triangles_for_cell_faces(cp).reshape(-1, 3, 3)

    def _first_face_hit(points, what):
        # scan the 24 candidate intersections; NaN entries mean no intersection
        # with that triangle; derive face axis and polarity from triangle index
        for t in range(24):
            if not np.any(np.isnan(points[t])):
                axis = t // 8
                polarity = (t - 8 * axis) // 4
                return axis, polarity, points[t]
        raise AssertionError('failed to find ' + what + ' face for a perforation in well ' + str(well_name))

    entry_points = intersect.line_triangles_intersects(cell_centre, entry_vector, face_triangles, line_segment = True)
    entry_axis, entry_polarity, entry_xyz = _first_face_hit(entry_points, 'entry')
    exit_points = intersect.line_triangles_intersects(cell_centre, exit_vector, face_triangles, line_segment = True)
    # bug fix: exit_xyz was previously (incorrectly) taken from entry_points
    exit_axis, exit_polarity, exit_xyz = _first_face_hit(exit_points, 'exit')
    return (entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz)
def _as_optional_array(arr):
"""If not None, cast as numpy array.
Casting directly to an array can be problematic: np.array(None) creates an unsized array, which is potentially
confusing.
"""
if arr is None:
return None
else:
return np.array(arr)
def _pl(i, e = False):
return '' if i == 1 else 'es' if e else 's'
def _derive_from_wellspec_verify_col_list(add_properties):
""" Verify additional properties to be added to the WELLSPEC file.
argument:
add_properties (boolean): if True, the additional properties specified will be added to the WELLSPEC file
returns:
list of columns to be added to the WELLSPEC file
"""
if add_properties:
if isinstance(add_properties, list):
col_list = ['IW', 'JW', 'L'] + [col.upper() for col in add_properties if col not in ['IW', 'JW', 'L']]
else:
col_list = []
else:
col_list = ['IW', 'JW', 'L', 'ANGLA', 'ANGLV']
return col_list
def _derive_from_wellspec_check_grid_name(check_grid_name, grid, col_list):
""" Verify the grid object to which the cell indices in the WELLSPEC table belong.
arguments:
check_grid_name (boolean): if True, the citation title of the grid will be extracted and returned
grid (grid object): the grid object whose citation titles will be returned
col_list (list): list of strings of column names to be added to the WELLSPEC file. If a citation title is
extracted from the grid object, 'GRID' will be added to the col_list
returns:
string of grid citation title extracted from the grid object
list of columns to be added to the WELLSPEC file
"""
if check_grid_name:
grid_name = rqet.citation_title_for_node(grid.root).upper()
if not grid_name:
name_for_check = None
else:
col_list.append('GRID')
name_for_check = grid_name
else:
name_for_check = None
return name_for_check, col_list
| [
"resqpy.olio.xml_et.citation_title_for_node",
"numpy.zeros",
"resqpy.olio.keyword_files.skip_blank_lines_and_comments",
"numpy.isnan",
"resqpy.olio.grid_functions.triangles_for_cell_faces",
"resqpy.olio.keyword_files.blank_line",
"numpy.mean",
"numpy.array",
"resqpy.olio.xml_et.node_type",
"resqpy... | [((276, 303), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (293, 303), False, 'import logging\n'), ((1616, 1627), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1624, 1627), True, 'import numpy as np\n'), ((2738, 2765), 'numpy.mean', 'np.mean', (['cp'], {'axis': '(0, 1, 2)'}), '(cp, axis=(0, 1, 2))\n', (2745, 2765), True, 'import numpy as np\n'), ((2901, 3002), 'resqpy.olio.intersection.line_triangles_intersects', 'intersect.line_triangles_intersects', (['cell_centre', 'entry_vector', 'face_triangles'], {'line_segment': '(True)'}), '(cell_centre, entry_vector,\n face_triangles, line_segment=True)\n', (2936, 3002), True, 'import resqpy.olio.intersection as intersect\n'), ((3407, 3507), 'resqpy.olio.intersection.line_triangles_intersects', 'intersect.line_triangles_intersects', (['cell_centre', 'exit_vector', 'face_triangles'], {'line_segment': '(True)'}), '(cell_centre, exit_vector,\n face_triangles, line_segment=True)\n', (3442, 3507), True, 'import resqpy.olio.intersection as intersect\n'), ((727, 747), 'resqpy.olio.xml_et.node_type', 'rqet.node_type', (['node'], {}), '(node)\n', (741, 747), True, 'import resqpy.olio.xml_et as rqet\n'), ((4226, 4239), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (4234, 4239), True, 'import numpy as np\n'), ((2006, 2042), 'resqpy.olio.keyword_files.skip_blank_lines_and_comments', 'kf.skip_blank_lines_and_comments', (['fp'], {}), '(fp)\n', (2038, 2042), True, 'import resqpy.olio.keyword_files as kf\n'), ((2789, 2820), 'resqpy.olio.grid_functions.triangles_for_cell_faces', 'gf.triangles_for_cell_faces', (['cp'], {}), '(cp)\n', (2816, 2820), True, 'import resqpy.olio.grid_functions as gf\n'), ((2397, 2414), 'resqpy.olio.keyword_files.blank_line', 'kf.blank_line', (['fp'], {}), '(fp)\n', (2410, 2414), True, 'import resqpy.olio.keyword_files as kf\n'), ((3109, 3134), 'numpy.isnan', 'np.isnan', (['entry_points[t]'], {}), '(entry_points[t])\n', (3117, 3134), True, 'import numpy as 
np\n'), ((3589, 3613), 'numpy.isnan', 'np.isnan', (['exit_points[t]'], {}), '(exit_points[t])\n', (3597, 3613), True, 'import numpy as np\n'), ((5725, 5764), 'resqpy.olio.xml_et.citation_title_for_node', 'rqet.citation_title_for_node', (['grid.root'], {}), '(grid.root)\n', (5753, 5764), True, 'import resqpy.olio.xml_et as rqet\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
import time
from math import ceil
from typing import List, Tuple
from unittest.mock import patch
import numpy as np
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.generator_run import GeneratorRun
from ax.core.metric import Metric
from ax.core.outcome_constraint import OutcomeConstraint
from ax.core.parameter import (
ChoiceParameter,
FixedParameter,
ParameterType,
RangeParameter,
)
from ax.core.types import ComparisonOp
from ax.exceptions.core import DataRequiredError, UnsupportedPlotError
from ax.metrics.branin import branin
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import MODEL_KEY_TO_MODEL_SETUP, Models
from ax.service.ax_client import AxClient
from ax.storage.sqa_store.db import init_test_engine_and_session_factory
from ax.storage.sqa_store.decoder import Decoder
from ax.storage.sqa_store.encoder import Encoder
from ax.storage.sqa_store.sqa_config import SQAConfig
from ax.storage.sqa_store.structs import DBSettings
from ax.utils.common.testutils import TestCase
from ax.utils.common.timeutils import current_timestamp_in_millis
from ax.utils.common.typeutils import checked_cast, not_none
from ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans
def run_trials_using_recommended_parallelism(
    ax_client: AxClient,
    recommended_parallelism: List[Tuple[int, int]],
    total_trials: int,
) -> int:
    """Run `total_trials` trials in batches sized per the recommended parallelism.

    Returns the number of trials left unrun (0 when everything completed).
    """
    trials_left = total_trials
    for num_trials, parallelism_setting in recommended_parallelism:
        if num_trials == -1:
            # -1 means: use this setting for all remaining trials
            num_trials = trials_left
        for _ in range(ceil(num_trials / parallelism_setting)):
            # generate a batch no larger than the trials still outstanding
            batch = []
            for _ in range(min(parallelism_setting, trials_left)):
                params, idx = ax_client.get_next_trial()
                batch.append((params, idx))
                trials_left -= 1
            # complete the in-flight trials, most recently generated first
            for params, idx in reversed(batch):
                ax_client.complete_trial(idx, branin(params["x"], params["y"]))
    # If all went well and no errors were raised, trials_left should be 0.
    return trials_left
class TestAxClient(TestCase):
"""Tests service-like API functionality."""
def setUp(self):
# To avoid tests timing out due to GP fit / gen times.
patch.dict(
f"{Models.__module__}.MODEL_KEY_TO_MODEL_SETUP",
{"GPEI": MODEL_KEY_TO_MODEL_SETUP["Sobol"]},
).start()
def test_interruption(self) -> None:
ax_client = AxClient()
ax_client.create_experiment(
name="test",
parameters=[ # pyre-fixme[6]: expected union that should include
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
objective_name="branin",
minimize=True,
)
for i in range(6):
parameterization, trial_index = ax_client.get_next_trial()
self.assertFalse( # There should be non-complete trials.
all(t.status.is_terminal for t in ax_client.experiment.trials.values())
)
x, y = parameterization.get("x"), parameterization.get("y")
ax_client.complete_trial(
trial_index,
raw_data=checked_cast(
float, branin(checked_cast(float, x), checked_cast(float, y))
),
)
old_client = ax_client
serialized = ax_client.to_json_snapshot()
ax_client = AxClient.from_json_snapshot(serialized)
self.assertEqual(len(ax_client.experiment.trials.keys()), i + 1)
self.assertIsNot(ax_client, old_client)
self.assertTrue( # There should be no non-complete trials.
all(t.status.is_terminal for t in ax_client.experiment.trials.values())
)
@patch(
"ax.modelbridge.base.observations_from_data",
autospec=True,
return_value=([get_observation1()]),
)
@patch(
"ax.modelbridge.random.RandomModelBridge.get_training_data",
autospec=True,
return_value=([get_observation1()]),
)
@patch(
"ax.modelbridge.random.RandomModelBridge._predict",
autospec=True,
return_value=[get_observation1trans().data],
)
@patch(
"ax.modelbridge.random.RandomModelBridge.feature_importances",
autospec=True,
return_value={"x": 0.9, "y": 1.1},
)
def test_default_generation_strategy_continuous(self, _a, _b, _c, _d) -> None:
"""Test that Sobol+GPEI is used if no GenerationStrategy is provided."""
ax_client = AxClient()
ax_client.create_experiment(
parameters=[ # pyre-fixme[6]: expected union that should include
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
objective_name="a",
minimize=True,
)
self.assertEqual(
[s.model for s in not_none(ax_client.generation_strategy)._steps],
[Models.SOBOL, Models.GPEI],
)
with self.assertRaisesRegex(ValueError, ".* no trials"):
ax_client.get_optimization_trace(objective_optimum=branin.fmin)
for i in range(6):
parameterization, trial_index = ax_client.get_next_trial()
x, y = parameterization.get("x"), parameterization.get("y")
ax_client.complete_trial(
trial_index,
raw_data={
"a": (
checked_cast(
float,
branin(checked_cast(float, x), checked_cast(float, y)),
),
0.0,
)
},
sample_size=i,
)
self.assertEqual(ax_client.generation_strategy.model._model_key, "GPEI")
ax_client.get_optimization_trace(objective_optimum=branin.fmin)
ax_client.get_contour_plot()
ax_client.get_feature_importances()
trials_df = ax_client.get_trials_data_frame()
self.assertIn("x", trials_df)
self.assertIn("y", trials_df)
self.assertIn("a", trials_df)
self.assertEqual(len(trials_df), 6)
def test_default_generation_strategy_discrete(self) -> None:
"""Test that Sobol is used if no GenerationStrategy is provided and
the search space is discrete.
"""
# Test that Sobol is chosen when all parameters are choice.
ax_client = AxClient()
ax_client.create_experiment(
parameters=[ # pyre-fixme[6]: expected union that should include
{"name": "x", "type": "choice", "values": [1, 2, 3]},
{"name": "y", "type": "choice", "values": [1, 2, 3]},
]
)
self.assertEqual(
[s.model for s in not_none(ax_client.generation_strategy)._steps],
[Models.SOBOL],
)
self.assertEqual(ax_client.get_max_parallelism(), [(-1, -1)])
self.assertTrue(ax_client.get_trials_data_frame().empty)
def test_create_experiment(self) -> None:
"""Test basic experiment creation."""
ax_client = AxClient(
GenerationStrategy(
steps=[GenerationStep(model=Models.SOBOL, num_trials=30)]
)
)
with self.assertRaisesRegex(ValueError, "Experiment not set on Ax client"):
ax_client.experiment
ax_client.create_experiment(
name="test_experiment",
parameters=[
{
"name": "x",
"type": "range",
"bounds": [0.001, 0.1],
"value_type": "float",
"log_scale": True,
},
{
"name": "y",
"type": "choice",
"values": [1, 2, 3],
"value_type": "int",
"is_ordered": True,
},
{"name": "x3", "type": "fixed", "value": 2, "value_type": "int"},
{
"name": "x4",
"type": "range",
"bounds": [1.0, 3.0],
"value_type": "int",
},
{
"name": "x5",
"type": "choice",
"values": ["one", "two", "three"],
"value_type": "str",
},
{
"name": "x6",
"type": "range",
"bounds": [1.0, 3.0],
"value_type": "int",
},
],
objective_name="test_objective",
minimize=True,
outcome_constraints=["some_metric >= 3", "some_metric <= 4.0"],
parameter_constraints=["x4 <= x6"],
)
assert ax_client._experiment is not None
self.assertEqual(ax_client._experiment, ax_client.experiment)
self.assertEqual(
ax_client._experiment.search_space.parameters["x"],
RangeParameter(
name="x",
parameter_type=ParameterType.FLOAT,
lower=0.001,
upper=0.1,
log_scale=True,
),
)
self.assertEqual(
ax_client._experiment.search_space.parameters["y"],
ChoiceParameter(
name="y",
parameter_type=ParameterType.INT,
values=[1, 2, 3],
is_ordered=True,
),
)
self.assertEqual(
ax_client._experiment.search_space.parameters["x3"],
FixedParameter(name="x3", parameter_type=ParameterType.INT, value=2),
)
self.assertEqual(
ax_client._experiment.search_space.parameters["x4"],
RangeParameter(
name="x4", parameter_type=ParameterType.INT, lower=1.0, upper=3.0
),
)
self.assertEqual(
ax_client._experiment.search_space.parameters["x5"],
ChoiceParameter(
name="x5",
parameter_type=ParameterType.STRING,
values=["one", "two", "three"],
),
)
self.assertEqual(
ax_client._experiment.optimization_config.outcome_constraints[0],
OutcomeConstraint(
metric=Metric(name="some_metric"),
op=ComparisonOp.GEQ,
bound=3.0,
relative=False,
),
)
self.assertEqual(
ax_client._experiment.optimization_config.outcome_constraints[1],
OutcomeConstraint(
metric=Metric(name="some_metric"),
op=ComparisonOp.LEQ,
bound=4.0,
relative=False,
),
)
self.assertTrue(ax_client._experiment.optimization_config.objective.minimize)
def test_constraint_same_as_objective(self):
"""Check that we do not allow constraints on the objective metric."""
ax_client = AxClient(
GenerationStrategy(
steps=[GenerationStep(model=Models.SOBOL, num_trials=30)]
)
)
with self.assertRaises(ValueError):
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x3", "type": "fixed", "value": 2, "value_type": "int"}
],
objective_name="test_objective",
outcome_constraints=["test_objective >= 3"],
)
def test_raw_data_format(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
for _ in range(6):
parameterization, trial_index = ax_client.get_next_trial()
x, y = parameterization.get("x"), parameterization.get("y")
ax_client.complete_trial(trial_index, raw_data=(branin(x, y), 0.0))
with self.assertRaisesRegex(ValueError, "Raw data has an invalid type"):
ax_client.update_trial_data(trial_index, raw_data="invalid_data")
def test_raw_data_format_with_fidelities(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 1.0]},
],
minimize=True,
)
for _ in range(6):
parameterization, trial_index = ax_client.get_next_trial()
x, y = parameterization.get("x"), parameterization.get("y")
ax_client.complete_trial(
trial_index,
raw_data=[
({"y": y / 2.0}, {"objective": (branin(x, y / 2.0), 0.0)}),
({"y": y}, {"objective": (branin(x, y), 0.0)}),
],
)
def test_keep_generating_without_data(self):
# Check that normally numebr of arms to generate is enforced.
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
for _ in range(5):
parameterization, trial_index = ax_client.get_next_trial()
with self.assertRaisesRegex(DataRequiredError, "All trials for current model"):
ax_client.get_next_trial()
# Check thatwith enforce_sequential_optimization off, we can keep
# generating.
ax_client = AxClient(enforce_sequential_optimization=False)
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
self.assertFalse(
ax_client.generation_strategy._steps[0].enforce_num_trials, False
)
self.assertFalse(ax_client.generation_strategy._steps[1].max_parallelism, None)
for _ in range(10):
parameterization, trial_index = ax_client.get_next_trial()
def test_trial_completion(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
params, idx = ax_client.get_next_trial()
# Can't update before completing.
with self.assertRaisesRegex(ValueError, ".* not yet"):
ax_client.update_trial_data(
trial_index=idx, raw_data={"objective": (0, 0.0)}
)
ax_client.complete_trial(trial_index=idx, raw_data={"objective": (0, 0.0)})
# Cannot complete a trial twice, should use `update_trial_data`.
with self.assertRaisesRegex(ValueError, ".* already been completed"):
ax_client.complete_trial(trial_index=idx, raw_data={"objective": (0, 0.0)})
# Cannot update trial data with observation for a metric it already has.
with self.assertRaisesRegex(ValueError, ".* contained an observation"):
ax_client.update_trial_data(
trial_index=idx, raw_data={"objective": (0, 0.0)}
)
# Same as above, except objective name should be getting inferred.
with self.assertRaisesRegex(ValueError, ".* contained an observation"):
ax_client.update_trial_data(trial_index=idx, raw_data=1.0)
ax_client.update_trial_data(trial_index=idx, raw_data={"m1": (1, 0.0)})
metrics_in_data = ax_client.experiment.fetch_data().df["metric_name"].values
self.assertIn("m1", metrics_in_data)
self.assertIn("objective", metrics_in_data)
self.assertEqual(ax_client.get_best_parameters()[0], params)
params2, idy = ax_client.get_next_trial()
ax_client.complete_trial(trial_index=idy, raw_data=(-1, 0.0))
self.assertEqual(ax_client.get_best_parameters()[0], params2)
params3, idx3 = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=idx3, raw_data=-2, metadata={"dummy": "test"}
)
self.assertEqual(ax_client.get_best_parameters()[0], params3)
self.assertEqual(
ax_client.experiment.trials.get(2).run_metadata.get("dummy"), "test"
)
best_trial_values = ax_client.get_best_parameters()[1]
self.assertEqual(best_trial_values[0], {"objective": -2.0})
self.assertTrue(math.isnan(best_trial_values[1]["objective"]["objective"]))
def test_abandon_trial(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
# An abandoned trial adds no data.
params, idx = ax_client.get_next_trial()
ax_client.abandon_trial(trial_index=idx)
data = ax_client.experiment.fetch_data()
self.assertEqual(len(data.df.index), 0)
# Can't update a completed trial.
params2, idx2 = ax_client.get_next_trial()
ax_client.complete_trial(trial_index=idx2, raw_data={"objective": (0, 0.0)})
with self.assertRaisesRegex(ValueError, ".* in a terminal state."):
ax_client.abandon_trial(trial_index=idx2)
def test_ttl_trial(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
# A ttl trial that ends adds no data.
params, idx = ax_client.get_next_trial(ttl_seconds=1)
self.assertTrue(ax_client.experiment.trials.get(idx).status.is_running)
time.sleep(1) # Wait for TTL to elapse.
self.assertTrue(ax_client.experiment.trials.get(idx).status.is_failed)
# Also make sure we can no longer complete the trial as it is failed.
with self.assertRaisesRegex(
ValueError, ".* has been marked FAILED, so it no longer expects data."
):
ax_client.complete_trial(trial_index=idx, raw_data={"objective": (0, 0.0)})
params2, idy = ax_client.get_next_trial(ttl_seconds=1)
ax_client.complete_trial(trial_index=idy, raw_data=(-1, 0.0))
self.assertEqual(ax_client.get_best_parameters()[0], params2)
def test_start_and_end_time_in_trial_completion(self):
start_time = current_timestamp_in_millis()
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
params, idx = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=idx,
raw_data=1.0,
metadata={
"start_time": start_time,
"end_time": current_timestamp_in_millis(),
},
)
dat = ax_client.experiment.fetch_data().df
self.assertGreater(dat["end_time"][0], dat["start_time"][0])
def test_fail_on_batch(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
batch_trial = ax_client.experiment.new_batch_trial(
generator_run=GeneratorRun(
arms=[
Arm(parameters={"x": 0, "y": 1}),
Arm(parameters={"x": 0, "y": 1}),
]
)
)
with self.assertRaises(NotImplementedError):
ax_client.complete_trial(batch_trial.index, 0)
def test_log_failure(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
_, idx = ax_client.get_next_trial()
ax_client.log_trial_failure(idx, metadata={"dummy": "test"})
self.assertTrue(ax_client.experiment.trials.get(idx).status.is_failed)
self.assertEqual(
ax_client.experiment.trials.get(idx).run_metadata.get("dummy"), "test"
)
with self.assertRaisesRegex(ValueError, ".* no longer expects"):
ax_client.complete_trial(idx, {})
def test_attach_trial_and_get_trial_parameters(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
params, idx = ax_client.attach_trial(parameters={"x": 0.0, "y": 1.0})
ax_client.complete_trial(trial_index=idx, raw_data=5)
self.assertEqual(ax_client.get_best_parameters()[0], params)
self.assertEqual(
ax_client.get_trial_parameters(trial_index=idx), {"x": 0, "y": 1}
)
with self.assertRaises(ValueError):
ax_client.get_trial_parameters(
trial_index=10
) # No trial #10 in experiment.
with self.assertRaisesRegex(ValueError, ".* is of type"):
ax_client.attach_trial({"x": 1, "y": 2})
def test_attach_trial_ttl_seconds(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
params, idx = ax_client.attach_trial(
parameters={"x": 0.0, "y": 1.0}, ttl_seconds=1
)
self.assertTrue(ax_client.experiment.trials.get(idx).status.is_running)
time.sleep(1) # Wait for TTL to elapse.
self.assertTrue(ax_client.experiment.trials.get(idx).status.is_failed)
# Also make sure we can no longer complete the trial as it is failed.
with self.assertRaisesRegex(
ValueError, ".* has been marked FAILED, so it no longer expects data."
):
ax_client.complete_trial(trial_index=idx, raw_data=5)
params2, idx2 = ax_client.attach_trial(
parameters={"x": 0.0, "y": 1.0}, ttl_seconds=1
)
ax_client.complete_trial(trial_index=idx2, raw_data=5)
self.assertEqual(ax_client.get_best_parameters()[0], params2)
self.assertEqual(
ax_client.get_trial_parameters(trial_index=idx2), {"x": 0, "y": 1}
)
def test_attach_trial_numpy(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
params, idx = ax_client.attach_trial(parameters={"x": 0.0, "y": 1.0})
ax_client.complete_trial(trial_index=idx, raw_data=np.int32(5))
self.assertEqual(ax_client.get_best_parameters()[0], params)
def test_relative_oc_without_sq(self):
"""Must specify status quo to have relative outcome constraint."""
ax_client = AxClient()
with self.assertRaises(ValueError):
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
objective_name="test_objective",
minimize=True,
outcome_constraints=["some_metric <= 4.0%"],
)
def test_recommended_parallelism(self):
ax_client = AxClient()
with self.assertRaisesRegex(ValueError, "No generation strategy"):
ax_client.get_max_parallelism()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
self.assertEqual(ax_client.get_max_parallelism(), [(5, 5), (-1, 3)])
self.assertEqual(
run_trials_using_recommended_parallelism(
ax_client, ax_client.get_max_parallelism(), 20
),
0,
)
# With incorrect parallelism setting, the 'need more data' error should
# still be raised.
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
with self.assertRaisesRegex(DataRequiredError, "All trials for current model "):
run_trials_using_recommended_parallelism(ax_client, [(6, 6), (-1, 3)], 20)
@patch.dict(sys.modules, {"ax.storage.sqa_store.structs": None})
@patch.dict(sys.modules, {"sqalchemy": None})
@patch("ax.service.ax_client.DBSettings", None)
def test_no_sqa(self):
# Make sure we couldn't import sqa_store.structs (this could happen when
# SQLAlchemy is not installed).
with self.assertRaises(ModuleNotFoundError):
import ax_client.storage.sqa_store.structs # noqa F401
# Make sure we can still import ax_client.
__import__("ax.service.ax_client")
AxClient() # Make sure we still can instantiate client w/o db settings.
# DBSettings should be defined in `ax_client` now, but incorrectly typed
# `db_settings` argument should still make instantiation fail.
with self.assertRaisesRegex(ValueError, "`db_settings` argument should "):
AxClient(db_settings="badly_typed_db_settings")
def test_plotting_validation(self):
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x3", "type": "fixed", "value": 2, "value_type": "int"}
]
)
with self.assertRaisesRegex(ValueError, ".* there are no trials"):
ax_client.get_contour_plot()
with self.assertRaisesRegex(ValueError, ".* there are no trials"):
ax_client.get_feature_importances()
ax_client.get_next_trial()
with self.assertRaisesRegex(ValueError, ".* less than 2 parameters"):
ax_client.get_contour_plot()
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
]
)
ax_client.get_next_trial()
with self.assertRaisesRegex(ValueError, "If `param_x` is provided"):
ax_client.get_contour_plot(param_x="y")
with self.assertRaisesRegex(ValueError, "If `param_x` is provided"):
ax_client.get_contour_plot(param_y="y")
with self.assertRaisesRegex(ValueError, 'Parameter "x3"'):
ax_client.get_contour_plot(param_x="x3", param_y="x3")
with self.assertRaisesRegex(ValueError, 'Parameter "x4"'):
ax_client.get_contour_plot(param_x="x", param_y="x4")
with self.assertRaisesRegex(ValueError, 'Metric "nonexistent"'):
ax_client.get_contour_plot(
param_x="x", param_y="y", metric_name="nonexistent"
)
with self.assertRaisesRegex(UnsupportedPlotError, "Could not obtain contour"):
ax_client.get_contour_plot(
param_x="x", param_y="y", metric_name="objective"
)
with self.assertRaisesRegex(ValueError, "Could not obtain feature"):
ax_client.get_feature_importances()
# End-to-end SQA storage round-trip: run trials against a test DB, reload
# the experiment into a fresh client, and check generation-strategy state
# survives the round-trip; also checks experiments in DB cannot be overwritten.
def test_sqa_storage(self):
init_test_engine_and_session_factory(force_init=True)
config = SQAConfig()
encoder = Encoder(config=config)
decoder = Decoder(config=config)
db_settings = DBSettings(encoder=encoder, decoder=decoder)
ax_client = AxClient(db_settings=db_settings)
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
for _ in range(5):
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=branin(*parameters.values())
)
gs = ax_client.generation_strategy
# Reload the saved experiment into a brand-new client.
ax_client = AxClient(db_settings=db_settings)
ax_client.load_experiment_from_database("test_experiment")
# Trial #4 was completed after the last time the generation strategy
# generated candidates, so pre-save generation strategy was not
# "aware" of completion of trial #4. Post-restoration generation
# strategy is aware of it, however, since it gets restored with most
# up-to-date experiment data. Do adding trial #4 to the seen completed
# trials of pre-storage GS to check their equality otherwise.
gs._seen_trial_indices_by_status[TrialStatus.COMPLETED].add(4)
self.assertEqual(gs, ax_client.generation_strategy)
with self.assertRaises(ValueError):
# Overwriting existing experiment.
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
with self.assertRaises(ValueError):
# Overwriting existing experiment with overwrite flag with present
# DB settings. This should fail as we no longer allow overwriting
# experiments stored in the DB.
ax_client.create_experiment(
name="test_experiment",
parameters=[{"name": "x", "type": "range", "bounds": [-5.0, 10.0]}],
overwrite_existing_experiment=True,
)
# Original experiment should still be in DB and not have been overwritten.
self.assertEqual(len(ax_client.experiment.trials), 5)
# Overwrite semantics without persisted DB state: recreating an experiment
# fails unless overwrite_existing_experiment=True, which replaces the old
# experiment (new search space, zero trials).
def test_overwrite(self):
init_test_engine_and_session_factory(force_init=True)
ax_client = AxClient()
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
# Log a trial
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=branin(*parameters.values())
)
with self.assertRaises(ValueError):
# Overwriting existing experiment.
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
)
# Overwriting existing experiment with overwrite flag.
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
],
overwrite_existing_experiment=True,
)
# There should be no trials, as we just put in a fresh experiment.
self.assertEqual(len(ax_client.experiment.trials), 0)
# Log a trial
parameters, trial_index = ax_client.get_next_trial()
# New trial must come from the replacement search space (x1/x2, not x/y).
self.assertIn("x1", parameters.keys())
self.assertIn("x2", parameters.keys())
ax_client.complete_trial(
trial_index=trial_index, raw_data=branin(*parameters.values())
)
# Two clients constructed with the same random_seed must generate identical
# trial parameterizations across 5 sequential trials.
def test_fixed_random_seed_reproducibility(self):
ax_client = AxClient(random_seed=239)
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
]
)
for _ in range(5):
params, idx = ax_client.get_next_trial()
ax_client.complete_trial(idx, branin(params.get("x"), params.get("y")))
trial_parameters_1 = [
t.arm.parameters for t in ax_client.experiment.trials.values()
]
# Repeat the identical optimization with a fresh, same-seeded client.
ax_client = AxClient(random_seed=239)
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
]
)
for _ in range(5):
params, idx = ax_client.get_next_trial()
ax_client.complete_trial(idx, branin(params.get("x"), params.get("y")))
trial_parameters_2 = [
t.arm.parameters for t in ax_client.experiment.trials.values()
]
self.assertEqual(trial_parameters_1, trial_parameters_2)
# Sobol generator's init_position must be captured in JSON snapshots so a
# restored client regenerates exactly the trial the original would have.
def test_init_position_saved(self):
ax_client = AxClient(random_seed=239)
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
name="sobol_init_position_test",
)
for _ in range(4):
# For each generated trial, snapshot the client before generating it,
# then recreate client, regenerate the trial and compare the trial
# generated before and after snapshotting. If the state of Sobol is
# recorded correctly, the newly generated trial will be the same as
# the one generated before the snapshotting.
serialized = ax_client.to_json_snapshot()
params, idx = ax_client.get_next_trial()
ax_client = AxClient.from_json_snapshot(serialized)
with self.subTest(ax=ax_client, params=params, idx=idx):
new_params, new_idx = ax_client.get_next_trial()
self.assertEqual(params, new_params)
self.assertEqual(idx, new_idx)
# init_position recorded after generation should equal idx + 1.
self.assertEqual(
ax_client.experiment.trials[
idx
]._generator_run._model_state_after_gen["init_position"],
idx + 1,
)
ax_client.complete_trial(idx, branin(params.get("x"), params.get("y")))
# An experiment created without a name must round-trip through a JSON
# snapshot and remain unnamed after restoration.
def test_unnamed_experiment_snapshot(self):
ax_client = AxClient(random_seed=239)
ax_client.create_experiment(
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
]
)
serialized = ax_client.to_json_snapshot()
ax_client = AxClient.from_json_snapshot(serialized)
self.assertIsNone(ax_client.experiment._name)
# Mocks the modelbridge's observation/training-data/prediction plumbing so
# get_model_predictions returns a fixed, known value without fitting a model.
@patch(
"ax.modelbridge.base.observations_from_data",
autospec=True,
return_value=([get_observation1()]),
)
@patch(
"ax.modelbridge.random.RandomModelBridge.get_training_data",
autospec=True,
return_value=([get_observation1()]),
)
@patch(
"ax.modelbridge.random.RandomModelBridge._predict",
autospec=True,
return_value=[get_observation1trans().data],
)
def test_get_model_predictions(self, _predict, _tr_data, _obs_from_data):
ax_client = AxClient()
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
objective_name="a",
)
ax_client.get_next_trial()
# Rename the arm to match the mocked observation's arm name.
ax_client.experiment.trials[0].arm._name = "1_1"
self.assertEqual(ax_client.get_model_predictions(), {0: {"a": (9.0, 1.0)}})
# Deprecated persistence entry points must raise NotImplementedError rather
# than silently doing nothing.
def test_deprecated_save_load_method_errors(self):
ax_client = AxClient()
with self.assertRaises(NotImplementedError):
ax_client.save()
with self.assertRaises(NotImplementedError):
ax_client.load()
with self.assertRaises(NotImplementedError):
ax_client.load_experiment("test_experiment")
with self.assertRaises(NotImplementedError):
ax_client.get_recommended_max_parallelism()
# _find_last_trial_with_parameterization must return the most recent trial
# matching the given parameters, and raise when no trial matches.
def test_find_last_trial_with_parameterization(self):
ax_client = AxClient()
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
objective_name="a",
)
params, trial_idx = ax_client.get_next_trial()
found_trial_idx = ax_client._find_last_trial_with_parameterization(
parameterization=params
)
self.assertEqual(found_trial_idx, trial_idx)
# Check that it's indeed the _last_ trial with params that is found.
_, new_trial_idx = ax_client.attach_trial(parameters=params)
found_trial_idx = ax_client._find_last_trial_with_parameterization(
parameterization=params
)
self.assertEqual(found_trial_idx, new_trial_idx)
# Shifted parameters match no trial, so lookup must fail loudly.
with self.assertRaisesRegex(ValueError, "No .* matches"):
found_trial_idx = ax_client._find_last_trial_with_parameterization(
parameterization={k: v + 1.0 for k, v in params.items()}
)
# verify_trial_parameterization must be order-insensitive over keys and
# reject parameterizations whose values differ.
def test_verify_parameterization(self):
ax_client = AxClient()
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
objective_name="a",
)
params, trial_idx = ax_client.get_next_trial()
self.assertTrue(
ax_client.verify_trial_parameterization(
trial_index=trial_idx, parameterization=params
)
)
# Make sure it still works if ordering in the parameterization is diff.
self.assertTrue(
ax_client.verify_trial_parameterization(
trial_index=trial_idx,
parameterization={k: params[k] for k in reversed(list(params.keys()))},
)
)
# Different values for the same keys must not verify.
self.assertFalse(
ax_client.verify_trial_parameterization(
trial_index=trial_idx,
parameterization={k: v + 1.0 for k, v in params.items()},
)
)
# Completing a trial with raw data for an unknown metric ("b") must add it
# to the experiment as a tracking metric alongside the objective ("a").
def test_tracking_metric_addition(self):
ax_client = AxClient()
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
objective_name="a",
)
params, trial_idx = ax_client.get_next_trial()
self.assertEqual(list(ax_client.experiment.metrics.keys()), ["a"])
ax_client.complete_trial(trial_index=trial_idx, raw_data={"a": 1.0, "b": 2.0})
# Asserts exact key order: tracking metric "b" precedes objective "a".
self.assertEqual(list(ax_client.experiment.metrics.keys()), ["b", "a"])
# Forces trial creation to raise a Cholesky-related RuntimeError and checks
# that AxClient annotates it with a human-readable explanation.
@patch(
"ax.core.experiment.Experiment.new_trial",
side_effect=RuntimeError("cholesky_cpu error - bad matrix"),
)
def test_annotate_exception(self, _):
ax_client = AxClient()
ax_client.create_experiment(
name="test_experiment",
parameters=[
{"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
{"name": "y", "type": "range", "bounds": [0.0, 15.0]},
],
minimize=True,
objective_name="a",
)
with self.assertRaisesRegex(
expected_exception=RuntimeError,
expected_regex="Cholesky errors typically occur",
):
ax_client.get_next_trial()
| [
"ax.storage.sqa_store.encoder.Encoder",
"ax.core.metric.Metric",
"ax.metrics.branin.branin",
"ax.storage.sqa_store.decoder.Decoder",
"ax.core.parameter.ChoiceParameter",
"ax.utils.common.typeutils.not_none",
"ax.service.ax_client.AxClient.from_json_snapshot",
"ax.core.parameter.FixedParameter",
"num... | [((4809, 4931), 'unittest.mock.patch', 'patch', (['"""ax.modelbridge.random.RandomModelBridge.feature_importances"""'], {'autospec': '(True)', 'return_value': "{'x': 0.9, 'y': 1.1}"}), "('ax.modelbridge.random.RandomModelBridge.feature_importances',\n autospec=True, return_value={'x': 0.9, 'y': 1.1})\n", (4814, 4931), False, 'from unittest.mock import patch\n'), ((26648, 26711), 'unittest.mock.patch.dict', 'patch.dict', (['sys.modules', "{'ax.storage.sqa_store.structs': None}"], {}), "(sys.modules, {'ax.storage.sqa_store.structs': None})\n", (26658, 26711), False, 'from unittest.mock import patch\n'), ((26717, 26761), 'unittest.mock.patch.dict', 'patch.dict', (['sys.modules', "{'sqalchemy': None}"], {}), "(sys.modules, {'sqalchemy': None})\n", (26727, 26761), False, 'from unittest.mock import patch\n'), ((26767, 26813), 'unittest.mock.patch', 'patch', (['"""ax.service.ax_client.DBSettings"""', 'None'], {}), "('ax.service.ax_client.DBSettings', None)\n", (26772, 26813), False, 'from unittest.mock import patch\n'), ((2952, 2962), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (2960, 2962), False, 'from ax.service.ax_client import AxClient\n'), ((5143, 5153), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (5151, 5153), False, 'from ax.service.ax_client import AxClient\n'), ((7109, 7119), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (7117, 7119), False, 'from ax.service.ax_client import AxClient\n'), ((12322, 12332), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (12330, 12332), False, 'from ax.service.ax_client import AxClient\n'), ((13072, 13082), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (13080, 13082), False, 'from ax.service.ax_client import AxClient\n'), ((13924, 13934), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (13932, 13934), False, 'from ax.service.ax_client import AxClient\n'), ((14533, 14580), 'ax.service.ax_client.AxClient', 'AxClient', ([], 
{'enforce_sequential_optimization': '(False)'}), '(enforce_sequential_optimization=False)\n', (14541, 14580), False, 'from ax.service.ax_client import AxClient\n'), ((15197, 15207), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (15205, 15207), False, 'from ax.service.ax_client import AxClient\n'), ((17714, 17724), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (17722, 17724), False, 'from ax.service.ax_client import AxClient\n'), ((18581, 18591), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (18589, 18591), False, 'from ax.service.ax_client import AxClient\n'), ((19046, 19059), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (19056, 19059), False, 'import time\n'), ((19748, 19777), 'ax.utils.common.timeutils.current_timestamp_in_millis', 'current_timestamp_in_millis', ([], {}), '()\n', (19775, 19777), False, 'from ax.utils.common.timeutils import current_timestamp_in_millis\n'), ((19798, 19808), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (19806, 19808), False, 'from ax.service.ax_client import AxClient\n'), ((20528, 20538), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (20536, 20538), False, 'from ax.service.ax_client import AxClient\n'), ((21234, 21244), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (21242, 21244), False, 'from ax.service.ax_client import AxClient\n'), ((22011, 22021), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (22019, 22021), False, 'from ax.service.ax_client import AxClient\n'), ((22951, 22961), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (22959, 22961), False, 'from ax.service.ax_client import AxClient\n'), ((23422, 23435), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (23432, 23435), False, 'import time\n'), ((24243, 24253), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (24251, 24253), False, 'from ax.service.ax_client import AxClient\n'), ((24869, 24879), 
'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (24877, 24879), False, 'from ax.service.ax_client import AxClient\n'), ((25424, 25434), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (25432, 25434), False, 'from ax.service.ax_client import AxClient\n'), ((26198, 26208), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (26206, 26208), False, 'from ax.service.ax_client import AxClient\n'), ((27185, 27195), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (27193, 27195), False, 'from ax.service.ax_client import AxClient\n'), ((27614, 27624), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (27622, 27624), False, 'from ax.service.ax_client import AxClient\n'), ((28205, 28215), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (28213, 28215), False, 'from ax.service.ax_client import AxClient\n'), ((29573, 29626), 'ax.storage.sqa_store.db.init_test_engine_and_session_factory', 'init_test_engine_and_session_factory', ([], {'force_init': '(True)'}), '(force_init=True)\n', (29609, 29626), False, 'from ax.storage.sqa_store.db import init_test_engine_and_session_factory\n'), ((29644, 29655), 'ax.storage.sqa_store.sqa_config.SQAConfig', 'SQAConfig', ([], {}), '()\n', (29653, 29655), False, 'from ax.storage.sqa_store.sqa_config import SQAConfig\n'), ((29674, 29696), 'ax.storage.sqa_store.encoder.Encoder', 'Encoder', ([], {'config': 'config'}), '(config=config)\n', (29681, 29696), False, 'from ax.storage.sqa_store.encoder import Encoder\n'), ((29715, 29737), 'ax.storage.sqa_store.decoder.Decoder', 'Decoder', ([], {'config': 'config'}), '(config=config)\n', (29722, 29737), False, 'from ax.storage.sqa_store.decoder import Decoder\n'), ((29760, 29804), 'ax.storage.sqa_store.structs.DBSettings', 'DBSettings', ([], {'encoder': 'encoder', 'decoder': 'decoder'}), '(encoder=encoder, decoder=decoder)\n', (29770, 29804), False, 'from ax.storage.sqa_store.structs import DBSettings\n'), ((29825, 29858), 
'ax.service.ax_client.AxClient', 'AxClient', ([], {'db_settings': 'db_settings'}), '(db_settings=db_settings)\n', (29833, 29858), False, 'from ax.service.ax_client import AxClient\n'), ((30438, 30471), 'ax.service.ax_client.AxClient', 'AxClient', ([], {'db_settings': 'db_settings'}), '(db_settings=db_settings)\n', (30446, 30471), False, 'from ax.service.ax_client import AxClient\n'), ((32195, 32248), 'ax.storage.sqa_store.db.init_test_engine_and_session_factory', 'init_test_engine_and_session_factory', ([], {'force_init': '(True)'}), '(force_init=True)\n', (32231, 32248), False, 'from ax.storage.sqa_store.db import init_test_engine_and_session_factory\n'), ((32269, 32279), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (32277, 32279), False, 'from ax.service.ax_client import AxClient\n'), ((34081, 34106), 'ax.service.ax_client.AxClient', 'AxClient', ([], {'random_seed': '(239)'}), '(random_seed=239)\n', (34089, 34106), False, 'from ax.service.ax_client import AxClient\n'), ((34636, 34661), 'ax.service.ax_client.AxClient', 'AxClient', ([], {'random_seed': '(239)'}), '(random_seed=239)\n', (34644, 34661), False, 'from ax.service.ax_client import AxClient\n'), ((35297, 35322), 'ax.service.ax_client.AxClient', 'AxClient', ([], {'random_seed': '(239)'}), '(random_seed=239)\n', (35305, 35322), False, 'from ax.service.ax_client import AxClient\n'), ((36797, 36822), 'ax.service.ax_client.AxClient', 'AxClient', ([], {'random_seed': '(239)'}), '(random_seed=239)\n', (36805, 36822), False, 'from ax.service.ax_client import AxClient\n'), ((37122, 37161), 'ax.service.ax_client.AxClient.from_json_snapshot', 'AxClient.from_json_snapshot', (['serialized'], {}), '(serialized)\n', (37149, 37161), False, 'from ax.service.ax_client import AxClient\n'), ((37764, 37774), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (37772, 37774), False, 'from ax.service.ax_client import AxClient\n'), ((38352, 38362), 'ax.service.ax_client.AxClient', 'AxClient', ([], 
{}), '()\n', (38360, 38362), False, 'from ax.service.ax_client import AxClient\n'), ((38825, 38835), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (38833, 38835), False, 'from ax.service.ax_client import AxClient\n'), ((40014, 40024), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (40022, 40024), False, 'from ax.service.ax_client import AxClient\n'), ((41161, 41171), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (41169, 41171), False, 'from ax.service.ax_client import AxClient\n'), ((41995, 42005), 'ax.service.ax_client.AxClient', 'AxClient', ([], {}), '()\n', (42003, 42005), False, 'from ax.service.ax_client import AxClient\n'), ((1895, 1933), 'math.ceil', 'ceil', (['(num_trials / parallelism_setting)'], {}), '(num_trials / parallelism_setting)\n', (1899, 1933), False, 'from math import ceil\n'), ((4011, 4050), 'ax.service.ax_client.AxClient.from_json_snapshot', 'AxClient.from_json_snapshot', (['serialized'], {}), '(serialized)\n', (4038, 4050), False, 'from ax.service.ax_client import AxClient\n'), ((9719, 9823), 'ax.core.parameter.RangeParameter', 'RangeParameter', ([], {'name': '"""x"""', 'parameter_type': 'ParameterType.FLOAT', 'lower': '(0.001)', 'upper': '(0.1)', 'log_scale': '(True)'}), "(name='x', parameter_type=ParameterType.FLOAT, lower=0.001,\n upper=0.1, log_scale=True)\n", (9733, 9823), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((10028, 10127), 'ax.core.parameter.ChoiceParameter', 'ChoiceParameter', ([], {'name': '"""y"""', 'parameter_type': 'ParameterType.INT', 'values': '[1, 2, 3]', 'is_ordered': '(True)'}), "(name='y', parameter_type=ParameterType.INT, values=[1, 2, 3\n ], is_ordered=True)\n", (10043, 10127), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((10316, 10384), 'ax.core.parameter.FixedParameter', 'FixedParameter', ([], {'name': '"""x3"""', 'parameter_type': 
'ParameterType.INT', 'value': '(2)'}), "(name='x3', parameter_type=ParameterType.INT, value=2)\n", (10330, 10384), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((10499, 10584), 'ax.core.parameter.RangeParameter', 'RangeParameter', ([], {'name': '"""x4"""', 'parameter_type': 'ParameterType.INT', 'lower': '(1.0)', 'upper': '(3.0)'}), "(name='x4', parameter_type=ParameterType.INT, lower=1.0,\n upper=3.0)\n", (10513, 10584), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((10725, 10825), 'ax.core.parameter.ChoiceParameter', 'ChoiceParameter', ([], {'name': '"""x5"""', 'parameter_type': 'ParameterType.STRING', 'values': "['one', 'two', 'three']"}), "(name='x5', parameter_type=ParameterType.STRING, values=[\n 'one', 'two', 'three'])\n", (10740, 10825), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((17599, 17657), 'math.isnan', 'math.isnan', (["best_trial_values[1]['objective']['objective']"], {}), "(best_trial_values[1]['objective']['objective'])\n", (17609, 17657), False, 'import math\n'), ((27505, 27552), 'ax.service.ax_client.AxClient', 'AxClient', ([], {'db_settings': '"""badly_typed_db_settings"""'}), "(db_settings='badly_typed_db_settings')\n", (27513, 27552), False, 'from ax.service.ax_client import AxClient\n'), ((36134, 36173), 'ax.service.ax_client.AxClient.from_json_snapshot', 'AxClient.from_json_snapshot', (['serialized'], {}), '(serialized)\n', (36161, 36173), False, 'from ax.service.ax_client import AxClient\n'), ((2742, 2850), 'unittest.mock.patch.dict', 'patch.dict', (['f"""{Models.__module__}.MODEL_KEY_TO_MODEL_SETUP"""', "{'GPEI': MODEL_KEY_TO_MODEL_SETUP['Sobol']}"], {}), "(f'{Models.__module__}.MODEL_KEY_TO_MODEL_SETUP', {'GPEI':\n MODEL_KEY_TO_MODEL_SETUP['Sobol']})\n", (2752, 2850), False, 'from unittest.mock import patch\n'), ((4467, 4485), 
'ax.utils.testing.modeling_stubs.get_observation1', 'get_observation1', ([], {}), '()\n', (4483, 4485), False, 'from ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans\n'), ((4622, 4640), 'ax.utils.testing.modeling_stubs.get_observation1', 'get_observation1', ([], {}), '()\n', (4638, 4640), False, 'from ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans\n'), ((24648, 24659), 'numpy.int32', 'np.int32', (['(5)'], {}), '(5)\n', (24656, 24659), True, 'import numpy as np\n'), ((37329, 37347), 'ax.utils.testing.modeling_stubs.get_observation1', 'get_observation1', ([], {}), '()\n', (37345, 37347), False, 'from ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans\n'), ((37484, 37502), 'ax.utils.testing.modeling_stubs.get_observation1', 'get_observation1', ([], {}), '()\n', (37500, 37502), False, 'from ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans\n'), ((2427, 2459), 'ax.metrics.branin.branin', 'branin', (["params['x']", "params['y']"], {}), "(params['x'], params['y'])\n", (2433, 2459), False, 'from ax.metrics.branin import branin\n'), ((4767, 4790), 'ax.utils.testing.modeling_stubs.get_observation1trans', 'get_observation1trans', ([], {}), '()\n', (4788, 4790), False, 'from ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans\n'), ((11053, 11079), 'ax.core.metric.Metric', 'Metric', ([], {'name': '"""some_metric"""'}), "(name='some_metric')\n", (11059, 11079), False, 'from ax.core.metric import Metric\n'), ((11360, 11386), 'ax.core.metric.Metric', 'Metric', ([], {'name': '"""some_metric"""'}), "(name='some_metric')\n", (11366, 11386), False, 'from ax.core.metric import Metric\n'), ((20297, 20326), 'ax.utils.common.timeutils.current_timestamp_in_millis', 'current_timestamp_in_millis', ([], {}), '()\n', (20324, 20326), False, 'from ax.utils.common.timeutils import current_timestamp_in_millis\n'), ((37629, 37652), 
'ax.utils.testing.modeling_stubs.get_observation1trans', 'get_observation1trans', ([], {}), '()\n', (37650, 37652), False, 'from ax.utils.testing.modeling_stubs import get_observation1, get_observation1trans\n'), ((5552, 5591), 'ax.utils.common.typeutils.not_none', 'not_none', (['ax_client.generation_strategy'], {}), '(ax_client.generation_strategy)\n', (5560, 5591), False, 'from ax.utils.common.typeutils import checked_cast, not_none\n'), ((7455, 7494), 'ax.utils.common.typeutils.not_none', 'not_none', (['ax_client.generation_strategy'], {}), '(ax_client.generation_strategy)\n', (7463, 7494), False, 'from ax.utils.common.typeutils import checked_cast, not_none\n'), ((7855, 7904), 'ax.modelbridge.generation_strategy.GenerationStep', 'GenerationStep', ([], {'model': 'Models.SOBOL', 'num_trials': '(30)'}), '(model=Models.SOBOL, num_trials=30)\n', (7869, 7904), False, 'from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy\n'), ((11808, 11857), 'ax.modelbridge.generation_strategy.GenerationStep', 'GenerationStep', ([], {'model': 'Models.SOBOL', 'num_trials': '(30)'}), '(model=Models.SOBOL, num_trials=30)\n', (11822, 11857), False, 'from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy\n'), ((12820, 12832), 'ax.metrics.branin.branin', 'branin', (['x', 'y'], {}), '(x, y)\n', (12826, 12832), False, 'from ax.metrics.branin import branin\n'), ((3817, 3839), 'ax.utils.common.typeutils.checked_cast', 'checked_cast', (['float', 'x'], {}), '(float, x)\n', (3829, 3839), False, 'from ax.utils.common.typeutils import checked_cast, not_none\n'), ((3841, 3863), 'ax.utils.common.typeutils.checked_cast', 'checked_cast', (['float', 'y'], {}), '(float, y)\n', (3853, 3863), False, 'from ax.utils.common.typeutils import checked_cast, not_none\n'), ((20939, 20971), 'ax.core.arm.Arm', 'Arm', ([], {'parameters': "{'x': 0, 'y': 1}"}), "(parameters={'x': 0, 'y': 1})\n", (20942, 20971), False, 'from ax.core.arm import Arm\n'), ((20993, 
21025), 'ax.core.arm.Arm', 'Arm', ([], {'parameters': "{'x': 0, 'y': 1}"}), "(parameters={'x': 0, 'y': 1})\n", (20996, 21025), False, 'from ax.core.arm import Arm\n'), ((6192, 6214), 'ax.utils.common.typeutils.checked_cast', 'checked_cast', (['float', 'x'], {}), '(float, x)\n', (6204, 6214), False, 'from ax.utils.common.typeutils import checked_cast, not_none\n'), ((6216, 6238), 'ax.utils.common.typeutils.checked_cast', 'checked_cast', (['float', 'y'], {}), '(float, y)\n', (6228, 6238), False, 'from ax.utils.common.typeutils import checked_cast, not_none\n'), ((13655, 13673), 'ax.metrics.branin.branin', 'branin', (['x', '(y / 2.0)'], {}), '(x, y / 2.0)\n', (13661, 13673), False, 'from ax.metrics.branin import branin\n'), ((13729, 13741), 'ax.metrics.branin.branin', 'branin', (['x', 'y'], {}), '(x, y)\n', (13735, 13741), False, 'from ax.metrics.branin import branin\n')] |
"""
main.py
@author: ksuchak1990
Python script for running experiments with the enkf.
"""
# Imports
import numpy as np
from experiment_utils import Modeller, Visualiser
np.random.seed(42)
# Functions
# def testing():
# """
# Testing function
# Overall function that wraps around what we want to run at any specific
# time.
# """
# with open('results/data.json') as json_file:
# data = json.load(json_file)
# forecasts, analyses, observations = process_repeat_results(data)
# plot_all_results(forecasts, analyses, observations)
# plot_with_errors(forecasts, analyses, observations)
# run_repeat_combos(resume=True)
# run_repeat_combos_mt(4)
# testing()
# process_batch(read_time=True)
# d = {'station': 'Grand_Central'}
# Modeller.run_repeat_combos(resume=False)
# Modeller.run_for_endtime()
# Modeller.run_experiment_1()
# Modeller.run_all(ensemble_size=10)
# Modeller.run_enkf_benchmark(ensemble_size=50, pop_size=50)
# Visualiser.quick_plot()
# Modeller.run_experiment_1_1()
Modeller.run_model_collisions()
| [
"experiment_utils.Modeller.run_model_collisions",
"numpy.random.seed"
] | [((171, 189), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (185, 189), True, 'import numpy as np\n'), ((1036, 1067), 'experiment_utils.Modeller.run_model_collisions', 'Modeller.run_model_collisions', ([], {}), '()\n', (1065, 1067), False, 'from experiment_utils import Modeller, Visualiser\n')] |
from base.base_train import BaseTrain
from tqdm import tqdm
import numpy as np
class ExampleTrainer(BaseTrain):
    """Concrete trainer: runs epochs of mini-batch steps and logs mean metrics."""

    def __init__(self, sess, model, data, config, logger):
        super(ExampleTrainer, self).__init__(sess, model, data, config, logger)

    def train_epoch(self):
        """Run `config.num_iter_per_epoch` steps, print the mean loss, and
        summarize mean loss/accuracy at the model's current global step."""
        losses = []
        accs = []
        for _ in tqdm(range(self.config.num_iter_per_epoch)):
            step_loss, step_acc = self.train_step()
            losses.append(step_loss)
            accs.append(step_acc)
        loss = np.mean(losses)
        print(loss)
        acc = np.mean(accs)
        cur_it = self.model.global_step_tensor.eval(self.sess)
        self.logger.summarize(cur_it, summaries_dict={'loss': loss, 'acc': acc})

    def train_step(self):
        """Fetch one batch, run a single optimization step, return (loss, acc)."""
        batch_x, batch_y = next(self.data.next_batch(self.config.batch_size))
        feed = {
            self.model.x: batch_x,
            self.model.y: batch_y,
            self.model.is_training: True,
        }
        # Runs the optimizer alongside the tracked loss (sqm) and accuracy ops.
        _, loss, acc = self.sess.run(
            [self.model.train_step, self.model.sqm, self.model.accuracy],
            feed_dict=feed,
        )
        return loss, acc
| [
"numpy.mean"
] | [((518, 533), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (525, 533), True, 'import numpy as np\n'), ((568, 581), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (575, 581), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""Author: <NAME>
Helper functions to prepare and process data
Email: <EMAIL>
"""
from __future__ import division
import glob
import math
import errno
import os
import shutil
import numpy as np
import multiprocessing as mp
import insar.sario
from insar.log import get_log, log_runtime
logger = get_log()
def mkdir_p(path):
    """Emulates bash `mkdir -p`, in python style

    Creates `path` along with any missing parents; an already-existing
    directory is silently accepted. Used for igrams directory creation.

    Args:
        path (str): directory path to create

    Raises:
        OSError: if creation fails for any reason other than the directory
            already existing (e.g. path exists as a file, permissions)
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise unless the failure is "already exists as a directory",
        # which `mkdir -p` treats as success.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def which(program):
    """Mimics UNIX `which`: locate an executable.

    If `program` contains a directory component it is checked directly;
    otherwise every entry of $PATH is searched in order.
    Used from https://stackoverflow.com/a/377028

    Args:
        program (str): command name or path to look up

    Returns:
        str or None: full path to the executable, or None if not found
    """

    def is_exe(candidate):
        # Must exist as a regular file and carry the execute permission bit.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _tail = os.path.split(program)
    if head:
        # Explicit path given: accept it only if it is itself executable.
        return program if is_exe(program) else None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if is_exe(candidate):
            return candidate
    return None
def downsample_im(image, rate=10):
    """Shrink an image by keeping only every `rate`-th pixel on each axis.

    Args:
        image (ndarray): 2D array of an image
        rate (int): the reduction rate to downsample

    Returns:
        ndarray: view of `image` decimated by `rate` in rows and columns
    """
    step = slice(None, None, rate)
    return image[step, step]
def floor_float(num, ndigits):
    """Truncate `num` toward -inf at `ndigits` decimal places (floor, not round).

    Used for .dem.rsc creation, because rounding to 12 sigfigs
    causes the fortran routines to overstep the matrix and fail,
    since 0.000277777778*3600 = 1.00000000079.. , but
    0.000277777777*3600 = 0.99999999719

    Example:
        >>> floor_float(1/3600, 12)
        0.000277777777
    """
    scale = 10 ** ndigits
    return math.floor(num * scale) / scale
def clip(image):
    """Convert float image to only range 0 to 1 (clips)"""
    magnitude = np.abs(image)
    return np.clip(magnitude, 0, 1)
def log(image):
    """Converts magnitude amplitude image to log (decibel) scale."""
    # Complex data is reduced to its magnitude before taking the log.
    magnitude = np.abs(image) if np.iscomplexobj(image) else image
    return 20 * np.log10(magnitude)


# Alias: convert
db = log
def percent_zero(filepath=None, arr=None):
    """Ratio of entries that are exactly zero, as a quality-assessment check.

    Args:
        filepath (str): path to file to check (loaded via insar.sario)
        arr (ndarray): pre-loaded array to check

    Returns:
        float: decimal from 0 to 1, ratio of zeros to total entries

    Example:
        >>> a = np.array([[1 + 1j, 0.0], [1, 0.0001]])
        >>> print(percent_zero(arr=a))
        0.25
    """
    if filepath:
        # Loading from disk takes precedence over a passed-in array.
        arr = insar.sario.load(filepath)
    num_zeros = np.sum(arr == 0)
    return num_zeros / arr.size
def _check_and_move(fp, zero_threshold, test, mv_dir):
    """Wrapper func for clean_files multiprocessing."""
    logger.debug("Checking {}".format(fp))
    ratio = percent_zero(filepath=fp)
    if ratio <= zero_threshold:
        return
    logger.info("Moving {} for having {:.2f}% zeros to {}".format(fp, 100 * ratio, mv_dir))
    # In test mode only report; never actually move the file.
    if not test:
        shutil.move(fp, mv_dir)
@log_runtime
def clean_files(ext, path=".", zero_threshold=0.50, test=True):
    """Move files of type ext from path with a high pct of zeros

    Args:
        ext (str): file extension to open. Must be loadable by sario.load
        path (str): path of directory to search
        zero_threshold (float): between 0 and 1, threshold to move files
            if they contain greater ratio of zeros
        test (bool): If true, doesn't move files, just lists
    """
    file_glob = os.path.join(path, "*{}".format(ext))
    logger.info("Searching {} for files with zero threshold {}".format(file_glob, zero_threshold))
    # Make a folder to store the bad geos, e.g. ./bad_geo for ext='.geo'
    mv_dir = os.path.join(path, 'bad_{}'.format(ext.replace('.', '')))
    # Plain if/else instead of the original conditional *expression*, which
    # was evaluated purely for its side effects.
    if test:
        logger.info("Test mode: not moving files.")
    else:
        mkdir_p(mv_dir)
    max_procs = mp.cpu_count() // 2
    pool = mp.Pool(processes=max_procs)
    results = [
        pool.apply_async(_check_and_move, (fp, zero_threshold, test, mv_dir))
        for fp in glob.glob(file_glob)
    ]
    pool.close()  # no more work will be submitted
    # Block on each result so the tasks actually run and any worker
    # exception is re-raised here (a loop, not a throwaway list-comp).
    for res in results:
        res.get()
    pool.join()  # fix: the pool was previously never closed/joined (leak)
def split_array_into_blocks(data):
    """Break a long rectangular array (like UAVSAR) into near-square blocks.

    Useful to look at small data pieces at a time in dismph.

    Returns:
        blocks (list[np.ndarray])
    """
    nrows, ncols = data.shape
    num_blocks = np.ceil(nrows / ncols)
    return np.array_split(data, num_blocks)
def split_and_save(filename):
    """Creates several files from one long data file
    Saves them with same filename with .1,.2,.3... at end before ext
    e.g. brazos_14937_17087-002_17088-003_0001d_s01_L090HH_01.int produces
    brazos_14937_17087-002_17088-003_0001d_s01_L090HH_01.1.int
    brazos_14937_17087-002_17088-003_0001d_s01_L090HH_01.2.int...
    Output:
        newpaths (list[str]): full paths to new files created
    """
    data = insar.sario.load_file(filename)
    blocks = split_array_into_blocks(data)
    ext = insar.sario.get_file_ext(filename)
    newpaths = []
    # Number the output files from 1, inserting the index before the extension.
    for idx, block in enumerate(blocks, start=1):
        fname = filename.replace(ext, ".{}{}".format(str(idx), ext))
        print("Saving {}".format(fname))
        insar.sario.save(fname, block)
        newpaths.append(fname)
    return newpaths
def combine_cor_amp(corfilename, save=True):
    """Takes a .cor file from UAVSAR (which doesn't contain amplitude),
    and creates a new file with amplitude data interleaved for dishgt
    dishgt brazos_14937_17087-002_17088-003_0001d_s01_L090HH_01_withamp.cor 3300 1 5000 1
    where 3300 is number of columns/samples, and we want the first 5000 rows. the final
    1 is needed for the contour interval to set a max of 1 for .cor data
    Inputs:
        corfilename (str): string filename of the .cor from UAVSAR
        save (bool): True if you want to save the combined array
    Returns:
        cor_with_amp (np.ndarray) combined correlation + amplitude (as complex64)
        outfilename (str): same name as corfilename, but _withamp.cor
    Saves a new file under outfilename
    Note: .ann and .int files must be in same directory as .cor
    NOTE(review): the `save` flag is currently ignored -- the file is always
    written; confirm whether callers rely on save=False skipping the write.
    """
    ext = insar.sario.get_file_ext(corfilename)
    assert ext == '.cor', 'corfilename must be a .cor file'
    # Amplitude comes from the matching .int interferogram alongside the .cor
    intfilename = corfilename.replace('.cor', '.int')
    intdata = insar.sario.load_file(intfilename)
    amp = np.abs(intdata)
    cordata = insar.sario.load_file(corfilename)
    # For dishgt, it expects the two matrices stacked [[amp]; [cor]]
    cor_with_amp = np.vstack((amp, cordata))
    outfilename = corfilename.replace('.cor', '_withamp.cor')
    insar.sario.save(outfilename, cor_with_amp)
    return cor_with_amp, outfilename
| [
"numpy.abs",
"numpy.iscomplexobj",
"os.makedirs",
"numpy.sum",
"numpy.ceil",
"os.path.isdir",
"math.floor",
"os.path.isfile",
"insar.log.get_log",
"shutil.move",
"multiprocessing.Pool",
"glob.glob",
"numpy.log10",
"os.path.split",
"os.path.join",
"os.access",
"numpy.vstack",
"multi... | [((319, 328), 'insar.log.get_log', 'get_log', ([], {}), '()\n', (326, 328), False, 'from insar.log import get_log, log_runtime\n'), ((830, 852), 'os.path.split', 'os.path.split', (['program'], {}), '(program)\n', (843, 852), False, 'import os\n'), ((2022, 2044), 'numpy.iscomplexobj', 'np.iscomplexobj', (['image'], {}), '(image)\n', (2037, 2044), True, 'import numpy as np\n'), ((3949, 3977), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'max_procs'}), '(processes=max_procs)\n', (3956, 3977), True, 'import multiprocessing as mp\n'), ((6476, 6491), 'numpy.abs', 'np.abs', (['intdata'], {}), '(intdata)\n', (6482, 6491), True, 'import numpy as np\n'), ((6630, 6655), 'numpy.vstack', 'np.vstack', (['(amp, cordata)'], {}), '((amp, cordata))\n', (6639, 6655), True, 'import numpy as np\n'), ((463, 480), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (474, 480), False, 'import os\n'), ((1773, 1804), 'math.floor', 'math.floor', (['(10 ** ndigits * num)'], {}), '(10 ** ndigits * num)\n', (1783, 1804), False, 'import math\n'), ((1918, 1931), 'numpy.abs', 'np.abs', (['image'], {}), '(image)\n', (1924, 1931), True, 'import numpy as np\n'), ((2062, 2075), 'numpy.abs', 'np.abs', (['image'], {}), '(image)\n', (2068, 2075), True, 'import numpy as np\n'), ((2092, 2107), 'numpy.log10', 'np.log10', (['image'], {}), '(image)\n', (2100, 2107), True, 'import numpy as np\n'), ((2678, 2694), 'numpy.sum', 'np.sum', (['(arr == 0)'], {}), '(arr == 0)\n', (2684, 2694), True, 'import numpy as np\n'), ((3918, 3932), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3930, 3932), True, 'import multiprocessing as mp\n'), ((4485, 4505), 'numpy.ceil', 'np.ceil', (['(rows / cols)'], {}), '(rows / cols)\n', (4492, 4505), True, 'import numpy as np\n'), ((758, 779), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (772, 779), False, 'import os\n'), ((784, 809), 'os.access', 'os.access', (['fpath', 'os.X_OK'], {}), '(fpath, os.X_OK)\n', (793, 809), 
False, 'import os\n'), ((1013, 1040), 'os.path.join', 'os.path.join', (['path', 'program'], {}), '(path, program)\n', (1025, 1040), False, 'import os\n'), ((3054, 3077), 'shutil.move', 'shutil.move', (['fp', 'mv_dir'], {}), '(fp, mv_dir)\n', (3065, 3077), False, 'import shutil\n'), ((4090, 4110), 'glob.glob', 'glob.glob', (['file_glob'], {}), '(file_glob)\n', (4099, 4110), False, 'import glob\n'), ((549, 568), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (562, 568), False, 'import os\n')] |
#!/usr/bin/env python
from constants import *
import numpy as np
def regrid(self):
    '''
    Called in both firn_density_spin and firn_density_nospin
    There are 3 subgrids in the regrid module. Grid 1 is the high resolution grid near the surface. Grid 2 is the lower resolution grid at greater depths; a user-defined number of nodes (self.c['nodestocombine']; refer as NTC here) are combined occasionally (every NTC time steps) to make one new node within grid 2. Grid 3 is at the bottom and has split up one grid 2 node back into a high-resolution grid (1 node into NTC nodes), which can be removed at each time step to keep the model Lagrangian.
    the variable gridtrack keeps track of which subgrid each node is in.
    '''
    ### all of the nodes in subgrid 1, computed once (previously np.where ran
    ### twice, and the unused locals ind10 and ind0 were also assigned)
    grid1_nodes = np.where(self.gridtrack==1)[0]
    ind1 = grid1_nodes[-1*self.c['nodestocombine']:] # the last NTC nodes of subgrid 1; will be combined into 1 node within subgrid 2.
    ind1a = ind1[0]   # first index of the span being merged
    ind1b = ind1[-1]  # last index of the span being merged
    ### create the properties of the new subgrid 2 node
    g2dz = np.array([np.sum(self.dz[ind1])])
    g2mass = np.sum(self.mass[ind1])
    g2rho = g2mass/g2dz
    g2Tz0 = np.sum(self.Tz[ind1]*self.mass[ind1])
    g2Tz = np.array([g2Tz0 / g2mass]) # Use a weighted average for temperature (effectively the enthalpy)
    g2gt = 2 #gridtrack
    g2age = np.mean(self.age[ind1])
    # g2bm = np.mean(self.bdot_mean[ind1])
    g2bm0 = np.sum(self.bdot_mean[ind1]*self.mass[ind1])
    g2bm = np.array([g2bm0 / g2mass])
    g2lwc = np.sum(self.LWC[ind1])
    ### split up the last node in grid 2 into NTC nodes. Each node retains the density, age, etc of the old subgrid 2 node.
    g3dz = self.dz[-1]/self.nodestocombine * np.ones(self.nodestocombine)
    g3rho = self.rho[-1] * np.ones(self.nodestocombine)
    g3mass = g3rho * g3dz
    g3gt = 3 * np.ones(self.nodestocombine)
    g3Tz = self.Tz[-1]* np.ones(self.nodestocombine)
    g3age = self.age[-1]*np.ones(self.nodestocombine)
    g3bm = self.bdot_mean[-1]*np.ones(self.nodestocombine)
    g3lwc = self.LWC[-1]/self.nodestocombine * np.ones(self.nodestocombine)
    ### combine the new and old nodes into the full grid.
    self.dz = np.concatenate((self.dz[0:ind1a],g2dz,self.dz[ind1b+1:-1],g3dz))
    self.z = self.dz.cumsum(axis=0)
    self.z = np.concatenate(([0], self.z[:-1]))
    self.rho = np.concatenate((self.rho[0:ind1a],g2rho,self.rho[ind1b+1:-1],g3rho))
    self.Tz = np.concatenate((self.Tz[0:ind1a],g2Tz,self.Tz[ind1b+1:-1],g3Tz))
    self.mass = np.concatenate((self.mass[0:ind1a],[g2mass],self.mass[ind1b+1:-1],g3mass))
    self.sigma = self.mass * self.dx * GRAVITY
    self.sigma = self.sigma.cumsum(axis = 0)
    self.mass_sum = self.mass.cumsum(axis = 0)
    self.age = np.concatenate((self.age[0:ind1a],[g2age],self.age[ind1b+1:-1],g3age))
    self.bdot_mean = np.concatenate((self.bdot_mean[0:ind1a],g2bm,self.bdot_mean[ind1b+1:-1],g3bm))
    self.LWC = np.concatenate((self.LWC[0:ind1a],[g2lwc],self.LWC[ind1b+1:-1],g3lwc))
    self.gridtrack = np.concatenate((self.gridtrack[0:ind1a],[g2gt],self.gridtrack[ind1b+1:-1],g3gt))
    if self.c['physGrain']:
        #g2r2 = np.array([np.mean(self.r2)])
        g2r2 = np.mean(self.r2[ind1]) # VV added
        g3r2 = self.r2[-1]* np.ones(self.nodestocombine)
        self.r2 = np.concatenate((self.r2[0:ind1a],[g2r2],self.r2[ind1b+1:-1],g3r2))
    return self.dz, self.z, self.rho, self.Tz, self.mass, self.sigma, self.mass_sum, self.age, self.bdot_mean, self.LWC, self.gridtrack, self.r2
def init_regrid(self):
    '''
    Used in firn_density_spin for the initial regridding.

    Keeps full resolution above c['grid1bottom'] (subgrid 1), thins deeper
    nodes by keeping every c['nodestocombine']-th one (subgrid 2), and
    appends a re-split high-resolution tail (subgrid 3). gridtrack records
    each node's subgrid number (1/2/3).
    '''
    grid1_bottom = self.c['grid1bottom']
    self.nodestocombine = self.c['nodestocombine']
    shallow = np.where(self.z < grid1_bottom)[0]
    deep = np.where(self.z >= grid1_bottom)[0]
    grid1z = self.z[shallow]
    # Keep only every NTC-th deep node.
    grid2z = self.z[deep[0]::self.nodestocombine]
    self.z = np.concatenate((grid1z, grid2z))
    # Re-split tail appended below the current bottom.
    grid3z = self.z[-1] + np.cumsum(self.dz[-1 * self.nodestocombine:])
    self.z = np.concatenate((self.z, grid3z))
    spacing = np.diff(self.z)
    self.dz = np.append(spacing, spacing[-1])
    self.gridLen = len(self.z)
    self.dx = np.ones(self.gridLen)
    track = 2 * np.ones(self.gridLen)
    track[shallow] = 1
    track[-1 * self.nodestocombine:] = 3
    self.gridtrack = track
    print('After regrid, grid length is', self.gridLen)
    return self.nodestocombine, self.z, self.dz, self.gridLen, self.dx, self.gridtrack
| [
"numpy.sum",
"numpy.ones",
"numpy.append",
"numpy.cumsum",
"numpy.mean",
"numpy.array",
"numpy.diff",
"numpy.where",
"numpy.concatenate"
] | [((1191, 1214), 'numpy.sum', 'np.sum', (['self.mass[ind1]'], {}), '(self.mass[ind1])\n', (1197, 1214), True, 'import numpy as np\n'), ((1255, 1294), 'numpy.sum', 'np.sum', (['(self.Tz[ind1] * self.mass[ind1])'], {}), '(self.Tz[ind1] * self.mass[ind1])\n', (1261, 1294), True, 'import numpy as np\n'), ((1307, 1333), 'numpy.array', 'np.array', (['[g2Tz0 / g2mass]'], {}), '([g2Tz0 / g2mass])\n', (1315, 1333), True, 'import numpy as np\n'), ((1443, 1466), 'numpy.mean', 'np.mean', (['self.age[ind1]'], {}), '(self.age[ind1])\n', (1450, 1466), True, 'import numpy as np\n'), ((1525, 1571), 'numpy.sum', 'np.sum', (['(self.bdot_mean[ind1] * self.mass[ind1])'], {}), '(self.bdot_mean[ind1] * self.mass[ind1])\n', (1531, 1571), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.array', 'np.array', (['[g2bm0 / g2mass]'], {}), '([g2bm0 / g2mass])\n', (1592, 1610), True, 'import numpy as np\n'), ((1625, 1647), 'numpy.sum', 'np.sum', (['self.LWC[ind1]'], {}), '(self.LWC[ind1])\n', (1631, 1647), True, 'import numpy as np\n'), ((2317, 2386), 'numpy.concatenate', 'np.concatenate', (['(self.dz[0:ind1a], g2dz, self.dz[ind1b + 1:-1], g3dz)'], {}), '((self.dz[0:ind1a], g2dz, self.dz[ind1b + 1:-1], g3dz))\n', (2331, 2386), True, 'import numpy as np\n'), ((2449, 2483), 'numpy.concatenate', 'np.concatenate', (['([0], self.z[:-1])'], {}), '(([0], self.z[:-1]))\n', (2463, 2483), True, 'import numpy as np\n'), ((2506, 2579), 'numpy.concatenate', 'np.concatenate', (['(self.rho[0:ind1a], g2rho, self.rho[ind1b + 1:-1], g3rho)'], {}), '((self.rho[0:ind1a], g2rho, self.rho[ind1b + 1:-1], g3rho))\n', (2520, 2579), True, 'import numpy as np\n'), ((2597, 2666), 'numpy.concatenate', 'np.concatenate', (['(self.Tz[0:ind1a], g2Tz, self.Tz[ind1b + 1:-1], g3Tz)'], {}), '((self.Tz[0:ind1a], g2Tz, self.Tz[ind1b + 1:-1], g3Tz))\n', (2611, 2666), True, 'import numpy as np\n'), ((2684, 2763), 'numpy.concatenate', 'np.concatenate', (['(self.mass[0:ind1a], [g2mass], self.mass[ind1b + 1:-1], g3mass)'], {}), 
'((self.mass[0:ind1a], [g2mass], self.mass[ind1b + 1:-1], g3mass))\n', (2698, 2763), True, 'import numpy as np\n'), ((2932, 3007), 'numpy.concatenate', 'np.concatenate', (['(self.age[0:ind1a], [g2age], self.age[ind1b + 1:-1], g3age)'], {}), '((self.age[0:ind1a], [g2age], self.age[ind1b + 1:-1], g3age))\n', (2946, 3007), True, 'import numpy as np\n'), ((3025, 3112), 'numpy.concatenate', 'np.concatenate', (['(self.bdot_mean[0:ind1a], g2bm, self.bdot_mean[ind1b + 1:-1], g3bm)'], {}), '((self.bdot_mean[0:ind1a], g2bm, self.bdot_mean[ind1b + 1:-1],\n g3bm))\n', (3039, 3112), True, 'import numpy as np\n'), ((3126, 3201), 'numpy.concatenate', 'np.concatenate', (['(self.LWC[0:ind1a], [g2lwc], self.LWC[ind1b + 1:-1], g3lwc)'], {}), '((self.LWC[0:ind1a], [g2lwc], self.LWC[ind1b + 1:-1], g3lwc))\n', (3140, 3201), True, 'import numpy as np\n'), ((3219, 3309), 'numpy.concatenate', 'np.concatenate', (['(self.gridtrack[0:ind1a], [g2gt], self.gridtrack[ind1b + 1:-1], g3gt)'], {}), '((self.gridtrack[0:ind1a], [g2gt], self.gridtrack[ind1b + 1:-\n 1], g3gt))\n', (3233, 3309), True, 'import numpy as np\n'), ((4135, 4167), 'numpy.concatenate', 'np.concatenate', (['(grid1z, grid2z)'], {}), '((grid1z, grid2z))\n', (4149, 4167), True, 'import numpy as np\n'), ((4268, 4300), 'numpy.concatenate', 'np.concatenate', (['(self.z, grid3z)'], {}), '((self.z, grid3z))\n', (4282, 4300), True, 'import numpy as np\n'), ((4322, 4337), 'numpy.diff', 'np.diff', (['self.z'], {}), '(self.z)\n', (4329, 4337), True, 'import numpy as np\n'), ((4360, 4391), 'numpy.append', 'np.append', (['self.dz', 'self.dz[-1]'], {}), '(self.dz, self.dz[-1])\n', (4369, 4391), True, 'import numpy as np\n'), ((4448, 4469), 'numpy.ones', 'np.ones', (['self.gridLen'], {}), '(self.gridLen)\n', (4455, 4469), True, 'import numpy as np\n'), ((753, 782), 'numpy.where', 'np.where', (['(self.gridtrack == 1)'], {}), '(self.gridtrack == 1)\n', (761, 782), True, 'import numpy as np\n'), ((1822, 1850), 'numpy.ones', 'np.ones', 
(['self.nodestocombine'], {}), '(self.nodestocombine)\n', (1829, 1850), True, 'import numpy as np\n'), ((1880, 1908), 'numpy.ones', 'np.ones', (['self.nodestocombine'], {}), '(self.nodestocombine)\n', (1887, 1908), True, 'import numpy as np\n'), ((1954, 1982), 'numpy.ones', 'np.ones', (['self.nodestocombine'], {}), '(self.nodestocombine)\n', (1961, 1982), True, 'import numpy as np\n'), ((2010, 2038), 'numpy.ones', 'np.ones', (['self.nodestocombine'], {}), '(self.nodestocombine)\n', (2017, 2038), True, 'import numpy as np\n'), ((2066, 2094), 'numpy.ones', 'np.ones', (['self.nodestocombine'], {}), '(self.nodestocombine)\n', (2073, 2094), True, 'import numpy as np\n'), ((2128, 2156), 'numpy.ones', 'np.ones', (['self.nodestocombine'], {}), '(self.nodestocombine)\n', (2135, 2156), True, 'import numpy as np\n'), ((2206, 2234), 'numpy.ones', 'np.ones', (['self.nodestocombine'], {}), '(self.nodestocombine)\n', (2213, 2234), True, 'import numpy as np\n'), ((3398, 3420), 'numpy.mean', 'np.mean', (['self.r2[ind1]'], {}), '(self.r2[ind1])\n', (3405, 3420), True, 'import numpy as np\n'), ((3511, 3582), 'numpy.concatenate', 'np.concatenate', (['(self.r2[0:ind1a], [g2r2], self.r2[ind1b + 1:-1], g3r2)'], {}), '((self.r2[0:ind1a], [g2r2], self.r2[ind1b + 1:-1], g3r2))\n', (3525, 3582), True, 'import numpy as np\n'), ((3942, 3967), 'numpy.where', 'np.where', (['(self.z < grid1b)'], {}), '(self.z < grid1b)\n', (3950, 3967), True, 'import numpy as np\n'), ((3991, 4017), 'numpy.where', 'np.where', (['(self.z >= grid1b)'], {}), '(self.z >= grid1b)\n', (3999, 4017), True, 'import numpy as np\n'), ((4202, 4247), 'numpy.cumsum', 'np.cumsum', (['self.dz[-1 * self.nodestocombine:]'], {}), '(self.dz[-1 * self.nodestocombine:])\n', (4211, 4247), True, 'import numpy as np\n'), ((4496, 4517), 'numpy.ones', 'np.ones', (['self.gridLen'], {}), '(self.gridLen)\n', (4503, 4517), True, 'import numpy as np\n'), ((832, 861), 'numpy.where', 'np.where', (['(self.gridtrack == 1)'], {}), '(self.gridtrack == 
1)\n', (840, 861), True, 'import numpy as np\n'), ((1153, 1174), 'numpy.sum', 'np.sum', (['self.dz[ind1]'], {}), '(self.dz[ind1])\n', (1159, 1174), True, 'import numpy as np\n'), ((3464, 3492), 'numpy.ones', 'np.ones', (['self.nodestocombine'], {}), '(self.nodestocombine)\n', (3471, 3492), True, 'import numpy as np\n')] |
"""
Misc Utility functions
"""
import os
import logging
import datetime
import numpy as np
from collections import OrderedDict
def recursive_glob(rootdir=".", suffix=""):
    """Performs recursive glob with given suffix and rootdir
    :param rootdir is the root directory
    :param suffix is the suffix to be searched
    """
    matches = []
    for dirpath, _, filenames in os.walk(rootdir):
        matches.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(suffix)
        )
    return matches
def alpha_blend(input_image, segmentation_mask, alpha=0.5):
    """Alpha Blending utility to overlay RGB masks on RBG images

    :param input_image: np.ndarray with 3 channels
    :param segmentation_mask: np.ndarray with 3 channels
    :param alpha: float weight given to the input image
    :returns: input_image * alpha + segmentation_mask * (1 - alpha)
    """
    # FIX: removed the dead `np.zeros(input_image.size)` pre-allocation --
    # it built a 1-D array (of total element count, not the image shape)
    # that was immediately discarded by the assignment below.
    return input_image * alpha + segmentation_mask * (1 - alpha)
def convert_state_dict(state_dict):
    """Strip the DataParallel "module." prefix from every key of a model
    state dict.

    A dict whose first key lacks the prefix is returned unchanged.

    :param state_dict is the loaded DataParallel model_state
    """
    first_key = next(iter(state_dict))
    if not first_key.startswith("module."):
        return state_dict
    # len("module.") == 7, so key[7:] drops the prefix.
    return OrderedDict((key[7:], value) for key, value in state_dict.items())
def get_logger(logdir):
    """Return the 'ptsemseg' logger, writing INFO+ records to a
    timestamped file inside *logdir*.

    NOTE(review): every call attaches another FileHandler to the same
    named logger, so calling this more than once duplicates log lines --
    confirm callers invoke it a single time. Assumes *logdir* exists.
    """
    logger = logging.getLogger('ptsemseg')
    # Timestamp like 'Mon_Jan__1_12_00_00_2024' (spaces/colons/dashes -> '_')
    ts = str(datetime.datetime.now()).split('.')[0].replace(" ", "_")
    ts = ts.replace(":", "_").replace("-","_")
    file_path = os.path.join(logdir, 'run_{}.log'.format(ts))
    hdlr = logging.FileHandler(file_path)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)
    return logger
| [
"logging.FileHandler",
"os.walk",
"numpy.zeros",
"datetime.datetime.now",
"logging.Formatter",
"collections.OrderedDict",
"os.path.join",
"logging.getLogger"
] | [((838, 882), 'numpy.zeros', 'np.zeros', (['input_image.size'], {'dtype': 'np.float32'}), '(input_image.size, dtype=np.float32)\n', (846, 882), True, 'import numpy as np\n'), ((1295, 1308), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1306, 1308), False, 'from collections import OrderedDict\n'), ((1484, 1513), 'logging.getLogger', 'logging.getLogger', (['"""ptsemseg"""'], {}), "('ptsemseg')\n", (1501, 1513), False, 'import logging\n'), ((1704, 1734), 'logging.FileHandler', 'logging.FileHandler', (['file_path'], {}), '(file_path)\n', (1723, 1734), False, 'import logging\n'), ((1751, 1809), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(levelname)s %(message)s')\n", (1768, 1809), False, 'import logging\n'), ((360, 392), 'os.path.join', 'os.path.join', (['looproot', 'filename'], {}), '(looproot, filename)\n', (372, 392), False, 'import os\n'), ((431, 447), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (438, 447), False, 'import os\n'), ((1527, 1550), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1548, 1550), False, 'import datetime\n')] |
import numpy as np
import os
import torch
import subprocess
import matplotlib.pyplot as plt
import time
import pickle
import re
def save_pickle(features, labels, path, name):
    """Pickle (features, labels) as float32 arrays into a timestamped file.

    The file is written under *path* as ``<name>_<asctime>.pkl`` with the
    spaces of the timestamp replaced by underscores.
    """
    features = np.array(features).astype(np.float32)
    labels = np.array(labels).astype(np.float32)
    stamp = re.sub('[ ]', '_', time.asctime())
    out_path = os.path.join(path, name + '_' + stamp + '.pkl')
    with open(out_path, "wb") as f:
        pickle.dump([features, labels], f)
def heatmap2D(features):
    """Display a 50x50-bin 2D histogram of the first two feature columns.

    Clears the current figure before drawing and blocks on plt.show().
    """
    x1, x2 = features[:, 0], features[:, 1]
    heatmap, xedges, yedges = np.histogram2d(x1, x2, bins=50)
    # extent maps the histogram bins back onto data coordinates for imshow.
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    plt.clf()
    plt.imshow(heatmap.T, extent=extent, origin='lower')
    plt.show()
def int_tuple(s):
    """Parse a comma-separated string like "1,2,3" into a tuple of ints."""
    parts = s.split(',')
    return tuple(map(int, parts))
def find_nan(variable, var_name):
    """Terminate the program if torch tensor *variable* contains any NaN."""
    values = variable.data.cpu().numpy()
    has_nan = np.isnan(values).any()
    if has_nan:
        exit('%s has nan' % var_name)
def get_gpu_memory():
    """Return the 'Used GPU Memory' value for GPU 1 as reported by nvidia-smi.

    Requires a CUDA device (torch.cuda.synchronize) and the nvidia-smi
    binary on PATH. NOTE(review): the command is a shell string with
    shell=True; inputs are constant here, so no injection risk, but keep
    it that way.
    """
    torch.cuda.synchronize()
    opts = [
        'nvidia-smi', '-q', '--gpu=' + str(1), '|', 'grep', '"Used GPU Memory"'
    ]
    cmd = str.join(' ', opts)
    ps = subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = ps.communicate()[0].decode('utf-8')
    # First matching line looks like 'Used GPU Memory : <number> <unit>';
    # keep only the number after the colon.
    output = output.split("\n")[0].split(":")
    consumed_mem = int(output[1].strip().split(" ")[0])
    return consumed_mem
def get_total_norm(parameters, norm_type=2):
    """Return the overall norm of the gradients of *parameters*.

    Args:
        parameters: iterable of tensors carrying a ``.grad`` attribute
        norm_type: order of the norm; float('inf') gives the max-norm
    Returns:
        The norm over all gradients (0 when no parameter has a gradient).
    """
    if norm_type == float('inf'):
        return max(p.grad.data.abs().max() for p in parameters)
    total_norm = 0
    for p in parameters:
        try:
            param_norm = p.grad.data.norm(norm_type)
        except AttributeError:
            # Parameter without a gradient (e.g. frozen / unused); skip it.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            continue
        total_norm += param_norm**norm_type
    # BUG FIX: the root was previously applied *inside* the loop on every
    # iteration, which does not compute the total norm for >1 parameter.
    return total_norm**(1. / norm_type)
def get_dset_path(dset_name, dset_type):
    """Build the path <repo_root>/datasets/<dset_name>/<dset_type>.

    The repo root is taken as the parent of this file's directory.
    """
    parent = "/".join(os.path.dirname(__file__).split("/")[:-1])
    return os.path.join(parent, 'datasets', dset_name, dset_type)
def bool_flag(s):
    """Convert the strings '1'/'0' to True/False; anything else raises."""
    mapping = {'1': True, '0': False}
    if s in mapping:
        return mapping[s]
    raise ValueError('Invalid value "%s" for bool flag (should be 0 or 1)' % s)
def Save_Network(PATH, epoch, net, optimizer):
    """Checkpoint the epoch number plus model and optimizer states to PATH."""
    checkpoint = {
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    torch.save(checkpoint, PATH)
def Load_Network(PATH, net, optimizer):
    """Restore model and optimizer states from the checkpoint file at PATH."""
    checkpoint = torch.load(PATH)
    net.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
def plot_all(prediction, actual, title, model_name, idx):
    """Plot predicted vs. actual (dx, dy, d_theta) traces on three stacked
    axes and save the figure as <model_name>_<idx>.png.

    NOTE(review): the output directory is hard-coded to a Windows path
    (E:/CarProject/NewCode_Project/plot); this fails on other machines.
    """
    pred1, pred2, pred3 = prediction[:, 0], prediction[:, 1], prediction[:, 2]
    actual1, actual2, actual3 = actual[:, 0], actual[:, 1], actual[:, 2]
    # x axis is the 1-based time-step index.
    x = np.arange(1, len(pred1)+1)
    fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(10,10))
    fig.suptitle(title)
    ax1.plot(x, pred1 , label='predicted')
    ax1.plot(x, actual1, label='actual' )
    ax1.set_ylabel('dx [mm]')
    ax2.plot(x, pred2, label='predicted')
    ax2.plot(x, actual2, label='actual' )
    ax2.set_ylabel('dy [mm]')
    ax3.plot(x, pred3, label='predicted')
    ax3.plot(x, actual3, label='actual' )
    ax3.set_ylabel('d_theta [radians]')
    ax3.set_xlabel('time step')
    ax1.legend()
    # plt.show()
    dir = r'E:\CarProject\NewCode_Project\plot'
    checkpoint_path = os.path.join(dir, model_name + '_' + str(idx) + '.png')
    plt.savefig(checkpoint_path, bbox_inches='tight')
    plt.close()
def plot_live(prediction, actual, title): # input: array[[dx,dy.d_theta],...] shape:(num_of_samples,3)
    """Redraw predicted vs. actual (dx, dy, d_theta) traces for live viewing.

    Clears the current figure, plots three stacked axes, then uses
    plt.draw()/plt.pause() so the window refreshes without blocking.
    """
    plt.gcf().clear()
    pred1, pred2, pred3 = prediction[:, 0], prediction[:, 1], prediction[:, 2]
    actual1, actual2, actual3 = actual[:, 0], actual[:, 1], actual[:, 2]
    # x axis is the 1-based time-step index.
    x = np.arange(1, len(pred1)+1)
    fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(10,10))
    fig.suptitle(title)
    ax1.plot(x, pred1, label='predicted')
    ax1.plot(x, actual1, label='actual')
    ax1.set_ylabel('dx [mm]')
    ax2.plot(x, pred2)
    ax2.plot(x, actual2)
    ax2.set_ylabel('dy [mm]')
    ax3.plot(x, pred3)
    ax3.plot(x, actual3)
    ax3.set_ylabel('d_theta [radians]')
    ax3.set_xlabel('time step')
    ax1.legend()
    plt.draw()
    # Tiny pause lets the GUI event loop process the redraw.
    plt.pause(0.00000000001)
def save_states(checkpoint_state, path):
    """Pickle the 'state' and 'state_predict' entries of a checkpoint dict."""
    pair = [checkpoint_state['state'], checkpoint_state['state_predict']]
    with open(path, "wb") as f:
        pickle.dump(pair, f)
def loss_graph(train_loss, val_loss):
    """Plot training vs. validation loss curves, save the figure to
    'lossVSvalidation.png' and display it.

    Args:
        train_loss (sequence): per-epoch training losses
        val_loss (sequence): per-epoch validation losses
    """
    plt.plot(range(len(train_loss)), train_loss, label="train_loss")
    # BUG FIX: the validation curve was mislabelled "train_loss" in the legend.
    plt.plot(range(len(val_loss)), val_loss, label="val_loss")
    plt.legend()
    plt.savefig('lossVSvalidation.png')
    plt.show()
def predict_batch(x, model):
    """Run *model* on a batch of scalar actions and denormalize its output.

    Actions are min-max normalized from [-120, 120] into [0, 1]; the
    model's three outputs are mapped back to dx in [-25, 25], dy in
    [-50, 50] and d_theta in [-1.4, 1.4].

    NOTE(review): requires a CUDA device (.cuda() on the input tensor).
    """
    x_min = -120
    x_max = 120
    dx_min = -25
    dx_max = 25
    dy_min = -50
    dy_max = 50
    dtheta_min = -1.4
    dtheta_max = 1.4
    action_nor = (x - x_min) / (x_max - x_min)
    action_nor = torch.tensor(action_nor, dtype=torch.float)
    with torch.no_grad():
        # Shape (batch,) -> (batch, 1) as the model's expected input.
        action_nor = action_nor.cuda().unsqueeze(1)
        prediction = model(action_nor)
        prediction = prediction.detach().cpu().numpy()
        # Undo the [0, 1] normalization per output column.
        prediction[:,0] = prediction[:,0] * ((dx_max) - (dx_min)) + dx_min
        prediction[:,1] = prediction[:,1] * ((dy_max) - (dy_min)) + dy_min
        prediction[:,2] = prediction[:,2] * ((dtheta_max) - (dtheta_min)) + dtheta_min
    return prediction
def plot_loss(train_loss, val_loss, loss_plot_name):
    """Plot train/validation loss curves and save the figure to
    *loss_plot_name*.

    Args:
        train_loss (sequence): per-epoch training losses
        val_loss (sequence): per-epoch validation losses
        loss_plot_name (str): output image filename
    """
    plt.figure(figsize=(10, 7))
    plt.plot(train_loss, color='orange', label='train loss')
    # BUG FIX: legend typo 'validataion' -> 'validation'
    plt.plot(val_loss, color='red', label='validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig(loss_plot_name, bbox_inches='tight')
    plt.close()
"torch.cuda.synchronize",
"matplotlib.pyplot.savefig",
"pickle.dump",
"matplotlib.pyplot.clf",
"numpy.isnan",
"matplotlib.pyplot.figure",
"torch.no_grad",
"os.path.join",
"time.asctime",
"numpy.histogram2d",
"matplotlib.pyplot.imshow",
"os.path.dirname",
"torch.load",
"matplotlib.pyplot.cl... | [((293, 307), 'time.asctime', 'time.asctime', ([], {}), '()\n', (305, 307), False, 'import time\n'), ((384, 412), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (396, 412), False, 'import os\n'), ((593, 624), 'numpy.histogram2d', 'np.histogram2d', (['x1', 'x2'], {'bins': '(50)'}), '(x1, x2, bins=50)\n', (607, 624), True, 'import numpy as np\n'), ((691, 700), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (698, 700), True, 'import matplotlib.pyplot as plt\n'), ((706, 758), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap.T'], {'extent': 'extent', 'origin': '"""lower"""'}), "(heatmap.T, extent=extent, origin='lower')\n", (716, 758), True, 'import matplotlib.pyplot as plt\n'), ((764, 774), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (772, 774), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1058), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1056, 1058), False, 'import torch\n'), ((1202, 1290), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess\n .STDOUT)\n', (1218, 1290), False, 'import subprocess\n'), ((2008, 2033), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2023, 2033), False, 'import os\n'), ((2106, 2158), 'os.path.join', 'os.path.join', (['_dir', '"""datasets"""', 'dset_name', 'dset_type'], {}), "(_dir, 'datasets', dset_name, dset_type)\n", (2118, 2158), False, 'import os\n'), ((2552, 2575), 'torch.save', 'torch.save', (['state', 'PATH'], {}), '(state, PATH)\n', (2562, 2575), False, 'import torch\n'), ((2632, 2648), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (2642, 2648), False, 'import torch\n'), ((3027, 3076), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 10)'}), '(3, 1, sharex=True, figsize=(10, 10))\n', 
(3039, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3666, 3715), 'matplotlib.pyplot.savefig', 'plt.savefig', (['checkpoint_path'], {'bbox_inches': '"""tight"""'}), "(checkpoint_path, bbox_inches='tight')\n", (3677, 3715), True, 'import matplotlib.pyplot as plt\n'), ((3721, 3732), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3730, 3732), True, 'import matplotlib.pyplot as plt\n'), ((4080, 4129), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 10)'}), '(3, 1, sharex=True, figsize=(10, 10))\n', (4092, 4129), True, 'import matplotlib.pyplot as plt\n'), ((4512, 4522), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4520, 4522), True, 'import matplotlib.pyplot as plt\n'), ((4528, 4544), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-11)'], {}), '(1e-11)\n', (4537, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4924, 4936), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4934, 4936), True, 'import matplotlib.pyplot as plt\n'), ((4942, 4977), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""lossVSvalidation.png"""'], {}), "('lossVSvalidation.png')\n", (4953, 4977), True, 'import matplotlib.pyplot as plt\n'), ((4983, 4993), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4991, 4993), True, 'import matplotlib.pyplot as plt\n'), ((5242, 5285), 'torch.tensor', 'torch.tensor', (['action_nor'], {'dtype': 'torch.float'}), '(action_nor, dtype=torch.float)\n', (5254, 5285), False, 'import torch\n'), ((5770, 5797), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (5780, 5797), True, 'import matplotlib.pyplot as plt\n'), ((5803, 5859), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss'], {'color': '"""orange"""', 'label': '"""train loss"""'}), "(train_loss, color='orange', label='train loss')\n", (5811, 5859), True, 'import matplotlib.pyplot as plt\n'), ((5865, 5922), 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss'], 
{'color': '"""red"""', 'label': '"""validataion loss"""'}), "(val_loss, color='red', label='validataion loss')\n", (5873, 5922), True, 'import matplotlib.pyplot as plt\n'), ((5928, 5948), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5938, 5948), True, 'import matplotlib.pyplot as plt\n'), ((5954, 5972), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (5964, 5972), True, 'import matplotlib.pyplot as plt\n'), ((5978, 5990), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5988, 5990), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6044), 'matplotlib.pyplot.savefig', 'plt.savefig', (['loss_plot_name'], {'bbox_inches': '"""tight"""'}), "(loss_plot_name, bbox_inches='tight')\n", (6007, 6044), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6061), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6059, 6061), True, 'import matplotlib.pyplot as plt\n'), ((454, 488), 'pickle.dump', 'pickle.dump', (['[features, labels]', 'f'], {}), '([features, labels], f)\n', (465, 488), False, 'import pickle\n'), ((4718, 4741), 'pickle.dump', 'pickle.dump', (['[s, sp]', 'f'], {}), '([s, sp], f)\n', (4729, 4741), False, 'import pickle\n'), ((5296, 5311), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5309, 5311), False, 'import torch\n'), ((937, 957), 'numpy.isnan', 'np.isnan', (['variable_n'], {}), '(variable_n)\n', (945, 957), True, 'import numpy as np\n'), ((3844, 3853), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3851, 3853), True, 'import matplotlib.pyplot as plt\n'), ((209, 227), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (217, 227), True, 'import numpy as np\n'), ((248, 264), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (256, 264), True, 'import numpy as np\n'), ((341, 362), 're.sub', 're.sub', (['"""[ ]"""', '"""_"""', 'x'], {}), "('[ ]', '_', x)\n", (347, 362), False, 'import re\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hilbert
from src import XMitt
# Interactive mode so the figure updates without blocking the script.
plt.ion()
# Experiment configuration (TOML) for the CANOPE setup.
exper_file = 'src/experiments/canope_setup.toml'
xmitt = XMitt(exper_file, 1.)
p_sca = []
#for i in range(3):
# Single realization (the loop above is disabled): generate a realization
# and a surface, then record the scattered-ping time series.
xmitt.generate_realization()
xmitt.surface_realization()
p_sca.append(xmitt.ping_surface())
p_sca = np.array(p_sca)
fig, ax = plt.subplots()
# Plot the signal envelope (via the Hilbert transform) in dB.
ax.plot(xmitt.t_a, 20 * np.log10(np.abs(hilbert(p_sca))).T)
ax.grid()
| [
"src.XMitt",
"matplotlib.pyplot.ion",
"numpy.array",
"scipy.signal.hilbert",
"matplotlib.pyplot.subplots"
] | [((108, 117), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (115, 117), True, 'import matplotlib.pyplot as plt\n'), ((176, 198), 'src.XMitt', 'XMitt', (['exper_file', '(1.0)'], {}), '(exper_file, 1.0)\n', (181, 198), False, 'from src import XMitt\n'), ((331, 346), 'numpy.array', 'np.array', (['p_sca'], {}), '(p_sca)\n', (339, 346), True, 'import numpy as np\n'), ((358, 372), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (370, 372), True, 'import matplotlib.pyplot as plt\n'), ((413, 427), 'scipy.signal.hilbert', 'hilbert', (['p_sca'], {}), '(p_sca)\n', (420, 427), False, 'from scipy.signal import hilbert\n')] |
#!/usr/bin/env python3
# Command-line tool: per-molecule radius-of-gyration setup from an MD
# trajectory, using MDAnalysis and the local hjung utility package.
import argparse
parser = argparse.ArgumentParser(
	formatter_class=argparse.ArgumentDefaultsHelpFormatter,
	description='calculation radius of gyration using MDAnalysis')
## args
parser.add_argument('-i', '--input', default='traj.trr', nargs='?',
	help='input trajectory file')
parser.add_argument('-s', '--structure', default='topol.tpr', nargs='?',
	help='.tpr structure file')
parser.add_argument('-select', '--select', nargs='?',
	help='selection of each molecule')
parser.add_argument('-nmol', '--nmol', nargs='?', type=int,
	help='# molecules')
parser.add_argument('-b', '--begin', default=-1, nargs='?', type=int,
	help='begining frame (-1: last half trajectory)')
parser.add_argument('-o', '--output', default='pol.rg', nargs='?',
	help='output filename for Rg files')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import sys
# NOTE(review): hard-coded user path for the hjung helper package.
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
import MDAnalysis as mda
import numpy as np
## timer
start_proc, start_prof = hjung.time.init()
# read trajectory
u = mda.Universe(args.structure,args.input)
n_frames = len(u.trajectory)
skip_frames = 0
# begin == -1 means analyze only the last half of the trajectory.
if args.begin == -1:
	skip_frames = int(n_frames/2)
	print(" skip {} frames".format(skip_frames))
else:
	skip_frames = args.begin
	if args.begin >= n_frames:
		raise ValueError("wrong args.begin because of > n_frames")
n_frames = n_frames - skip_frames
# Selection text is read from the file given by --select.
atomtxt = open(args.select).read()
#hjung.polymer.check_traj_connectivity(u,str(atomtxt),args.nmol,1.8,'random')
select_mol = u.select_atoms(str(atomtxt))
# Sanity check: selected atoms must divide evenly among --nmol molecules.
if len(select_mol)%args.nmol != 0:
	raise ValueError("wrong # molecules, (args.nmol, select_mol) {} {} ".format(args.nmol, len(select_mol)))
n_deg = int(len(select_mol)/args.nmol)
print("assume {} atoms you select per molecule".format(n_deg))
# calculation of Rg
data_rg = np.zeros((n_frames,args.nmol))
i_frame = 0
imod = hjung.time.process_init()
for ts in u.trajectory[skip_frames:]:
for i_mol in range(args.nmol):
mol = select_mol.atoms[n_deg*i_mol:n_deg*(i_mol+1)]
data_rg[i_frame,i_mol] = mol.radius_of_gyration()
i_frame = i_frame + 1
imod = hjung.time.process_print(i_frame, n_frames, imod)
# save raw rg data file
np.savetxt(args.output, data_rg,
header='Rg for each molecules (mean = {} +- {}) with {} frames'.format(np.mean(data_rg),np.std(data_rg),n_frames), fmt='%f', comments='# ')
np.save(args.output, data_rg)
print("average Rg = {} +- {}".format(np.mean(data_rg),np.std(data_rg)))
# save avg file
data_rg_tavg = np.column_stack((np.mean(data_rg, axis=0),np.std(data_rg, axis=0)))
np.savetxt(args.output+'.avg', data_rg_tavg,
header='averaged Rg for each molecule with {} frames'.format(n_frames), fmt='%f', comments='# ')
## timer
hjung.time.end_print(start_proc, start_prof) | [
"sys.path.append",
"hjung.time.init",
"numpy.save",
"argparse.ArgumentParser",
"hjung.time.end_print",
"numpy.std",
"hjung.time.process_init",
"numpy.zeros",
"MDAnalysis.Universe",
"hjung.time.process_print",
"numpy.mean"
] | [((52, 204), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""calculation radius of gyration using MDAnalysis"""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'calculation radius of gyration using MDAnalysis')\n", (75, 204), False, 'import argparse\n'), ((1109, 1156), 'sys.path.append', 'sys.path.append', (['"""/home/htjung/Utility/python/"""'], {}), "('/home/htjung/Utility/python/')\n", (1124, 1156), False, 'import sys\n'), ((1276, 1293), 'hjung.time.init', 'hjung.time.init', ([], {}), '()\n', (1291, 1293), False, 'import hjung\n'), ((1320, 1360), 'MDAnalysis.Universe', 'mda.Universe', (['args.structure', 'args.input'], {}), '(args.structure, args.input)\n', (1332, 1360), True, 'import MDAnalysis as mda\n'), ((2105, 2136), 'numpy.zeros', 'np.zeros', (['(n_frames, args.nmol)'], {}), '((n_frames, args.nmol))\n', (2113, 2136), True, 'import numpy as np\n'), ((2157, 2182), 'hjung.time.process_init', 'hjung.time.process_init', ([], {}), '()\n', (2180, 2182), False, 'import hjung\n'), ((2652, 2681), 'numpy.save', 'np.save', (['args.output', 'data_rg'], {}), '(args.output, data_rg)\n', (2659, 2681), True, 'import numpy as np\n'), ((3019, 3063), 'hjung.time.end_print', 'hjung.time.end_print', (['start_proc', 'start_prof'], {}), '(start_proc, start_prof)\n', (3039, 3063), False, 'import hjung\n'), ((2397, 2446), 'hjung.time.process_print', 'hjung.time.process_print', (['i_frame', 'n_frames', 'imod'], {}), '(i_frame, n_frames, imod)\n', (2421, 2446), False, 'import hjung\n'), ((2720, 2736), 'numpy.mean', 'np.mean', (['data_rg'], {}), '(data_rg)\n', (2727, 2736), True, 'import numpy as np\n'), ((2737, 2752), 'numpy.std', 'np.std', (['data_rg'], {}), '(data_rg)\n', (2743, 2752), True, 'import numpy as np\n'), ((2807, 2831), 'numpy.mean', 'np.mean', (['data_rg'], {'axis': '(0)'}), '(data_rg, axis=0)\n', (2814, 2831), True, 'import numpy as np\n'), 
((2832, 2855), 'numpy.std', 'np.std', (['data_rg'], {'axis': '(0)'}), '(data_rg, axis=0)\n', (2838, 2855), True, 'import numpy as np\n'), ((2582, 2598), 'numpy.mean', 'np.mean', (['data_rg'], {}), '(data_rg)\n', (2589, 2598), True, 'import numpy as np\n'), ((2599, 2614), 'numpy.std', 'np.std', (['data_rg'], {}), '(data_rg)\n', (2605, 2614), True, 'import numpy as np\n')] |
"""
Various tensorflow utilities
"""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.ops import variables
import functools
def passthrough(obj, value): return value
try:
variables.Variable._build_initializer_expr=passthrough
except: # older versions of TF don't have this
pass
def int_shape(x):
return list(map(int, x.get_shape()))
def concat_elu(x):
""" like concatenated ReLU (http://arxiv.org/abs/1603.05201), but then with ELU """
axis = len(x.get_shape()) - 1
return tf.nn.elu(tf.concat([x, -x], axis))
def log_sum_exp(x):
""" numerically stable log_sum_exp implementation that prevents overflow """
axis = len(x.get_shape()) - 1
m = tf.reduce_max(x, axis)
m2 = tf.reduce_max(x, axis, keep_dims=True)
return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))
def log_prob_from_logits(x):
""" numerically stable log_softmax implementation that prevents overflow """
axis = len(x.get_shape()) - 1
m = tf.reduce_max(x, axis, keep_dims=True)
return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))
def discretized_mix_logistic_loss(x, l, sum_all=True):
""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """
xs = int_shape(
x) # true image (i.e. labels) to regress to, e.g. (B,32,32,3)
ls = int_shape(l) # predicted distribution, e.g. (B,32,32,100)
# here and below: unpacking the params of the mixture of logistics
nr_mix = int(ls[-1] / 10)
logit_probs = l[:, :, :, :nr_mix]
l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
means = l[:, :, :, :, :nr_mix]
log_scales = tf.maximum(l[:, :, :, :, nr_mix:2 * nr_mix], -7.)
coeffs = tf.nn.tanh(l[:, :, :, :, 2 * nr_mix:3 * nr_mix])
# here and below: getting the means and adjusting them based on preceding
# sub-pixels
x = tf.reshape(x, xs + [1]) + tf.zeros(xs + [nr_mix])
m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :]
* x[:, :, :, 0, :], [xs[0], xs[1], xs[2], 1, nr_mix])
m3 = tf.reshape(means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * x[:, :, :, 0, :] +
coeffs[:, :, :, 2, :] * x[:, :, :, 1, :], [xs[0], xs[1], xs[2], 1, nr_mix])
means = tf.concat([tf.reshape(means[:, :, :, 0, :], [
xs[0], xs[1], xs[2], 1, nr_mix]), m2, m3], 3)
centered_x = x - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -tf.nn.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)
# now select the right output: left edge case, right edge case, normal
# case, extremely low prob case (doesn't actually happen for us)
# this is what we are really doing, but using the robust version below for extreme cases in other applications and to avoid NaN issue with tf.select()
# log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta)))
# robust version, that still works if probabilities are below 1e-5 (which never happens in our code)
# tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires use to use some ugly tricks to avoid potential NaNs
# the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never actually used as output, it's purely there to get around the tf.select() gradient issue
# if the probability on a sub-pixel is below 1e-5, we use an approximation
# based on the assumption that the log-density is constant in the bin of
# the observed sub-pixel value
log_probs = tf.where(x < -0.999, log_cdf_plus, tf.where(x > 0.999, log_one_minus_cdf_min,
tf.where(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)), log_pdf_mid - np.log(127.5))))
log_probs = tf.reduce_sum(log_probs, 3) + log_prob_from_logits(logit_probs)
if sum_all:
return -tf.reduce_sum(log_sum_exp(log_probs))
else:
return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])
def discretized_mix_logistic_loss_per_chn(x, lr, lg, lb, sum_all=True):
""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """
xs = int_shape(x) # true image (i.e. labels) to regress to, e.g. (B,32,32,3)
ls = int_shape(lr) # predicted distribution, e.g. (B,32,32,100)
# here and below: unpacking the params of the mixture of logistics
nr_mix = int(ls[-1] / 3)
logit_probs = lr[:, :, :, :nr_mix]
means = tf.concat([lr[:, :, :, None, nr_mix:nr_mix*2], lg[:, :, :, None, nr_mix:nr_mix*2], lb[:, :, :, None, nr_mix:nr_mix*2],], axis=-2)
log_scales = tf.concat([lr[:, :, :, None, nr_mix*2:nr_mix*3], lg[:, :, :, None, nr_mix*2:nr_mix*3], lb[:, :, :, None, nr_mix*2:nr_mix*3],], axis=-2)
log_scales = tf.maximum(log_scales, -7.)
x = tf.reshape(x, xs + [1]) + tf.zeros(xs + [nr_mix])
centered_x = x - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -tf.nn.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)
# now select the right output: left edge case, right edge case, normal
# case, extremely low prob case (doesn't actually happen for us)
# this is what we are really doing, but using the robust version below for extreme cases in other applications and to avoid NaN issue with tf.select()
# log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta)))
# robust version, that still works if probabilities are below 1e-5 (which never happens in our code)
# tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires use to use some ugly tricks to avoid potential NaNs
# the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never actually used as output, it's purely there to get around the tf.select() gradient issue
# if the probability on a sub-pixel is below 1e-5, we use an approximation
# based on the assumption that the log-density is constant in the bin of
# the observed sub-pixel value
log_probs = tf.where(x < -0.999, log_cdf_plus, tf.where(x > 0.999, log_one_minus_cdf_min,
tf.where(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)), log_pdf_mid - np.log(127.5))))
log_probs = tf.reduce_sum(log_probs, 3) + log_prob_from_logits(logit_probs)
if sum_all:
return -tf.reduce_sum(log_sum_exp(log_probs))
else:
return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])
def sample_from_discretized_mix_logistic(l, nr_mix):
ls = int_shape(l)
xs = ls[:-1] + [3]
# unpack parameters
logit_probs = l[:, :, :, :nr_mix]
l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
# sample mixture indicator from softmax
sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
# select logistic parameters
means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
log_scales = tf.maximum(tf.reduce_sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
coeffs = tf.reduce_sum(tf.nn.tanh(
l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
x1 = tf.minimum(tf.maximum(
x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
x2 = tf.minimum(tf.maximum(
x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
def get_var_maybe_avg(var_name, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
v = tf.get_variable(var_name, **kwargs)
if ema is not None:
v = ema.average(v)
return v
def get_vars_maybe_avg(var_names, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(vn, ema, **kwargs))
return vars
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999, eps=1e-8):
''' Adam optimizer '''
updates = []
if type(cost_or_grads) is not list:
grads = tf.gradients(cost_or_grads, params)
else:
grads = cost_or_grads
t = tf.Variable(1., 'adam_t')
for p, g in zip(params, grads):
mg = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_mg')
if mom1 > 0:
v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_v')
v_t = mom1 * v + (1. - mom1) * g
v_hat = v_t / (1. - tf.pow(mom1, t))
updates.append(v.assign(v_t))
else:
v_hat = g
mg_t = mom2 * mg + (1. - mom2) * tf.square(g)
mg_hat = mg_t / (1. - tf.pow(mom2, t))
g_t = v_hat / tf.sqrt(mg_hat + eps)
p_t = p - lr * g_t
updates.append(mg.assign(mg_t))
updates.append(p.assign(p_t))
updates.append(t.assign_add(1))
return tf.group(*updates)
def get_name(layer_name, counters):
''' utlity for keeping track of layer names '''
if not layer_name in counters:
counters[layer_name] = 0
name = layer_name + '_' + str(counters[layer_name])
counters[layer_name] += 1
return name
@add_arg_scope
def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
''' fully connected layer '''
name = get_name('dense', counters)
with tf.variable_scope(name):
if init:
# data based initialization of parameters
V = tf.get_variable('V', [int(x.get_shape()[
1]), num_units], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
x_init = tf.matmul(x, V_norm)
m_init, v_init = tf.nn.moments(x_init, [0])
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
g = tf.get_variable('g', dtype=tf.float32,
initializer=scale_init, trainable=True)
b = tf.get_variable('b', dtype=tf.float32,
initializer=-m_init * scale_init, trainable=True)
x_init = tf.reshape(
scale_init, [1, num_units]) * (x_init - tf.reshape(m_init, [1, num_units]))
if nonlinearity is not None:
x_init = nonlinearity(x_init)
return x_init
else:
V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
# tf.assert_variables_initialized([V, g, b])
# use weight normalization (Salimans & Kingma, 2016)
x = tf.matmul(x, V)
scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
x = tf.reshape(scaler, [1, num_units]) * \
x + tf.reshape(b, [1, num_units])
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x)
return x
@add_arg_scope
def conv2d(x, num_filters, filter_size=[3, 3], stride=[1, 1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
''' convolutional layer '''
name = get_name('conv2d', counters)
with tf.variable_scope(name):
if init:
# data based initialization of parameters
V = tf.get_variable('V', filter_size + [int(x.get_shape()[-1]), num_filters],
tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2])
x_init = tf.nn.conv2d(x, V_norm, [1] + stride + [1], pad)
m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
scale_init = init_scale / tf.sqrt(v_init + 1e-8)
g = tf.get_variable('g', dtype=tf.float32,
initializer=scale_init, trainable=True)
b = tf.get_variable('b', dtype=tf.float32,
initializer=-m_init * scale_init, trainable=True)
x_init = tf.reshape(scale_init, [
1, 1, 1, num_filters]) * (x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))
if nonlinearity is not None:
x_init = nonlinearity(x_init)
return x_init
else:
V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
# tf.assert_variables_initialized([V, g, b])
# use weight normalization (Salimans & Kingma, 2016)
W = tf.reshape(g, [1, 1, 1, num_filters]) * \
tf.nn.l2_normalize(V, [0, 1, 2])
# calculate convolutional layer output
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x)
return x
@add_arg_scope
def deconv2d(x, num_filters, filter_size=[3, 3], stride=[1, 1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
''' transposed convolutional layer '''
name = get_name('deconv2d', counters)
xs = int_shape(x)
if pad == 'SAME':
target_shape = [xs[0], xs[1] * stride[0],
xs[2] * stride[1], num_filters]
else:
target_shape = [xs[0], xs[1] * stride[0] + filter_size[0] -
1, xs[2] * stride[1] + filter_size[1] - 1, num_filters]
with tf.variable_scope(name):
if init:
# data based initialization of parameters
V = tf.get_variable('V', filter_size + [num_filters, int(x.get_shape(
)[-1])], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 3])
x_init = tf.nn.conv2d_transpose(x, V_norm, target_shape, [
1] + stride + [1], padding=pad)
m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
scale_init = init_scale / tf.sqrt(v_init + 1e-8)
g = tf.get_variable('g', dtype=tf.float32,
initializer=scale_init, trainable=True)
b = tf.get_variable('b', dtype=tf.float32,
initializer=-m_init * scale_init, trainable=True)
x_init = tf.reshape(scale_init, [
1, 1, 1, num_filters]) * (x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))
if nonlinearity is not None:
x_init = nonlinearity(x_init)
return x_init
else:
V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
# tf.assert_variables_initialized([V, g, b])
# use weight normalization (Salimans & Kingma, 2016)
W = tf.reshape(g, [1, 1, num_filters, 1]) * \
tf.nn.l2_normalize(V, [0, 1, 3])
# calculate convolutional layer output
x = tf.nn.conv2d_transpose(
x, W, target_shape, [1] + stride + [1], padding=pad)
x = tf.nn.bias_add(x, b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x)
return x
@add_arg_scope
def nin(x, num_units, **kwargs):
""" a network in network layer (1x1 CONV) """
s = int_shape(x)
x = tf.reshape(x, [np.prod(s[:-1]), s[-1]])
x = dense(x, num_units, **kwargs)
return tf.reshape(x, s[:-1] + [num_units])
''' meta-layer consisting of multiple base layers '''
@add_arg_scope
def gated_resnet(x, a=None, h=None, nonlinearity=concat_elu, conv=conv2d, init=False, counters={}, ema=None, dropout_p=0., **kwargs):
xs = int_shape(x)
num_filters = xs[-1]
c1 = conv(nonlinearity(x), num_filters)
if a is not None: # add short-cut connection if auxiliary input 'a' is given
c1 += nin(nonlinearity(a), num_filters)
c1 = nonlinearity(c1)
if dropout_p > 0:
c1 = tf.nn.dropout(c1, keep_prob=1. - dropout_p)
c2 = conv(c1, num_filters * 2, init_scale=0.1)
# add projection of h vector if included: conditional generation
if h is not None:
with tf.variable_scope(get_name('conditional_weights', counters)):
hw = get_var_maybe_avg('hw', ema, shape=[int_shape(h)[-1], 2 * num_filters], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
if init:
hw = hw.initialized_value()
c2 += tf.reshape(tf.matmul(h, hw), [xs[0], 1, 1, 2 * num_filters])
# Is this 3,2 or 2,3 ?
a, b = tf.split(c2, 2, 3)
c3 = a * tf.nn.sigmoid(b)
return x + c3
''' utilities for shifting the image around, efficient alternative to masking convolutions '''
def down_shift(x, step=1):
xs = int_shape(x)
return tf.concat([tf.zeros([xs[0], step, xs[2], xs[3]]), x[:, :xs[1] - step, :, :]], 1)
def right_shift(x, step=1):
xs = int_shape(x)
return tf.concat([tf.zeros([xs[0], xs[1], step, xs[3]]), x[:, :, :xs[2] - step, :]], 2)
def left_shift(x, step=1):
xs = int_shape(x)
return tf.concat([x[:, :, step:, :], tf.zeros([xs[0], xs[1], step, xs[3]]),], 2)
@add_arg_scope
def down_shifted_conv2d(x, num_filters, filter_size=[2, 3], stride=[1, 1], **kwargs):
x = tf.pad(x, [[0, 0], [filter_size[0] - 1, 0],
[int((filter_size[1] - 1) / 2), int((filter_size[1] - 1) / 2)], [0, 0]])
return conv2d(x, num_filters, filter_size=filter_size, pad='VALID', stride=stride, **kwargs)
@add_arg_scope
def down_shifted_deconv2d(x, num_filters, filter_size=[2, 3], stride=[1, 1], **kwargs):
x = deconv2d(x, num_filters, filter_size=filter_size,
pad='VALID', stride=stride, **kwargs)
xs = int_shape(x)
return x[:, :(xs[1] - filter_size[0] + 1), int((filter_size[1] - 1) / 2):(xs[2] - int((filter_size[1] - 1) / 2)), :]
@add_arg_scope
def down_right_shifted_conv2d(x, num_filters, filter_size=[2, 2], stride=[1, 1], **kwargs):
x = tf.pad(x, [[0, 0], [filter_size[0] - 1, 0],
[filter_size[1] - 1, 0], [0, 0]])
return conv2d(x, num_filters, filter_size=filter_size, pad='VALID', stride=stride, **kwargs)
@add_arg_scope
def down_right_shifted_deconv2d(x, num_filters, filter_size=[2, 2], stride=[1, 1], **kwargs):
x = deconv2d(x, num_filters, filter_size=filter_size,
pad='VALID', stride=stride, **kwargs)
xs = int_shape(x)
return x[:, :(xs[1] - filter_size[0] + 1):, :(xs[2] - filter_size[1] + 1), :]
def causal_shift_nin(x, num_filters, **kwargs):
chns = int_shape(x)[-1]
assert chns % 4 == 0
left, upleft, up, upright = tf.split(x, 4, axis=-1)
return nin(
tf.concat(
[right_shift(left), right_shift(down_shift(upleft)), down_shift(up), down_shift(left_shift(upleft))],
axis=-1
),
num_filters,
**kwargs
)
from tensorflow.python.framework import function
@add_arg_scope
def mem_saving_causal_shift_nin(x, num_filters, init, counters, **kwargs):
if init:
return causal_shift_nin(x, num_filters, init=init, counters=counters, **kwargs)
shps = int_shape(x)
@function.Defun(tf.float32)
def go(ix):
tf.get_variable_scope().reuse_variables()
ix.set_shape(shps)
return causal_shift_nin(ix, num_filters, init=init, counters=counters, **kwargs)
temp = go(x)
temp.set_shape([shps[0], shps[1], shps[2], num_filters])
return temp
import functools
@functools.lru_cache(maxsize=32)
def get_causal_mask(canvas_size, rate=1):
causal_mask = np.zeros([canvas_size, canvas_size], dtype=np.float32)
for i in range(canvas_size):
causal_mask[i, :i] = 1.
causal_mask = tf.constant(causal_mask, dtype=tf.float32)
if rate > 1:
dim = int(np.sqrt(canvas_size))
causal_mask = tf.reshape(causal_mask, [canvas_size, dim, dim, 1])
causal_mask = -tf.nn.max_pool(-causal_mask, [1, rate, rate, 1], [1, rate, rate, 1], 'SAME')
causal_mask = tf.reshape(causal_mask, [1, canvas_size, -1])
return causal_mask
def causal_attention(key, mixin, query, downsample=1, use_pos_enc=False):
bs, nr_chns = int_shape(key)[0], int_shape(key)[-1]
if downsample > 1:
pool_shape = [1, downsample, downsample, 1]
key = tf.nn.max_pool(key, pool_shape, pool_shape, 'SAME')
mixin = tf.nn.max_pool(mixin, pool_shape, pool_shape, 'SAME')
xs = int_shape(mixin)
if use_pos_enc:
pos1 = tf.range(0., xs[1]) / xs[1]
pos2 = tf.range(0., xs[2]) / xs[1]
mixin = tf.concat([
mixin,
tf.tile(pos1[None, :, None, None], [xs[0], 1, xs[2], 1]),
tf.tile(pos2[None, None, :, None], [xs[0], xs[2], 1, 1]),
], axis=3)
mixin_chns = int_shape(mixin)[-1]
canvas_size = int(np.prod(int_shape(key)[1:-1]))
canvas_size_q = int(np.prod(int_shape(query)[1:-1]))
causal_mask = get_causal_mask(canvas_size_q, downsample)
dot = tf.matmul(
tf.reshape(query, [bs, canvas_size_q, nr_chns]),
tf.reshape(key, [bs, canvas_size, nr_chns]),
transpose_b=True
) - (1. - causal_mask) * 1e10
dot = dot - tf.reduce_max(dot, axis=-1, keep_dims=True)
causal_exp_dot = tf.exp(dot / np.sqrt(nr_chns).astype(np.float32)) * causal_mask
causal_probs = causal_exp_dot / (tf.reduce_sum(causal_exp_dot, axis=-1, keep_dims=True) + 1e-6)
mixed = tf.matmul(
causal_probs,
tf.reshape(mixin, [bs, canvas_size, mixin_chns])
)
return tf.reshape(mixed, int_shape(query)[:-1] + [mixin_chns])
def non_cached_get_causal_mask(canvas_size, causal_unit):
assert causal_unit == 1
ones = tf.ones([canvas_size, canvas_size], dtype=tf.float32)
lt = tf.matrix_band_part(ones, -1, 0) - tf.matrix_diag(tf.ones([canvas_size,], dtype=tf.float32))
return lt[None, ...]
def mem_saving_causal_attention(_key, _mixin, _query, causal_unit=1):
# @function.Defun(tf.float32, tf.float32, tf.float32)
def go(key, mixin, query,):
key.set_shape(int_shape(_key))
mixin.set_shape(int_shape(_mixin))
query.set_shape(int_shape(_query))
bs, nr_chns = int_shape(key)[0], int_shape(key)[-1]
mixin_chns = int_shape(mixin)[-1]
canvas_size = int(np.prod(int_shape(key)[1:-1]))
causal_mask = non_cached_get_causal_mask(canvas_size, causal_unit=causal_unit)
dot = tf.matmul(
tf.reshape(query, [bs, canvas_size, nr_chns]),
tf.reshape(key, [bs, canvas_size, nr_chns]),
transpose_b=True
) - (1. - causal_mask) * 1e10
dot = dot - tf.reduce_max(dot, axis=-1, keep_dims=True)
causal_exp_dot = tf.exp(dot / np.sqrt(nr_chns).astype(np.float32)) * causal_mask
causal_probs = causal_exp_dot / (tf.reduce_sum(causal_exp_dot, axis=-1, keep_dims=True) + 1e-6)
mixed = tf.matmul(
causal_probs,
tf.reshape(mixin, [bs, canvas_size, mixin_chns])
)
return tf.reshape(mixed, int_shape(mixin))
temp = go(_key, _mixin, _query)
temp.set_shape(int_shape(_mixin))
return temp
| [
"tensorflow.reduce_sum",
"tensorflow.matrix_band_part",
"tensorflow.square",
"tensorflow.nn.tanh",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.nn.l2_normalize",
"tensorflow.get_variable_scope",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.reduce_m... | [((21746, 21777), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (21765, 21777), False, 'import functools\n'), ((797, 819), 'tensorflow.reduce_max', 'tf.reduce_max', (['x', 'axis'], {}), '(x, axis)\n', (810, 819), True, 'import tensorflow as tf\n'), ((830, 868), 'tensorflow.reduce_max', 'tf.reduce_max', (['x', 'axis'], {'keep_dims': '(True)'}), '(x, axis, keep_dims=True)\n', (843, 868), True, 'import tensorflow as tf\n'), ((1089, 1127), 'tensorflow.reduce_max', 'tf.reduce_max', (['x', 'axis'], {'keep_dims': '(True)'}), '(x, axis, keep_dims=True)\n', (1102, 1127), True, 'import tensorflow as tf\n'), ((1700, 1750), 'tensorflow.reshape', 'tf.reshape', (['l[:, :, :, nr_mix:]', '(xs + [nr_mix * 3])'], {}), '(l[:, :, :, nr_mix:], xs + [nr_mix * 3])\n', (1710, 1750), True, 'import tensorflow as tf\n'), ((1805, 1855), 'tensorflow.maximum', 'tf.maximum', (['l[:, :, :, :, nr_mix:2 * nr_mix]', '(-7.0)'], {}), '(l[:, :, :, :, nr_mix:2 * nr_mix], -7.0)\n', (1815, 1855), True, 'import tensorflow as tf\n'), ((1869, 1917), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['l[:, :, :, :, 2 * nr_mix:3 * nr_mix]'], {}), '(l[:, :, :, :, 2 * nr_mix:3 * nr_mix])\n', (1879, 1917), True, 'import tensorflow as tf\n'), ((2084, 2197), 'tensorflow.reshape', 'tf.reshape', (['(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * x[:, :, :, 0, :])', '[xs[0], xs[1], xs[2], 1, nr_mix]'], {}), '(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * x[:, :, :, 0, :],\n [xs[0], xs[1], xs[2], 1, nr_mix])\n', (2094, 2197), True, 'import tensorflow as tf\n'), ((2225, 2381), 'tensorflow.reshape', 'tf.reshape', (['(means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * x[:, :, :, 0, :] + coeffs[:,\n :, :, 2, :] * x[:, :, :, 1, :])', '[xs[0], xs[1], xs[2], 1, nr_mix]'], {}), '(means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * x[:, :, :, 0, :] +\n coeffs[:, :, :, 2, :] * x[:, :, :, 1, :], [xs[0], xs[1], xs[2], 1, nr_mix])\n', (2235, 2381), True, 'import tensorflow as 
tf\n'), ((2571, 2590), 'tensorflow.exp', 'tf.exp', (['(-log_scales)'], {}), '(-log_scales)\n', (2577, 2590), True, 'import tensorflow as tf\n'), ((2658, 2680), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['plus_in'], {}), '(plus_in)\n', (2671, 2680), True, 'import tensorflow as tf\n'), ((2746, 2767), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['min_in'], {}), '(min_in)\n', (2759, 2767), True, 'import tensorflow as tf\n'), ((5310, 5449), 'tensorflow.concat', 'tf.concat', (['[lr[:, :, :, None, nr_mix:nr_mix * 2], lg[:, :, :, None, nr_mix:nr_mix * 2],\n lb[:, :, :, None, nr_mix:nr_mix * 2]]'], {'axis': '(-2)'}), '([lr[:, :, :, None, nr_mix:nr_mix * 2], lg[:, :, :, None, nr_mix:\n nr_mix * 2], lb[:, :, :, None, nr_mix:nr_mix * 2]], axis=-2)\n', (5319, 5449), True, 'import tensorflow as tf\n'), ((5458, 5609), 'tensorflow.concat', 'tf.concat', (['[lr[:, :, :, None, nr_mix * 2:nr_mix * 3], lg[:, :, :, None, nr_mix * 2:\n nr_mix * 3], lb[:, :, :, None, nr_mix * 2:nr_mix * 3]]'], {'axis': '(-2)'}), '([lr[:, :, :, None, nr_mix * 2:nr_mix * 3], lg[:, :, :, None, \n nr_mix * 2:nr_mix * 3], lb[:, :, :, None, nr_mix * 2:nr_mix * 3]], axis=-2)\n', (5467, 5609), True, 'import tensorflow as tf\n'), ((5612, 5640), 'tensorflow.maximum', 'tf.maximum', (['log_scales', '(-7.0)'], {}), '(log_scales, -7.0)\n', (5622, 5640), True, 'import tensorflow as tf\n'), ((5745, 5764), 'tensorflow.exp', 'tf.exp', (['(-log_scales)'], {}), '(-log_scales)\n', (5751, 5764), True, 'import tensorflow as tf\n'), ((5832, 5854), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['plus_in'], {}), '(plus_in)\n', (5845, 5854), True, 'import tensorflow as tf\n'), ((5920, 5941), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['min_in'], {}), '(min_in)\n', (5933, 5941), True, 'import tensorflow as tf\n'), ((8157, 8207), 'tensorflow.reshape', 'tf.reshape', (['l[:, :, :, nr_mix:]', '(xs + [nr_mix * 3])'], {}), '(l[:, :, :, nr_mix:], xs + [nr_mix * 3])\n', (8167, 8207), True, 'import tensorflow as tf\n'), ((8448, 8486), 
'tensorflow.reshape', 'tf.reshape', (['sel', '(xs[:-1] + [1, nr_mix])'], {}), '(sel, xs[:-1] + [1, nr_mix])\n', (8458, 8486), True, 'import tensorflow as tf\n'), ((8534, 8580), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(l[:, :, :, :, :nr_mix] * sel)', '(4)'], {}), '(l[:, :, :, :, :nr_mix] * sel, 4)\n', (8547, 8580), True, 'import tensorflow as tf\n'), ((9552, 9587), 'tensorflow.get_variable', 'tf.get_variable', (['var_name'], {}), '(var_name, **kwargs)\n', (9567, 9587), True, 'import tensorflow as tf\n'), ((10165, 10191), 'tensorflow.Variable', 'tf.Variable', (['(1.0)', '"""adam_t"""'], {}), "(1.0, 'adam_t')\n", (10176, 10191), True, 'import tensorflow as tf\n'), ((10878, 10896), 'tensorflow.group', 'tf.group', (['*updates'], {}), '(*updates)\n', (10886, 10896), True, 'import tensorflow as tf\n'), ((17527, 17562), 'tensorflow.reshape', 'tf.reshape', (['x', '(s[:-1] + [num_units])'], {}), '(x, s[:-1] + [num_units])\n', (17537, 17562), True, 'import tensorflow as tf\n'), ((18722, 18740), 'tensorflow.split', 'tf.split', (['c2', '(2)', '(3)'], {}), '(c2, 2, 3)\n', (18730, 18740), True, 'import tensorflow as tf\n'), ((20167, 20244), 'tensorflow.pad', 'tf.pad', (['x', '[[0, 0], [filter_size[0] - 1, 0], [filter_size[1] - 1, 0], [0, 0]]'], {}), '(x, [[0, 0], [filter_size[0] - 1, 0], [filter_size[1] - 1, 0], [0, 0]])\n', (20173, 20244), True, 'import tensorflow as tf\n'), ((20838, 20861), 'tensorflow.split', 'tf.split', (['x', '(4)'], {'axis': '(-1)'}), '(x, 4, axis=-1)\n', (20846, 20861), True, 'import tensorflow as tf\n'), ((21410, 21436), 'tensorflow.python.framework.function.Defun', 'function.Defun', (['tf.float32'], {}), '(tf.float32)\n', (21424, 21436), False, 'from tensorflow.python.framework import function\n'), ((21840, 21894), 'numpy.zeros', 'np.zeros', (['[canvas_size, canvas_size]'], {'dtype': 'np.float32'}), '([canvas_size, canvas_size], dtype=np.float32)\n', (21848, 21894), True, 'import numpy as np\n'), ((21981, 22023), 'tensorflow.constant', 
'tf.constant', (['causal_mask'], {'dtype': 'tf.float32'}), '(causal_mask, dtype=tf.float32)\n', (21992, 22023), True, 'import tensorflow as tf\n'), ((22282, 22327), 'tensorflow.reshape', 'tf.reshape', (['causal_mask', '[1, canvas_size, -1]'], {}), '(causal_mask, [1, canvas_size, -1])\n', (22292, 22327), True, 'import tensorflow as tf\n'), ((24008, 24061), 'tensorflow.ones', 'tf.ones', (['[canvas_size, canvas_size]'], {'dtype': 'tf.float32'}), '([canvas_size, canvas_size], dtype=tf.float32)\n', (24015, 24061), True, 'import tensorflow as tf\n'), ((620, 644), 'tensorflow.concat', 'tf.concat', (['[x, -x]', 'axis'], {}), '([x, -x], axis)\n', (629, 644), True, 'import tensorflow as tf\n'), ((2024, 2047), 'tensorflow.reshape', 'tf.reshape', (['x', '(xs + [1])'], {}), '(x, xs + [1])\n', (2034, 2047), True, 'import tensorflow as tf\n'), ((2050, 2073), 'tensorflow.zeros', 'tf.zeros', (['(xs + [nr_mix])'], {}), '(xs + [nr_mix])\n', (2058, 2073), True, 'import tensorflow as tf\n'), ((2857, 2880), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['plus_in'], {}), '(plus_in)\n', (2871, 2880), True, 'import tensorflow as tf\n'), ((2972, 2994), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['min_in'], {}), '(min_in)\n', (2986, 2994), True, 'import tensorflow as tf\n'), ((4597, 4624), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_probs', '(3)'], {}), '(log_probs, 3)\n', (4610, 4624), True, 'import tensorflow as tf\n'), ((5651, 5674), 'tensorflow.reshape', 'tf.reshape', (['x', '(xs + [1])'], {}), '(x, xs + [1])\n', (5661, 5674), True, 'import tensorflow as tf\n'), ((5677, 5700), 'tensorflow.zeros', 'tf.zeros', (['(xs + [nr_mix])'], {}), '(xs + [nr_mix])\n', (5685, 5700), True, 'import tensorflow as tf\n'), ((6031, 6054), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['plus_in'], {}), '(plus_in)\n', (6045, 6054), True, 'import tensorflow as tf\n'), ((6146, 6168), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['min_in'], {}), '(min_in)\n', (6160, 6168), True, 'import tensorflow as 
tf\n'), ((7771, 7798), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_probs', '(3)'], {}), '(log_probs, 3)\n', (7784, 7798), True, 'import tensorflow as tf\n'), ((8610, 8666), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(l[:, :, :, :, nr_mix:2 * nr_mix] * sel)', '(4)'], {}), '(l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4)\n', (8623, 8666), True, 'import tensorflow as tf\n'), ((9063, 9094), 'tensorflow.maximum', 'tf.maximum', (['x[:, :, :, 0]', '(-1.0)'], {}), '(x[:, :, :, 0], -1.0)\n', (9073, 9094), True, 'import tensorflow as tf\n'), ((9120, 9177), 'tensorflow.maximum', 'tf.maximum', (['(x[:, :, :, 1] + coeffs[:, :, :, 0] * x0)', '(-1.0)'], {}), '(x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.0)\n', (9130, 9177), True, 'import tensorflow as tf\n'), ((9213, 9300), 'tensorflow.maximum', 'tf.maximum', (['(x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1)', '(-1.0)'], {}), '(x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] *\n x1, -1.0)\n', (9223, 9300), True, 'import tensorflow as tf\n'), ((10078, 10113), 'tensorflow.gradients', 'tf.gradients', (['cost_or_grads', 'params'], {}), '(cost_or_grads, params)\n', (10090, 10113), True, 'import tensorflow as tf\n'), ((11376, 11399), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (11393, 11399), True, 'import tensorflow as tf\n'), ((13203, 13226), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (13220, 13226), True, 'import tensorflow as tf\n'), ((15482, 15505), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (15499, 15505), True, 'import tensorflow as tf\n'), ((18067, 18111), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['c1'], {'keep_prob': '(1.0 - dropout_p)'}), '(c1, keep_prob=1.0 - dropout_p)\n', (18080, 18111), True, 'import tensorflow as tf\n'), ((18755, 18771), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['b'], {}), '(b)\n', (18768, 18771), True, 'import tensorflow as tf\n'), ((22108, 22159), 
'tensorflow.reshape', 'tf.reshape', (['causal_mask', '[canvas_size, dim, dim, 1]'], {}), '(causal_mask, [canvas_size, dim, dim, 1])\n', (22118, 22159), True, 'import tensorflow as tf\n'), ((22584, 22635), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['key', 'pool_shape', 'pool_shape', '"""SAME"""'], {}), "(key, pool_shape, pool_shape, 'SAME')\n", (22598, 22635), True, 'import tensorflow as tf\n'), ((22653, 22706), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['mixin', 'pool_shape', 'pool_shape', '"""SAME"""'], {}), "(mixin, pool_shape, pool_shape, 'SAME')\n", (22667, 22706), True, 'import tensorflow as tf\n'), ((23487, 23530), 'tensorflow.reduce_max', 'tf.reduce_max', (['dot'], {'axis': '(-1)', 'keep_dims': '(True)'}), '(dot, axis=-1, keep_dims=True)\n', (23500, 23530), True, 'import tensorflow as tf\n'), ((23778, 23826), 'tensorflow.reshape', 'tf.reshape', (['mixin', '[bs, canvas_size, mixin_chns]'], {}), '(mixin, [bs, canvas_size, mixin_chns])\n', (23788, 23826), True, 'import tensorflow as tf\n'), ((24072, 24104), 'tensorflow.matrix_band_part', 'tf.matrix_band_part', (['ones', '(-1)', '(0)'], {}), '(ones, -1, 0)\n', (24091, 24104), True, 'import tensorflow as tf\n'), ((2423, 2489), 'tensorflow.reshape', 'tf.reshape', (['means[:, :, :, 0, :]', '[xs[0], xs[1], xs[2], 1, nr_mix]'], {}), '(means[:, :, :, 0, :], [xs[0], xs[1], xs[2], 1, nr_mix])\n', (2433, 2489), True, 'import tensorflow as tf\n'), ((3264, 3286), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['mid_in'], {}), '(mid_in)\n', (3278, 3286), True, 'import tensorflow as tf\n'), ((6438, 6460), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['mid_in'], {}), '(mid_in)\n', (6452, 6460), True, 'import tensorflow as tf\n'), ((8711, 8759), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['l[:, :, :, :, 2 * nr_mix:3 * nr_mix]'], {}), '(l[:, :, :, :, 2 * nr_mix:3 * nr_mix])\n', (8721, 8759), True, 'import tensorflow as tf\n'), ((8992, 9010), 'tensorflow.exp', 'tf.exp', (['log_scales'], {}), '(log_scales)\n', (8998, 9010), True, 
'import tensorflow as tf\n'), ((9334, 9363), 'tensorflow.reshape', 'tf.reshape', (['x0', '(xs[:-1] + [1])'], {}), '(x0, xs[:-1] + [1])\n', (9344, 9363), True, 'import tensorflow as tf\n'), ((9365, 9394), 'tensorflow.reshape', 'tf.reshape', (['x1', '(xs[:-1] + [1])'], {}), '(x1, xs[:-1] + [1])\n', (9375, 9394), True, 'import tensorflow as tf\n'), ((9396, 9425), 'tensorflow.reshape', 'tf.reshape', (['x2', '(xs[:-1] + [1])'], {}), '(x2, xs[:-1] + [1])\n', (9406, 9425), True, 'import tensorflow as tf\n'), ((10699, 10720), 'tensorflow.sqrt', 'tf.sqrt', (['(mg_hat + eps)'], {}), '(mg_hat + eps)\n', (10706, 10720), True, 'import tensorflow as tf\n'), ((11740, 11760), 'tensorflow.matmul', 'tf.matmul', (['x', 'V_norm'], {}), '(x, V_norm)\n', (11749, 11760), True, 'import tensorflow as tf\n'), ((11791, 11817), 'tensorflow.nn.moments', 'tf.nn.moments', (['x_init', '[0]'], {}), '(x_init, [0])\n', (11804, 11817), True, 'import tensorflow as tf\n'), ((11898, 11976), 'tensorflow.get_variable', 'tf.get_variable', (['"""g"""'], {'dtype': 'tf.float32', 'initializer': 'scale_init', 'trainable': '(True)'}), "('g', dtype=tf.float32, initializer=scale_init, trainable=True)\n", (11913, 11976), True, 'import tensorflow as tf\n'), ((12027, 12119), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {'dtype': 'tf.float32', 'initializer': '(-m_init * scale_init)', 'trainable': '(True)'}), "('b', dtype=tf.float32, initializer=-m_init * scale_init,\n trainable=True)\n", (12042, 12119), True, 'import tensorflow as tf\n'), ((12616, 12631), 'tensorflow.matmul', 'tf.matmul', (['x', 'V'], {}), '(x, V)\n', (12625, 12631), True, 'import tensorflow as tf\n'), ((13589, 13637), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'V_norm', '([1] + stride + [1])', 'pad'], {}), '(x, V_norm, [1] + stride + [1], pad)\n', (13601, 13637), True, 'import tensorflow as tf\n'), ((13668, 13700), 'tensorflow.nn.moments', 'tf.nn.moments', (['x_init', '[0, 1, 2]'], {}), '(x_init, [0, 1, 2])\n', (13681, 13700), 
True, 'import tensorflow as tf\n'), ((13780, 13858), 'tensorflow.get_variable', 'tf.get_variable', (['"""g"""'], {'dtype': 'tf.float32', 'initializer': 'scale_init', 'trainable': '(True)'}), "('g', dtype=tf.float32, initializer=scale_init, trainable=True)\n", (13795, 13858), True, 'import tensorflow as tf\n'), ((13909, 14001), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {'dtype': 'tf.float32', 'initializer': '(-m_init * scale_init)', 'trainable': '(True)'}), "('b', dtype=tf.float32, initializer=-m_init * scale_init,\n trainable=True)\n", (13924, 14001), True, 'import tensorflow as tf\n'), ((15849, 15934), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'V_norm', 'target_shape', '([1] + stride + [1])'], {'padding': 'pad'}), '(x, V_norm, target_shape, [1] + stride + [1], padding=pad\n )\n', (15871, 15934), True, 'import tensorflow as tf\n'), ((16006, 16038), 'tensorflow.nn.moments', 'tf.nn.moments', (['x_init', '[0, 1, 2]'], {}), '(x_init, [0, 1, 2])\n', (16019, 16038), True, 'import tensorflow as tf\n'), ((16118, 16196), 'tensorflow.get_variable', 'tf.get_variable', (['"""g"""'], {'dtype': 'tf.float32', 'initializer': 'scale_init', 'trainable': '(True)'}), "('g', dtype=tf.float32, initializer=scale_init, trainable=True)\n", (16133, 16196), True, 'import tensorflow as tf\n'), ((16247, 16339), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {'dtype': 'tf.float32', 'initializer': '(-m_init * scale_init)', 'trainable': '(True)'}), "('b', dtype=tf.float32, initializer=-m_init * scale_init,\n trainable=True)\n", (16262, 16339), True, 'import tensorflow as tf\n'), ((17031, 17106), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'W', 'target_shape', '([1] + stride + [1])'], {'padding': 'pad'}), '(x, W, target_shape, [1] + stride + [1], padding=pad)\n', (17053, 17106), True, 'import tensorflow as tf\n'), ((17142, 17162), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {}), '(x, b)\n', (17156, 17162), 
True, 'import tensorflow as tf\n'), ((17451, 17466), 'numpy.prod', 'np.prod', (['s[:-1]'], {}), '(s[:-1])\n', (17458, 17466), True, 'import numpy as np\n'), ((18630, 18646), 'tensorflow.matmul', 'tf.matmul', (['h', 'hw'], {}), '(h, hw)\n', (18639, 18646), True, 'import tensorflow as tf\n'), ((18967, 19004), 'tensorflow.zeros', 'tf.zeros', (['[xs[0], step, xs[2], xs[3]]'], {}), '([xs[0], step, xs[2], xs[3]])\n', (18975, 19004), True, 'import tensorflow as tf\n'), ((19116, 19153), 'tensorflow.zeros', 'tf.zeros', (['[xs[0], xs[1], step, xs[3]]'], {}), '([xs[0], xs[1], step, xs[3]])\n', (19124, 19153), True, 'import tensorflow as tf\n'), ((19281, 19318), 'tensorflow.zeros', 'tf.zeros', (['[xs[0], xs[1], step, xs[3]]'], {}), '([xs[0], xs[1], step, xs[3]])\n', (19289, 19318), True, 'import tensorflow as tf\n'), ((22063, 22083), 'numpy.sqrt', 'np.sqrt', (['canvas_size'], {}), '(canvas_size)\n', (22070, 22083), True, 'import numpy as np\n'), ((22184, 22260), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['(-causal_mask)', '[1, rate, rate, 1]', '[1, rate, rate, 1]', '"""SAME"""'], {}), "(-causal_mask, [1, rate, rate, 1], [1, rate, rate, 1], 'SAME')\n", (22198, 22260), True, 'import tensorflow as tf\n'), ((22773, 22793), 'tensorflow.range', 'tf.range', (['(0.0)', 'xs[1]'], {}), '(0.0, xs[1])\n', (22781, 22793), True, 'import tensorflow as tf\n'), ((22817, 22837), 'tensorflow.range', 'tf.range', (['(0.0)', 'xs[2]'], {}), '(0.0, xs[2])\n', (22825, 22837), True, 'import tensorflow as tf\n'), ((23306, 23353), 'tensorflow.reshape', 'tf.reshape', (['query', '[bs, canvas_size_q, nr_chns]'], {}), '(query, [bs, canvas_size_q, nr_chns])\n', (23316, 23353), True, 'import tensorflow as tf\n'), ((23364, 23407), 'tensorflow.reshape', 'tf.reshape', (['key', '[bs, canvas_size, nr_chns]'], {}), '(key, [bs, canvas_size, nr_chns])\n', (23374, 23407), True, 'import tensorflow as tf\n'), ((23657, 23711), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['causal_exp_dot'], {'axis': '(-1)', 'keep_dims': 
'(True)'}), '(causal_exp_dot, axis=-1, keep_dims=True)\n', (23670, 23711), True, 'import tensorflow as tf\n'), ((24122, 24162), 'tensorflow.ones', 'tf.ones', (['[canvas_size]'], {'dtype': 'tf.float32'}), '([canvas_size], dtype=tf.float32)\n', (24129, 24162), True, 'import tensorflow as tf\n'), ((24990, 25033), 'tensorflow.reduce_max', 'tf.reduce_max', (['dot'], {'axis': '(-1)', 'keep_dims': '(True)'}), '(dot, axis=-1, keep_dims=True)\n', (25003, 25033), True, 'import tensorflow as tf\n'), ((25309, 25357), 'tensorflow.reshape', 'tf.reshape', (['mixin', '[bs, canvas_size, mixin_chns]'], {}), '(mixin, [bs, canvas_size, mixin_chns])\n', (25319, 25357), True, 'import tensorflow as tf\n'), ((906, 920), 'tensorflow.exp', 'tf.exp', (['(x - m2)'], {}), '(x - m2)\n', (912, 920), True, 'import tensorflow as tf\n'), ((1169, 1182), 'tensorflow.exp', 'tf.exp', (['(x - m)'], {}), '(x - m)\n', (1175, 1182), True, 'import tensorflow as tf\n'), ((9014, 9023), 'tensorflow.log', 'tf.log', (['u'], {}), '(u)\n', (9020, 9023), True, 'import tensorflow as tf\n'), ((9026, 9041), 'tensorflow.log', 'tf.log', (['(1.0 - u)'], {}), '(1.0 - u)\n', (9032, 9041), True, 'import tensorflow as tf\n'), ((10615, 10627), 'tensorflow.square', 'tf.square', (['g'], {}), '(g)\n', (10624, 10627), True, 'import tensorflow as tf\n'), ((10659, 10674), 'tensorflow.pow', 'tf.pow', (['mom2', 't'], {}), '(mom2, t)\n', (10665, 10674), True, 'import tensorflow as tf\n'), ((11594, 11631), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (11622, 11631), True, 'import tensorflow as tf\n'), ((11857, 11880), 'tensorflow.sqrt', 'tf.sqrt', (['(v_init + 1e-10)'], {}), '(v_init + 1e-10)\n', (11864, 11880), True, 'import tensorflow as tf\n'), ((12171, 12209), 'tensorflow.reshape', 'tf.reshape', (['scale_init', '[1, num_units]'], {}), '(scale_init, [1, num_units])\n', (12181, 12209), True, 'import tensorflow as tf\n'), ((12777, 12806), 'tensorflow.reshape', 
'tf.reshape', (['b', '[1, num_units]'], {}), '(b, [1, num_units])\n', (12787, 12806), True, 'import tensorflow as tf\n'), ((13437, 13474), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (13465, 13474), True, 'import tensorflow as tf\n'), ((13740, 13763), 'tensorflow.sqrt', 'tf.sqrt', (['(v_init + 1e-08)'], {}), '(v_init + 1e-08)\n', (13747, 13763), True, 'import tensorflow as tf\n'), ((14053, 14099), 'tensorflow.reshape', 'tf.reshape', (['scale_init', '[1, 1, 1, num_filters]'], {}), '(scale_init, [1, 1, 1, num_filters])\n', (14063, 14099), True, 'import tensorflow as tf\n'), ((14530, 14567), 'tensorflow.reshape', 'tf.reshape', (['g', '[1, 1, 1, num_filters]'], {}), '(g, [1, 1, 1, num_filters])\n', (14540, 14567), True, 'import tensorflow as tf\n'), ((14589, 14621), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['V', '[0, 1, 2]'], {}), '(V, [0, 1, 2])\n', (14607, 14621), True, 'import tensorflow as tf\n'), ((14708, 14751), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W', '([1] + stride + [1])', 'pad'], {}), '(x, W, [1] + stride + [1], pad)\n', (14720, 14751), True, 'import tensorflow as tf\n'), ((15697, 15734), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (15725, 15734), True, 'import tensorflow as tf\n'), ((16078, 16101), 'tensorflow.sqrt', 'tf.sqrt', (['(v_init + 1e-08)'], {}), '(v_init + 1e-08)\n', (16085, 16101), True, 'import tensorflow as tf\n'), ((16391, 16437), 'tensorflow.reshape', 'tf.reshape', (['scale_init', '[1, 1, 1, num_filters]'], {}), '(scale_init, [1, 1, 1, num_filters])\n', (16401, 16437), True, 'import tensorflow as tf\n'), ((16868, 16905), 'tensorflow.reshape', 'tf.reshape', (['g', '[1, 1, num_filters, 1]'], {}), '(g, [1, 1, num_filters, 1])\n', (16878, 16905), True, 'import tensorflow as tf\n'), ((16927, 16959), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['V', '[0, 1, 3]'], {}), '(V, [0, 1, 
3])\n', (16945, 16959), True, 'import tensorflow as tf\n'), ((21463, 21486), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (21484, 21486), True, 'import tensorflow as tf\n'), ((22907, 22963), 'tensorflow.tile', 'tf.tile', (['pos1[None, :, None, None]', '[xs[0], 1, xs[2], 1]'], {}), '(pos1[None, :, None, None], [xs[0], 1, xs[2], 1])\n', (22914, 22963), True, 'import tensorflow as tf\n'), ((22978, 23034), 'tensorflow.tile', 'tf.tile', (['pos2[None, None, :, None]', '[xs[0], xs[2], 1, 1]'], {}), '(pos2[None, None, :, None], [xs[0], xs[2], 1, 1])\n', (22985, 23034), True, 'import tensorflow as tf\n'), ((24779, 24824), 'tensorflow.reshape', 'tf.reshape', (['query', '[bs, canvas_size, nr_chns]'], {}), '(query, [bs, canvas_size, nr_chns])\n', (24789, 24824), True, 'import tensorflow as tf\n'), ((24843, 24886), 'tensorflow.reshape', 'tf.reshape', (['key', '[bs, canvas_size, nr_chns]'], {}), '(key, [bs, canvas_size, nr_chns])\n', (24853, 24886), True, 'import tensorflow as tf\n'), ((25168, 25222), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['causal_exp_dot'], {'axis': '(-1)', 'keep_dims': '(True)'}), '(causal_exp_dot, axis=-1, keep_dims=True)\n', (25181, 25222), True, 'import tensorflow as tf\n'), ((4516, 4544), 'tensorflow.maximum', 'tf.maximum', (['cdf_delta', '(1e-12)'], {}), '(cdf_delta, 1e-12)\n', (4526, 4544), True, 'import tensorflow as tf\n'), ((4561, 4574), 'numpy.log', 'np.log', (['(127.5)'], {}), '(127.5)\n', (4567, 4574), True, 'import numpy as np\n'), ((7690, 7718), 'tensorflow.maximum', 'tf.maximum', (['cdf_delta', '(1e-12)'], {}), '(cdf_delta, 1e-12)\n', (7700, 7718), True, 'import tensorflow as tf\n'), ((7735, 7748), 'numpy.log', 'np.log', (['(127.5)'], {}), '(127.5)\n', (7741, 7748), True, 'import numpy as np\n'), ((10475, 10490), 'tensorflow.pow', 'tf.pow', (['mom1', 't'], {}), '(mom1, t)\n', (10481, 10490), True, 'import tensorflow as tf\n'), ((12240, 12274), 'tensorflow.reshape', 'tf.reshape', (['m_init', '[1, num_units]'], 
{}), '(m_init, [1, num_units])\n', (12250, 12274), True, 'import tensorflow as tf\n'), ((12717, 12751), 'tensorflow.reshape', 'tf.reshape', (['scaler', '[1, num_units]'], {}), '(scaler, [1, num_units])\n', (12727, 12751), True, 'import tensorflow as tf\n'), ((14146, 14188), 'tensorflow.reshape', 'tf.reshape', (['m_init', '[1, 1, 1, num_filters]'], {}), '(m_init, [1, 1, 1, num_filters])\n', (14156, 14188), True, 'import tensorflow as tf\n'), ((16484, 16526), 'tensorflow.reshape', 'tf.reshape', (['m_init', '[1, 1, 1, num_filters]'], {}), '(m_init, [1, 1, 1, num_filters])\n', (16494, 16526), True, 'import tensorflow as tf\n'), ((18490, 18527), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (18518, 18527), True, 'import tensorflow as tf\n'), ((12680, 12692), 'tensorflow.square', 'tf.square', (['V'], {}), '(V)\n', (12689, 12692), True, 'import tensorflow as tf\n'), ((23568, 23584), 'numpy.sqrt', 'np.sqrt', (['nr_chns'], {}), '(nr_chns)\n', (23575, 23584), True, 'import numpy as np\n'), ((25075, 25091), 'numpy.sqrt', 'np.sqrt', (['nr_chns'], {}), '(nr_chns)\n', (25082, 25091), True, 'import numpy as np\n')] |
"""
NCL_proj_3.py
=============
This script illustrates the following concepts:
- Drawing filled contours over an orthographic map
- Changing the center latitude and longitude for an orthographic projection
- Turning off map fill
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/proj_3.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/proj_3_lg.png
"""
###############################################################################
# Import packages:
import numpy as np
import xarray as xr
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import geocat.datafiles as gdf
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/atmos.nc"), decode_times=False)
t = ds.TS.isel(time=0)
###############################################################################
# Fix the artifact of not-shown-data around 0 and 360-degree longitudes
wrap_t = gvutil.xr_add_cyclic_longitudes(t, "lon")
###############################################################################
#Plot:
# Generate figure (set its size (width, height) in inches)
fig = plt.figure(figsize=(10, 10))
# Generate axes using Cartopy and draw coastlines with
ax = plt.axes(
projection=ccrs.Orthographic(central_longitude=-120, central_latitude=50))
# Set extent to include latitudes between 0 and 90, and longitude between
# 0 and -180 only
ax.set_extent([0, -180, 0, 90], ccrs.PlateCarree())
ax.set_global()
ax.coastlines(linewidths=0.5)
# Plot data and add a colorbar
temp = wrap_t.plot.contourf(ax=ax,
transform=ccrs.PlateCarree(),
levels=11,
cmap='coolwarm',
add_colorbar=False)
cbar_ticks = np.arange(210, 311, 10)
cbar = plt.colorbar(temp,
orientation='horizontal',
shrink=0.75,
pad=0.05,
extendrect=True,
ticks=cbar_ticks)
cbar.ax.tick_params(labelsize=10)
# Use geocat.viz.util convenience function to add titles to left and right
# of the plot axis.
gvutil.set_titles_and_labels(ax,
maintitle="Example of Orthogonal Projection",
lefttitle="Surface Temperature",
righttitle="K")
# Show the plot
plt.show()
| [
"matplotlib.pyplot.show",
"geocat.viz.util.set_titles_and_labels",
"geocat.viz.util.xr_add_cyclic_longitudes",
"geocat.datafiles.get",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.arange",
"cartopy.crs.PlateCarree",
"cartopy.crs.Orthographic"
] | [((1184, 1225), 'geocat.viz.util.xr_add_cyclic_longitudes', 'gvutil.xr_add_cyclic_longitudes', (['t', '"""lon"""'], {}), "(t, 'lon')\n", (1215, 1225), True, 'from geocat.viz import util as gvutil\n'), ((1380, 1408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1390, 1408), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2044), 'numpy.arange', 'np.arange', (['(210)', '(311)', '(10)'], {}), '(210, 311, 10)\n', (2030, 2044), True, 'import numpy as np\n'), ((2052, 2158), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['temp'], {'orientation': '"""horizontal"""', 'shrink': '(0.75)', 'pad': '(0.05)', 'extendrect': '(True)', 'ticks': 'cbar_ticks'}), "(temp, orientation='horizontal', shrink=0.75, pad=0.05,\n extendrect=True, ticks=cbar_ticks)\n", (2064, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2386, 2522), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax'], {'maintitle': '"""Example of Orthogonal Projection"""', 'lefttitle': '"""Surface Temperature"""', 'righttitle': '"""K"""'}), "(ax, maintitle=\n 'Example of Orthogonal Projection', lefttitle='Surface Temperature',\n righttitle='K')\n", (2414, 2522), True, 'from geocat.viz import util as gvutil\n'), ((2618, 2628), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2626, 2628), True, 'import matplotlib.pyplot as plt\n'), ((945, 977), 'geocat.datafiles.get', 'gdf.get', (['"""netcdf_files/atmos.nc"""'], {}), "('netcdf_files/atmos.nc')\n", (952, 977), True, 'import geocat.datafiles as gdf\n'), ((1684, 1702), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1700, 1702), True, 'import cartopy.crs as ccrs\n'), ((1495, 1557), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', ([], {'central_longitude': '(-120)', 'central_latitude': '(50)'}), '(central_longitude=-120, central_latitude=50)\n', (1512, 1557), True, 'import cartopy.crs as ccrs\n'), ((1855, 1873), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', 
([], {}), '()\n', (1871, 1873), True, 'import cartopy.crs as ccrs\n')] |
from __future__ import print_function
import os
import re
from glob import glob
import numpy as np
import tensorflow as tf
from keras.utils.data_utils import get_file
def get_filename(key):
"""Rename tensor name to the corresponding Keras layer weight name.
# Arguments
key: tensor name in TF (determined by tf.variable_scope)
"""
filename = str(key)
filename = filename.replace('/', '_')
filename = filename.replace('xception_65_', '')
filename = filename.replace('decoder_','',1)
filename = filename.replace('BatchNorm','BN')
if 'Momentum' in filename:
return None
if 'entry_flow' in filename or 'exit_flow' in filename:
filename = filename.replace('_unit_1_xception_module','')
elif 'middle_flow' in filename:
filename = filename.replace('_block1','')
filename = filename.replace('_xception_module','')
# from TF to Keras naming
filename = filename.replace('_weights', '_kernel')
filename = filename.replace('_biases', '_bias')
return filename + '.npy'
def extract_tensors_from_checkpoint_file(filename, output_folder='weights'):
"""Extract tensors from a TF checkpoint file.
# Arguments
filename: TF checkpoint file
output_folder: where to save the output numpy array files
"""
if not os.path.exists(output_folder):
os.makedirs(output_folder)
reader = tf.train.NewCheckpointReader(filename)
for key in reader.get_variable_to_shape_map():
# convert tensor name into the corresponding Keras layer weight name and save
filename = get_filename(key)
if filename:
path = os.path.join(output_folder, get_filename(key))
arr = reader.get_tensor(key)
np.save(path, arr)
print("tensor_name: ", key)
CKPT_URL = 'http://download.tensorflow.org/models/deeplabv3_pascal_trainval_2018_01_04.tar.gz'
MODEL_DIR = 'models'
MODEL_SUBDIR = 'deeplabv3_pascal_trainval'
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
checkpoint_tar = get_file(
'deeplabv3_pascal_trainval_2018_01_04.tar.gz',
CKPT_URL,
extract=True,
cache_subdir='',
cache_dir=MODEL_DIR)
checkpoint_file = os.path.join(MODEL_DIR,MODEL_SUBDIR, 'model.ckpt')
extract_tensors_from_checkpoint_file(checkpoint_file)
| [
"numpy.save",
"os.makedirs",
"os.path.exists",
"keras.utils.data_utils.get_file",
"tensorflow.train.NewCheckpointReader",
"os.path.join"
] | [((2059, 2181), 'keras.utils.data_utils.get_file', 'get_file', (['"""deeplabv3_pascal_trainval_2018_01_04.tar.gz"""', 'CKPT_URL'], {'extract': '(True)', 'cache_subdir': '""""""', 'cache_dir': 'MODEL_DIR'}), "('deeplabv3_pascal_trainval_2018_01_04.tar.gz', CKPT_URL, extract=\n True, cache_subdir='', cache_dir=MODEL_DIR)\n", (2067, 2181), False, 'from keras.utils.data_utils import get_file\n'), ((2217, 2268), 'os.path.join', 'os.path.join', (['MODEL_DIR', 'MODEL_SUBDIR', '"""model.ckpt"""'], {}), "(MODEL_DIR, MODEL_SUBDIR, 'model.ckpt')\n", (2229, 2268), False, 'import os\n'), ((1407, 1445), 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['filename'], {}), '(filename)\n', (1435, 1445), True, 'import tensorflow as tf\n'), ((1988, 2013), 'os.path.exists', 'os.path.exists', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (2002, 2013), False, 'import os\n'), ((2019, 2041), 'os.makedirs', 'os.makedirs', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (2030, 2041), False, 'import os\n'), ((1327, 1356), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (1341, 1356), False, 'import os\n'), ((1366, 1392), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (1377, 1392), False, 'import os\n'), ((1761, 1779), 'numpy.save', 'np.save', (['path', 'arr'], {}), '(path, arr)\n', (1768, 1779), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.