code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 <NAME>
""" Container class for optical usage information
.. Created on Thu Jan 25 11:01:04 2018
.. codeauthor: <NAME>
"""
import math
import numpy as np
from rayoptics.parax.firstorder import compute_first_order, list_parax_trace
from rayoptics.raytr.trace import aim_chief_ray
from rayoptics.optical import model_enums
import rayoptics.optical.model_constants as mc
from opticalglass.spectral_lines import get_wavelength
import rayoptics.util.colour_system as cs
from rayoptics.util import colors
srgb = cs.cs_srgb
class OpticalSpecs:
    """ Container class for optical usage information

    Contains optical usage information to specify the aperture, field of view,
    spectrum and focal position. These can be accessed via the mapping
    interface:

        - self['wvls']: instance of :class:`~.WvlSpec`
        - self['pupil']: instance of :class:`~.PupilSpec`
        - self['fov']: instance of :class:`~.FieldSpec`
        - self['focus']: instance of :class:`~.FocusRange`

    It also maintains a repository of paraxial data.

    Attributes:
        parax_data: tuple of :obj:`~.firstorder.ParaxData`
    """

    # class-wide default for whether chief rays are iteratively aimed
    # (see update_model); instances copy this into self.do_aiming
    do_aiming_default = True

    def __init__(self, opt_model, specsheet=None, **kwargs):
        self.opt_model = opt_model
        self._submodels = {}
        self['wvls'] = WvlSpec(**kwargs)
        self['pupil'] = PupilSpec(self)
        self['fov'] = FieldSpec(self)
        self['focus'] = FocusRange(0.0)
        self.parax_data = None
        self.do_aiming = OpticalSpecs.do_aiming_default
        if specsheet:
            self.set_from_specsheet(specsheet)

    def __getitem__(self, key):
        """ Provide mapping interface to submodels. """
        return self._submodels[key]

    def __setitem__(self, key, value):
        """ Provide mapping interface to submodels. """
        self._submodels[key] = value

    def __json_encode__(self):
        # drop back-pointers and derived data; expose the submodels under
        # their legacy attribute names for serialization compatibility
        attrs = dict(vars(self))
        del attrs['opt_model']
        del attrs['_submodels']
        del attrs['parax_data']
        del attrs['do_aiming']
        attrs['spectral_region'] = self['wvls']
        attrs['pupil'] = self['pupil']
        attrs['field_of_view'] = self['fov']
        attrs['defocus'] = self['focus']
        return attrs

    def __json_decode__(self, **attrs):
        # rebuild the submodel dict from the legacy attribute names;
        # older files may lack 'defocus', so default to zero focus shift
        submodels = {}
        submodels['wvls'] = attrs['spectral_region']
        submodels['pupil'] = attrs['pupil']
        submodels['fov'] = attrs['field_of_view']
        submodels['focus'] = (attrs['defocus'] if 'defocus' in attrs
                              else FocusRange(0.0))
        self._submodels = submodels

    @property
    def spectral_region(self):
        return self._submodels['wvls']

    @spectral_region.setter
    def spectral_region(self, sr):
        self._submodels['wvls'] = sr

    @property
    def pupil(self):
        return self._submodels['pupil']

    @pupil.setter
    def pupil(self, pup):
        self._submodels['pupil'] = pup

    @property
    def field_of_view(self):
        return self._submodels['fov']

    @field_of_view.setter
    def field_of_view(self, fov):
        self._submodels['fov'] = fov

    @property
    def defocus(self):
        return self._submodels['focus']

    @defocus.setter
    def defocus(self, foc):
        self._submodels['focus'] = foc

    def set_from_list(self, dl):
        """ Set spectral region, pupil and field of view from a 3-item list. """
        self.spectral_region = dl[0]
        self.pupil = dl[1]
        self.field_of_view = dl[2]

    def set_from_specsheet(self, ss):
        """ Propagate specsheet values to each submodel. """
        self.spectral_region.set_from_specsheet(ss)
        self.pupil.set_from_specsheet(ss)
        self.field_of_view.set_from_specsheet(ss)
        self.defocus.set_from_specsheet(ss)

    def sync_to_restore(self, opt_model):
        """ Reattach model back-pointers after deserialization. """
        self.opt_model = opt_model
        # older serialized models predate the do_aiming attribute
        if not hasattr(self, 'do_aiming'):
            self.do_aiming = OpticalSpecs.do_aiming_default
        self['wvls'].sync_to_restore(self)
        self['pupil'].sync_to_restore(self)
        self['fov'].sync_to_restore(self)

    def update_model(self, **kwargs):
        self.spectral_region.update_model(**kwargs)
        self.pupil.update_model(**kwargs)
        self.field_of_view.update_model(**kwargs)
        stop = self.opt_model.seq_model.stop_surface
        wvl = self.spectral_region.central_wvl
        # first order data is only computed once the sequential model has
        # surfaces beyond the object and image interfaces
        if self.opt_model.seq_model.get_num_surfaces() > 2:
            self.parax_data = compute_first_order(self.opt_model, stop, wvl)
            if self.do_aiming:
                for i, fld in enumerate(self.field_of_view.fields):
                    aim_pt = aim_chief_ray(self.opt_model, fld, wvl)
                    fld.aim_pt = aim_pt

    def lookup_fld_wvl_focus(self, fi, wl=None, fr=0.0):
        """ returns field, wavelength and defocus data

        Args:
            fi (int): index into the field_of_view list of Fields
            wl (int): index into the spectral_region list of wavelengths
            fr (float): focus range parameter, -1.0 to 1.0

        Returns:
            (**fld**, **wvl**, **foc**)

            - **fld** - :class:`Field` instance for field_of_view[fi]
            - **wvl** - wavelength in nm
            - **foc** - focus shift from image interface
        """
        if wl is None:
            # default to the reference (central) wavelength
            wvl = self.spectral_region.central_wvl
        else:
            wvl = self.spectral_region.wavelengths[wl]
        fld = self.field_of_view.fields[fi]
        foc = self.defocus.get_focus(fr)
        return fld, wvl, foc

    def obj_coords(self, fld):
        """ Delegate object-space coordinate lookup to the field spec. """
        return self.field_of_view.obj_coords(fld)

    def list_first_order_data(self):
        self.parax_data.fod.list_first_order_data()

    def list_parax_trace(self, **kwargs):
        list_parax_trace(self.opt_model, **kwargs)
class WvlSpec:
    """ Class defining a spectral region

    A spectral region is a list of wavelengths (in nm) and corresponding
    weights. The central wavelength of the spectral region is central_wvl.
    The index into the wavelength list for central_wvl is reference_wvl.
    """

    def __init__(self, wlwts=(('d', 1.),), ref_wl=0, do_init=True, **kwargs):
        # default is a tuple (not a list) so the shared default can never
        # be mutated across instances; iteration behavior is identical
        if do_init:
            self.set_from_list(wlwts)
        else:
            self.wavelengths = []
            self.spectral_wts = []
        self.reference_wvl = ref_wl
        self.coating_wvl = 550.0

    @property
    def central_wvl(self):
        """ The wavelength (nm) at index reference_wvl. """
        return self.wavelengths[self.reference_wvl]

    @central_wvl.setter
    def central_wvl(self, wvl):
        self.wavelengths[self.reference_wvl] = wvl

    def set_from_list(self, wlwts):
        """ Replace the spectral region with (wavelength-or-line, weight) pairs. """
        self.wavelengths = []
        self.spectral_wts = []
        for wlwt in wlwts:
            # get_wavelength resolves spectral line names (e.g. 'd') to nm
            self.wavelengths.append(get_wavelength(wlwt[0]))
            self.spectral_wts.append(wlwt[1])
        self.calc_colors()

    def sync_to_restore(self, optical_spec):
        self.calc_colors()

    def set_from_specsheet(self, ss):
        pass

    def update_model(self, **kwargs):
        self.calc_colors()

    def add(self, wl, wt):
        """ Append a wavelength/weight pair, keeping both lists sorted together.

        Bug fix: the previous implementation sorted a nonexistent
        ``self.spectrum`` attribute, raising AttributeError on every call.
        The parallel wavelength/weight lists are now zipped, sorted longest
        wavelength first, and unzipped back.
        """
        self.wavelengths.append(get_wavelength(wl))
        self.spectral_wts.append(wt)
        spectrum = sorted(zip(self.wavelengths, self.spectral_wts),
                          key=lambda w: w[0], reverse=True)
        self.wavelengths = [wvl for wvl, _ in spectrum]
        self.spectral_wts = [w for _, w in spectrum]

    def calc_colors(self):
        """ Assign a render color to each wavelength. """
        accent = colors.accent_colors()
        self.render_colors = []
        num_wvls = len(self.wavelengths)
        if num_wvls == 1:
            self.render_colors.append(accent['green'])
        elif num_wvls > 1:
            # step reverses the palette when wavelengths run long -> short
            step = 1 if self.wavelengths[0] < self.wavelengths[-1] else -1
            if num_wvls == 2:
                c = ['blue', 'red']
            elif num_wvls == 3:
                c = ['blue', 'green', 'red']
            elif num_wvls == 4:
                c = ['blue', 'green', 'yellow', 'red']
            elif num_wvls == 5:
                c = ['violet', 'cyan', 'green', 'yellow', 'red']
            elif num_wvls == 6:
                c = ['violet', 'cyan', 'green', 'yellow', 'red', 'magenta']
            else:
                c = ['violet', 'blue', 'cyan', 'green', 'yellow',
                     'red', 'magenta']
            self.render_colors = [accent[clr] for clr in c[::step]]
class PupilSpec:
    """ Aperture specification

    Attributes:
        key: 'aperture', 'object'|'image', 'pupil'|'NA'|'f/#'
        value: size of the pupil
        pupil_rays: list of relative pupil coordinates for pupil limiting rays
        ray_labels: list of string labels for pupil_rays
    """

    default_pupil_rays = [[0., 0.], [1., 0.], [-1., 0.], [0., 1.], [0., -1.]]
    default_ray_labels = ['00', '+X', '-X', '+Y', '-Y']

    def __init__(self, parent, key=('object', 'pupil'), value=1.0):
        self.optical_spec = parent
        obj_img_key, value_key = key
        self.key = 'aperture', obj_img_key, value_key
        self.value = value
        self.pupil_rays = PupilSpec.default_pupil_rays
        self.ray_labels = PupilSpec.default_ray_labels

    def __json_encode__(self):
        """ Serialize all attributes except the back-pointer to the parent. """
        attrs = dict(vars(self))
        del attrs['optical_spec']
        return attrs

    def sync_to_restore(self, optical_spec):
        """ Reattach the parent spec; migrate legacy pupil_type models. """
        if hasattr(self, 'pupil_type'):
            self.key = model_enums.get_ape_key_for_type(self.pupil_type)
            del self.pupil_type
        self.optical_spec = optical_spec

    def set_from_specsheet(self, ss):
        self.key, self.value = ss.get_etendue_inputs('aperture')

    def get_input_for_specsheet(self):
        return self.key, self.value

    def update_model(self, **kwargs):
        # older restored models may lack ray sampling data; fill in defaults
        if not hasattr(self, 'pupil_rays'):
            self.pupil_rays = PupilSpec.default_pupil_rays
            self.ray_labels = PupilSpec.default_ray_labels

    def get_pupil_type(self):
        return model_enums.get_ape_type_for_key(self.key).value

    def mutate_pupil_type(self, new_pupil_type):
        """ Switch the aperture key, converting value via paraxial data. """
        ape_key = model_enums.get_ape_key_for_type(new_pupil_type)
        aperture, obj_img_key, value_key = ape_key
        osp = self.optical_spec
        if osp is not None and osp.parax_data is not None:
            fod = osp.parax_data.fod
            # re-express the current aperture in terms of the new key;
            # combinations not listed here leave self.value untouched
            conversions = {
                ('object', 'pupil'): lambda: 2*fod.enp_radius,
                ('object', 'NA'): lambda: fod.obj_na,
                ('image', 'f/#'): lambda: fod.fno,
                ('image', 'NA'): lambda: fod.img_na,
            }
            convert = conversions.get((obj_img_key, value_key))
            if convert is not None:
                self.value = convert()
        self.key = ape_key
class FieldSpec:
    """ Field of view specification

    Attributes:
        key: 'field', 'object'|'image', 'height'|'angle'
        value: maximum field, per the key
        fields: list of Field instances
        is_relative: if True, `fields` are relative to max field
    """

    def __init__(self, parent, key=('object', 'angle'), value=0., flds=[0.],
                 is_relative=False, do_init=True, **kwargs):
        self.optical_spec = parent
        self.key = 'field', key[0], key[1]
        self.value = value
        self.is_relative = is_relative
        if do_init:
            self.set_from_list(flds)
        else:
            self.fields = []

    def __json_encode__(self):
        # drop the back-pointer to the parent optical spec before serializing
        attrs = dict(vars(self))
        del attrs['optical_spec']
        return attrs

    def sync_to_restore(self, optical_spec):
        # migrate older serialized models to the current attribute set
        if hasattr(self, 'field_type'):
            self.key = model_enums.get_fld_key_for_type(self.field_type)
            del self.field_type
        if not hasattr(self, 'is_relative'):
            self.is_relative = False
        if not hasattr(self, 'value'):
            self.value, _ = self.max_field()
        self.optical_spec = optical_spec

    def __str__(self):
        return "key={}, max field={}".format(self.key, self.max_field()[0])

    def set_from_list(self, flds):
        """ Populate self.fields with y-axis field points from a list of values. """
        self.fields = [Field() for f in range(len(flds))]
        for i, f in enumerate(self.fields):
            f.y = flds[i]
        self.value, _ = self.max_field()

    def set_from_specsheet(self, ss):
        key, value = ss.get_etendue_inputs('field')
        if value != 0 and len(self.fields) == 1:
            # just one field, add a second one for max value
            self.is_relative = True
            self.fields.append(Field(x=0, y=1))
        if not self.is_relative:
            # rescale absolute field coordinates to the new maximum value
            fld_scale = 1 if self.value == 0 else value/self.value
            for i, f in enumerate(self.fields):
                f.x *= fld_scale
                f.y *= fld_scale
        self.key, self.value = key, value

    def get_input_for_specsheet(self):
        return self.key, self.value

    def update_model(self, **kwargs):
        # clear cached ray data on each field point
        for f in self.fields:
            f.update()
        # recalculate max_field and relabel fields.
        #  relabeling really assumes the fields are radial, specifically,
        #  y axis only
        if self.is_relative:
            field_norm = 1
        else:
            field_norm = 1 if self.value == 0 else 1.0/self.value
        self.index_labels = []
        for f in self.fields:
            if f.x != 0.0:
                fldx = '{:5.2f}x'.format(field_norm*f.x)
            else:
                fldx = ''
            if f.y != 0.0:
                fldy = '{:5.2f}y'.format(field_norm*f.y)
            else:
                fldy = ''
            self.index_labels.append(fldx + fldy)
        self.index_labels[0] = 'axis'
        if len(self.index_labels) > 1:
            self.index_labels[-1] = 'edge'
        return self

    def get_field_type(self):
        return model_enums.get_fld_type_for_key(self.key).value

    def mutate_field_type(self, new_field_type):
        """ Switch the field key, converting value via paraxial data if present. """
        osp = self.optical_spec
        fld_key = model_enums.get_fld_key_for_type(new_field_type)
        field, obj_img_key, value_key = fld_key
        if self.optical_spec is not None:
            if osp.parax_data is not None:
                fod = self.optical_spec.parax_data.fod
                if obj_img_key == 'object':
                    if value_key == 'height':
                        self.value = osp.parax_data.pr_ray[0][mc.ht]
                    elif value_key == 'angle':
                        self.value = fod.obj_ang
                elif obj_img_key == 'image':
                    if value_key == 'height':
                        self.value = fod.img_ht
        self.key = fld_key

    def obj_coords(self, fld):
        """ Return object-space coordinates for *fld*, per the field key. """
        fld_coord = np.array([fld.x, fld.y, 0.0])
        if self.is_relative:
            # relative fields are fractions of the maximum field value
            fld_coord *= self.value
        field, obj_img_key, value_key = self.key
        fod = self.optical_spec.parax_data.fod
        if obj_img_key == 'object':
            if value_key == 'angle':
                # field angle (degrees, per np.deg2rad) -> object point
                dir_tan = np.tan(np.deg2rad(fld_coord))
                obj_pt = -dir_tan*(fod.obj_dist+fod.enp_dist)
            elif value_key == 'height':
                obj_pt = fld_coord
        elif obj_img_key == 'image':
            if value_key == 'height':
                # scale image height back to object space via reduction ratio
                img_pt = fld_coord
                obj_pt = fod.red*img_pt
        return obj_pt

    def max_field(self):
        """ calculates the maximum field of view

        Returns:
            magnitude of maximum field, maximum Field instance
        """
        max_fld = None
        max_fld_sqrd = -1.0
        for i, f in enumerate(self.fields):
            fld_sqrd = f.x*f.x + f.y*f.y
            if fld_sqrd > max_fld_sqrd:
                max_fld_sqrd = fld_sqrd
                max_fld = i
        max_fld_value = math.sqrt(max_fld_sqrd)
        if self.is_relative:
            max_fld_value *= self.value
        return max_fld_value, max_fld
class Field:
    """ a single field point, largely a data container

    Attributes:
        x: x field component
        y: y field component
        vux: +x vignetting factor
        vuy: +y vignetting factor
        vlx: -x vignetting factor
        vly: -y vignetting factor
        wt: field weight
        aim_pt: x, y chief ray coords on the paraxial entrance pupil plane
        chief_ray: ray package for the ray from the field point throught the
                   center of the aperture stop, traced in the central
                   wavelength
        ref_sphere: a tuple containing (image_pt, ref_dir, ref_sphere_radius)
    """

    def __init__(self, x=0., y=0., wt=1.):
        self.x = x
        self.y = y
        # vignetting factors for the four pupil half-axes, 0.0 == unvignetted
        self.vux = 0.0
        self.vuy = 0.0
        self.vlx = 0.0
        self.vly = 0.0
        self.wt = wt
        # derived/traced data, filled in lazily elsewhere
        self.aim_pt = None
        self.chief_ray = None
        self.ref_sphere = None

    def __json_encode__(self):
        """ Serialize everything except transient trace results. """
        attrs = dict(vars(self))
        for transient in ('chief_ray', 'ref_sphere', 'pupil_rays'):
            attrs.pop(transient, None)
        return attrs

    def __str__(self):
        return f"{self.x}, {self.y}"

    def __repr__(self):
        return f"Field(x={self.x}, y={self.y}, wt={self.wt})"

    def update(self):
        """ Invalidate cached trace data. """
        self.chief_ray = None
        self.ref_sphere = None

    def apply_vignetting(self, pupil):
        """ Return a copy of *pupil* scaled by the applicable vignetting factors. """
        vig_pupil = pupil[:]
        # pick the lower or upper factor depending on the sign of each coord
        vx = self.vlx if pupil[0] < 0.0 else self.vux
        if vx != 0.0:
            vig_pupil[0] *= (1.0 - vx)
        vy = self.vly if pupil[1] < 0.0 else self.vuy
        if vy != 0.0:
            vig_pupil[1] *= (1.0 - vy)
        return vig_pupil
class FocusRange:
    """ Focus range specification

    Attributes:
        focus_shift: focus shift (z displacement) from nominal image interface
        defocus_range: +/- half the total focal range, from the focus_shift
                       position
    """

    def __init__(self, focus_shift=0.0, defocus_range=0.0):
        self.focus_shift = focus_shift
        self.defocus_range = defocus_range

    def __repr__(self):
        return (f"FocusRange(focus_shift={self.focus_shift}, "
                f"defocus_range={self.defocus_range})")

    def set_from_specsheet(self, ss):
        pass

    def update(self):
        pass

    def get_focus(self, fr=0.0):
        """ return focus position for input focus range parameter

        Args:
            fr (float): focus range parameter, -1.0 to 1.0

        Returns:
            focus position for input focus range parameter
        """
        return fr*self.defocus_range + self.focus_shift
| [
"rayoptics.raytr.trace.aim_chief_ray",
"rayoptics.parax.firstorder.compute_first_order",
"math.sqrt",
"rayoptics.optical.model_enums.get_ape_type_for_key",
"numpy.array",
"rayoptics.util.colors.accent_colors",
"opticalglass.spectral_lines.get_wavelength",
"numpy.deg2rad",
"rayoptics.optical.model_en... | [((5738, 5780), 'rayoptics.parax.firstorder.list_parax_trace', 'list_parax_trace', (['self.opt_model'], {}), '(self.opt_model, **kwargs)\n', (5754, 5780), False, 'from rayoptics.parax.firstorder import compute_first_order, list_parax_trace\n'), ((7233, 7255), 'rayoptics.util.colors.accent_colors', 'colors.accent_colors', ([], {}), '()\n', (7253, 7255), False, 'from rayoptics.util import colors\n'), ((9988, 10036), 'rayoptics.optical.model_enums.get_ape_key_for_type', 'model_enums.get_ape_key_for_type', (['new_pupil_type'], {}), '(new_pupil_type)\n', (10020, 10036), False, 'from rayoptics.optical import model_enums\n'), ((13922, 13970), 'rayoptics.optical.model_enums.get_fld_key_for_type', 'model_enums.get_fld_key_for_type', (['new_field_type'], {}), '(new_field_type)\n', (13954, 13970), False, 'from rayoptics.optical import model_enums\n'), ((14632, 14661), 'numpy.array', 'np.array', (['[fld.x, fld.y, 0.0]'], {}), '([fld.x, fld.y, 0.0])\n', (14640, 14661), True, 'import numpy as np\n'), ((15699, 15722), 'math.sqrt', 'math.sqrt', (['max_fld_sqrd'], {}), '(max_fld_sqrd)\n', (15708, 15722), False, 'import math\n'), ((4438, 4484), 'rayoptics.parax.firstorder.compute_first_order', 'compute_first_order', (['self.opt_model', 'stop', 'wvl'], {}), '(self.opt_model, stop, wvl)\n', (4457, 4484), False, 'from rayoptics.parax.firstorder import compute_first_order, list_parax_trace\n'), ((7070, 7088), 'opticalglass.spectral_lines.get_wavelength', 'get_wavelength', (['wl'], {}), '(wl)\n', (7084, 7088), False, 'from opticalglass.spectral_lines import get_wavelength\n'), ((9321, 9370), 'rayoptics.optical.model_enums.get_ape_key_for_type', 'model_enums.get_ape_key_for_type', (['self.pupil_type'], {}), '(self.pupil_type)\n', (9353, 9370), False, 'from rayoptics.optical import model_enums\n'), ((9871, 9913), 'rayoptics.optical.model_enums.get_ape_type_for_key', 'model_enums.get_ape_type_for_key', (['self.key'], {}), '(self.key)\n', (9903, 9913), False, 
'from rayoptics.optical import model_enums\n'), ((11627, 11676), 'rayoptics.optical.model_enums.get_fld_key_for_type', 'model_enums.get_fld_key_for_type', (['self.field_type'], {}), '(self.field_type)\n', (11659, 11676), False, 'from rayoptics.optical import model_enums\n'), ((13773, 13815), 'rayoptics.optical.model_enums.get_fld_type_for_key', 'model_enums.get_fld_type_for_key', (['self.key'], {}), '(self.key)\n', (13805, 13815), False, 'from rayoptics.optical import model_enums\n'), ((6721, 6744), 'opticalglass.spectral_lines.get_wavelength', 'get_wavelength', (['wlwt[0]'], {}), '(wlwt[0])\n', (6735, 6744), False, 'from opticalglass.spectral_lines import get_wavelength\n'), ((4613, 4652), 'rayoptics.raytr.trace.aim_chief_ray', 'aim_chief_ray', (['self.opt_model', 'fld', 'wvl'], {}), '(self.opt_model, fld, wvl)\n', (4626, 4652), False, 'from rayoptics.raytr.trace import aim_chief_ray\n'), ((14931, 14952), 'numpy.deg2rad', 'np.deg2rad', (['fld_coord'], {}), '(fld_coord)\n', (14941, 14952), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable
from chainer import optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
import sys
sys.path.append("//tera/user/boku/study/nn")
import iomod as io
import csv
import pickle
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
#
# command line: learn-data path, test-data path, output directory
argvs = sys.argv
argc = len(argvs)
if (argc != 4):
    print ("Usage: Learn_path Test_path OUtput_path")
    quit()
#parameter
image_side_size = 9
# total voxels per sample (9 x 9 x 9 cube)
image_size = image_side_size * image_side_size * image_side_size
learn_data_size = 1000
test_data_size = 500
hidden1 = 100  # units in the first hidden layer (see AE1 below)
hidden2 = 3    # units in the bottleneck layer (see AE2 below)
pre_N = 1      # pre-training iterations per autoencoder
N = 300        # fine-tuning iterations for the stacked model
#load_file_learn
# raw float64 file, reshaped to (image_size, samples), then min-max
# normalized per column to [0, 1]; network input is the transpose
learn = np.fromfile(argvs[1],np.float64)
learn = learn.astype(np.float32)
learn = learn.reshape(image_size,learn_data_size)
learn_max = np.max(learn,axis = 0)
learn_min = np.min(learn,axis = 0)
learn_mat = ( learn - learn_min[np.newaxis,:] ) / ( learn_max[np.newaxis,:] - learn_min[np.newaxis,:])
xtrain = learn_mat.T
#load_file_test
# same loading/normalization pipeline for the test set
test = np.fromfile(argvs[2],np.float64)
test = test.astype(np.float32)
test = test.reshape(image_size,test_data_size)
test_max = np.max(test,axis = 0)
test_min = np.min(test,axis = 0)
test_mat = ( test - test_min[np.newaxis,:] ) / ( test_max[np.newaxis,:] - test_min[np.newaxis,:])
xtest = test_mat.T
#input_test_save
# save each (un-normalized) input test volume as a raw file for comparison
trans_test = test.T
in_temp = trans_test.copy(order = 'C')
for t in range(test_data_size):
    io.save_raw(in_temp[t,:], argvs[3] + "sae/input_test" + str(t) +".raw",np.float32)
# Define model
class AE1(Chain):
    """ First (outer) autoencoder: image_size -> hidden1 -> image_size. """
    def __init__(self):
        super(AE1, self).__init__(
            l1=L.Linear(image_size,hidden1),
            l2=L.Linear(hidden1,image_size),
        )
    def __call__(self,x):
        bv = self.fwd(x)
        # reconstruction loss against the input
        return F.mean_squared_error(bv, x)
    def fwd(self,x):
        fv = F.sigmoid(self.l1(x))
        bv = F.sigmoid(self.l2(fv))
        # side effect: dump the hidden activations to disk; the training
        # script reloads this pickle as the input for pre-training AE2
        f1 = open(argvs[3] + "sae/picklep_sae_fv.dump", "wb")
        pickle.dump(fv, f1)
        f1.close()
        return bv
class AE2(Chain):
    """ Second (inner) autoencoder: hidden1 -> hidden2 -> hidden1. """

    def __init__(self):
        super(AE2, self).__init__(
            l3=L.Linear(hidden1,hidden2),
            l4=L.Linear(hidden2,hidden1),
        )

    def __call__(self, x):
        reconstruction = self.fwd(x)
        # reconstruction loss against the input
        return F.mean_squared_error(reconstruction, x)

    def fwd(self, x):
        encoded = F.sigmoid(self.l3(x))
        decoded = F.sigmoid(self.l4(encoded))
        return decoded
class MyAE(Chain):
    """ Stacked autoencoder assembled from the two pre-trained autoencoders.

    Layers are initialized with the weights/biases learned by model1
    (outer, AE1) and model2 (inner, AE2), then fine-tuned end to end.
    """
    def __init__(self):
        super(MyAE, self).__init__(
            l11=L.Linear(image_size,hidden1,initialW = model1.l1.W.data,initial_bias = model1.l1.b.data),
            l12=L.Linear(hidden1,hidden2,initialW = model2.l3.W.data, initial_bias = model2.l3.b.data),
            l13 =L.Linear(hidden2,hidden1,initialW = model2.l4.W.data, initial_bias = model2.l4.b.data),
            l14 =L.Linear(hidden1,image_size,initialW = model1.l2.W.data, initial_bias = model1.l2.b.data),
        )
    def __call__(self,x):
        bv2 = self.fwd(x)
        # reconstruction loss against the input
        return F.mean_squared_error(bv2, x)
    def fwd(self,x):
        # encode through both stages, then decode through both stages
        fv1 = F.sigmoid(self.l11(x))
        fv2 = F.sigmoid(self.l12(fv1))
        bv1 = F.sigmoid(self.l13(fv2))
        bv2 = F.sigmoid(self.l14(bv1))
        return bv2
# Initialize model
model1 = AE1()
optimizer = optimizers.Adam()
optimizer.setup(model1)
train_losses = []
test_losses = []
# pre_training1: train the outer autoencoder on the raw inputs
print ("pre_training1")
for i in range(pre_N):
    x_batch = Variable(xtrain)
    model1.zerograds()
    loss = model1(x_batch)
    loss.backward()
    optimizer.update()
#	print (loss.data)
# reload the hidden activations that AE1.fwd dumped to disk; they are
# the training set for the second autoencoder
f = open(argvs[3] + "sae/picklep_sae_fv.dump", "rb")
temp = pickle.load(f)
xtrain2 = temp.data
f.close()
#pre_training2
print ("pre_training2")
model2 = AE2()
optimizer.setup(model2)
for j in range(pre_N):
    x = Variable(xtrain2)
    model2.zerograds()
    loss = model2(x)
    loss.backward()
    optimizer.update()
#	print (loss.data)
#learn: fine-tune the stacked autoencoder end to end
print ("learn")
model3 = MyAE()
optimizer.setup(model3)
for i in range(N):
    x_batch = Variable(xtrain)
    model3.zerograds()
    train_loss = model3(x_batch)
    train_loss.backward()
    optimizer.update()
    train_losses.append(train_loss.data)
    print (train_loss.data)
    #test_loss: evaluated each iteration (no backward/update on it)
    x_batch = Variable(xtest)
    test_loss = model3(x_batch)
    test_losses.append(test_loss.data)
#	print (test_loss.data)
'''
#loss_save
print "train_loss"
for i in range(len(train_losses)):
    print '%f\n' % (train_losses[i]),
print '\n'
print "test_loss"
for i in range(len(test_losses)):
    print '%f\n' % (test_losses[i]),
print '\n'
'''
#final_result: run the test set through the trained stack layer by layer
x = Variable(xtest, volatile='on')
t1 = F.sigmoid(model3.l11(x))
t2 = F.sigmoid(model3.l12(t1))
# save the bottleneck activations for inspection
with open(argvs[3] + 'sae/hidden_out.csv', 'wt') as f:
    writer = csv.writer(f)
    writer.writerows(t2.data)
t3 = F.sigmoid(model3.l13(t2))
y = F.sigmoid(model3.l14(t3))
print(y.shape)
# un-normalize the reconstruction back to the original data range
temp_out = ( y.data.T * ( test_max[np.newaxis,:] - test_min[np.newaxis,:])) + test_min[np.newaxis,:]
temp_out2 = temp_out.T
for t in range(test_data_size):
    io.save_raw(temp_out2[t,:],argvs[3] + "sae/output_test" + str(t) + ".raw",np.float32)
print (test.shape)
print(test)
print (temp_out.shape)
print(temp_out)
# absolute reconstruction error: averaged per sample, then overall
tenmpp = abs(test - temp_out)
tenmpp2 = np.average(tenmpp,axis = 0)
gene = np.average(tenmpp2)
with open(argvs[3] + 'sae/file_gene1.csv', 'wt') as f:
    writer = csv.writer(f)
    writer.writerows(tenmpp)
print (tenmpp2)
print ("gene")
print (gene)
'''
#knini_keizyo
data=np.loadtxt('hidden_in.csv',delimiter=',',dtype=np.float32)
print (data.shape)
hidden_out = Variable(data, volatile='on')
t3_hidden = F.sigmoid(model3.l13(hidden_out))
y_hidden = F.sigmoid(model3.l14(t3_hidden))
hidden_temp_out = y_hidden.data.T
#hidden_temp_out = (y_hidden.data.T* ( test_max[np.newaxis,:] - test_min[np.newaxis,:])) + test_min[np.newaxis,:]
hidden_temp_out2 = hidden_temp_out.T
for t in range(17):
    io.save_raw(hidden_temp_out2[t,:]*100,"C:/Users/yourb/Desktop/sae1/hiddenput_test" + str(t) + ".raw",np.float32)
'''
#matplotlib_setting
plt.plot(train_losses,'b',label = "train_error1")
plt.plot(test_losses,'r',label = "test_error1")
plt.legend()
plt.grid()
plt.show()
| [
"chainer.functions.mean_squared_error",
"numpy.fromfile",
"matplotlib.pyplot.grid",
"pickle.dump",
"numpy.average",
"chainer.optimizers.Adam",
"chainer.Variable",
"matplotlib.pyplot.plot",
"pickle.load",
"csv.writer",
"numpy.max",
"chainer.links.Linear",
"numpy.min",
"sys.path.append",
"... | [((280, 324), 'sys.path.append', 'sys.path.append', (['"""//tera/user/boku/study/nn"""'], {}), "('//tera/user/boku/study/nn')\n", (295, 324), False, 'import sys\n'), ((784, 817), 'numpy.fromfile', 'np.fromfile', (['argvs[1]', 'np.float64'], {}), '(argvs[1], np.float64)\n', (795, 817), True, 'import numpy as np\n'), ((912, 933), 'numpy.max', 'np.max', (['learn'], {'axis': '(0)'}), '(learn, axis=0)\n', (918, 933), True, 'import numpy as np\n'), ((947, 968), 'numpy.min', 'np.min', (['learn'], {'axis': '(0)'}), '(learn, axis=0)\n', (953, 968), True, 'import numpy as np\n'), ((1118, 1151), 'numpy.fromfile', 'np.fromfile', (['argvs[2]', 'np.float64'], {}), '(argvs[2], np.float64)\n', (1129, 1151), True, 'import numpy as np\n'), ((1240, 1260), 'numpy.max', 'np.max', (['test'], {'axis': '(0)'}), '(test, axis=0)\n', (1246, 1260), True, 'import numpy as np\n'), ((1273, 1293), 'numpy.min', 'np.min', (['test'], {'axis': '(0)'}), '(test, axis=0)\n', (1279, 1293), True, 'import numpy as np\n'), ((3349, 3366), 'chainer.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (3364, 3366), False, 'from chainer import optimizers, serializers, utils\n'), ((3698, 3712), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3709, 3712), False, 'import pickle\n'), ((4657, 4687), 'chainer.Variable', 'Variable', (['xtest'], {'volatile': '"""on"""'}), "(xtest, volatile='on')\n", (4665, 4687), False, 'from chainer import cuda, Function, gradient_check, Variable\n'), ((5297, 5323), 'numpy.average', 'np.average', (['tenmpp'], {'axis': '(0)'}), '(tenmpp, axis=0)\n', (5307, 5323), True, 'import numpy as np\n'), ((5333, 5352), 'numpy.average', 'np.average', (['tenmpp2'], {}), '(tenmpp2)\n', (5343, 5352), True, 'import numpy as np\n'), ((6090, 6139), 'matplotlib.pyplot.plot', 'plt.plot', (['train_losses', '"""b"""'], {'label': '"""train_error1"""'}), "(train_losses, 'b', label='train_error1')\n", (6098, 6139), True, 'import matplotlib.pyplot as plt\n'), ((6140, 6187), 
'matplotlib.pyplot.plot', 'plt.plot', (['test_losses', '"""r"""'], {'label': '"""test_error1"""'}), "(test_losses, 'r', label='test_error1')\n", (6148, 6187), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6201), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6199, 6201), True, 'import matplotlib.pyplot as plt\n'), ((6202, 6212), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6210, 6212), True, 'import matplotlib.pyplot as plt\n'), ((6213, 6223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6221, 6223), True, 'import matplotlib.pyplot as plt\n'), ((3504, 3520), 'chainer.Variable', 'Variable', (['xtrain'], {}), '(xtrain)\n', (3512, 3520), False, 'from chainer import cuda, Function, gradient_check, Variable\n'), ((3853, 3870), 'chainer.Variable', 'Variable', (['xtrain2'], {}), '(xtrain2)\n', (3861, 3870), False, 'from chainer import cuda, Function, gradient_check, Variable\n'), ((4078, 4094), 'chainer.Variable', 'Variable', (['xtrain'], {}), '(xtrain)\n', (4086, 4094), False, 'from chainer import cuda, Function, gradient_check, Variable\n'), ((4299, 4314), 'chainer.Variable', 'Variable', (['xtest'], {}), '(xtest)\n', (4307, 4314), False, 'from chainer import cuda, Function, gradient_check, Variable\n'), ((4817, 4830), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4827, 4830), False, 'import csv\n'), ((5421, 5434), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (5431, 5434), False, 'import csv\n'), ((1870, 1897), 'chainer.functions.mean_squared_error', 'F.mean_squared_error', (['bv', 'x'], {}), '(bv, x)\n', (1890, 1897), True, 'import chainer.functions as F\n'), ((2061, 2080), 'pickle.dump', 'pickle.dump', (['fv', 'f1'], {}), '(fv, f1)\n', (2072, 2080), False, 'import pickle\n'), ((2357, 2384), 'chainer.functions.mean_squared_error', 'F.mean_squared_error', (['bv', 'x'], {}), '(bv, x)\n', (2377, 2384), True, 'import chainer.functions as F\n'), ((3078, 3106), 'chainer.functions.mean_squared_error', 
'F.mean_squared_error', (['bv2', 'x'], {}), '(bv2, x)\n', (3098, 3106), True, 'import chainer.functions as F\n'), ((1718, 1747), 'chainer.links.Linear', 'L.Linear', (['image_size', 'hidden1'], {}), '(image_size, hidden1)\n', (1726, 1747), True, 'import chainer.links as L\n'), ((1763, 1792), 'chainer.links.Linear', 'L.Linear', (['hidden1', 'image_size'], {}), '(hidden1, image_size)\n', (1771, 1792), True, 'import chainer.links as L\n'), ((2211, 2237), 'chainer.links.Linear', 'L.Linear', (['hidden1', 'hidden2'], {}), '(hidden1, hidden2)\n', (2219, 2237), True, 'import chainer.links as L\n'), ((2253, 2279), 'chainer.links.Linear', 'L.Linear', (['hidden2', 'hidden1'], {}), '(hidden2, hidden1)\n', (2261, 2279), True, 'import chainer.links as L\n'), ((2592, 2684), 'chainer.links.Linear', 'L.Linear', (['image_size', 'hidden1'], {'initialW': 'model1.l1.W.data', 'initial_bias': 'model1.l1.b.data'}), '(image_size, hidden1, initialW=model1.l1.W.data, initial_bias=\n model1.l1.b.data)\n', (2600, 2684), True, 'import chainer.links as L\n'), ((2699, 2788), 'chainer.links.Linear', 'L.Linear', (['hidden1', 'hidden2'], {'initialW': 'model2.l3.W.data', 'initial_bias': 'model2.l3.b.data'}), '(hidden1, hidden2, initialW=model2.l3.W.data, initial_bias=model2.\n l3.b.data)\n', (2707, 2788), True, 'import chainer.links as L\n'), ((2804, 2893), 'chainer.links.Linear', 'L.Linear', (['hidden2', 'hidden1'], {'initialW': 'model2.l4.W.data', 'initial_bias': 'model2.l4.b.data'}), '(hidden2, hidden1, initialW=model2.l4.W.data, initial_bias=model2.\n l4.b.data)\n', (2812, 2893), True, 'import chainer.links as L\n'), ((2909, 3001), 'chainer.links.Linear', 'L.Linear', (['hidden1', 'image_size'], {'initialW': 'model1.l2.W.data', 'initial_bias': 'model1.l2.b.data'}), '(hidden1, image_size, initialW=model1.l2.W.data, initial_bias=\n model1.l2.b.data)\n', (2917, 3001), True, 'import chainer.links as L\n')] |
# -*- coding: utf-8 -*-
"""
examples from: https://likegeeks.com/python-gui-examples-tkinter-tutorial/
"""
import numpy as np
from tkinter import *
window = Tk()
window.title("Welcome")
window.geometry('400x600')
# 1st func
n = 0  # current grid row; incremented as widgets are added below
lbl = Label(window, text="Extract continuous pages")
lbl.grid(column=0, row=n)
ent = Entry(window, width=8) # textbox to enter start/stop page numbers of a pdf file
ent.grid(column=1, row = n)
def clicked():
    """ Handler for the 1st button: print the page range typed into ``ent``.

    Bug fix: the original code did ``Fst = int(ent.get())`` and then
    indexed ``Fst[0]``/``Fst[2]`` — subscripting an int raises TypeError
    on every click. The entry text is now parsed as two page numbers.
    """
    text = ent.get()
    print(text)
    # accept separators like "2 7", "2,7" or "2-7" (pages are positive)
    parts = text.replace(',', ' ').replace('-', ' ').split()
    n0 = int(parts[0])
    n1 = int(parts[1])
    if n0 == n1:
        # zero-length range: np.sign would give step 0 (ValueError)
        print(n0)
        return
    for i in range(n0, n1, np.sign(n1 - n0)):
        print(i)
btn = Button(window, text="1st button", command=clicked)
btn.grid(column=2, row=n)
# 2nd func
n += 1
lbl_ = Label(window, text="Extract multiple selected pages")
lbl_.grid(column=0, row=n)
def clicked_():
lbl_.configure(text="2nd button clicked !!")
btn_ = Button(window, text="2nd button", command=clicked_)
btn_.grid(column=1, row=n)
# 3rd func
n += 1
lbl__ = Label(window, text="Combine multi files")
lbl__.grid(column=0, row=n)
def clicked__():
lbl__.configure(text="3rd button clicked !!")
btn__ = Button(window, text="3rd button", command=clicked__)
btn__.grid(column=1, row=n)
# entry box
n += 1
def show_entry_fields():
print("First Name: %s\nLast Name: %s" % (e1.get(), e2.get()))
Label(window,
text="<NAME>").grid(row=n)
Label(window,
text="<NAME>").grid(row=n+1)
e1 = Entry(window)
e2 = Entry(window)
e1.grid(row=n, column=1)
e2.grid(row=n+1, column=1)
Button(window,
text='Quit',
command= window.quit).grid(row=n+2,
column=0,
pady=4)
Button(window,
text='Show', command=show_entry_fields).grid(row=n+2,
column=1,
pady=4)
# combo-box
# NOTE(review): this mid-file wildcard import replaces the classic tk widgets
# imported above with their ttk counterparts of the same name -- confirm that
# is intentional.
from tkinter.ttk import *
n += 4
Label(window, text="Example of Combobox:").grid(column=0,row=n)
combo = Combobox(window)
combo['values']= (1, 2, 3, 5, 8, "Text")
combo.current(1) #set the selected item
combo.grid(column=1, row=n)
# check-box
n += 1
Label(window, text="Example of Checkbox:").grid(column=0,row=n)
bchk = BooleanVar()
bchk.set(True) #set check state
chk = Checkbutton(window, text='Checkbox', var= bchk )
chk.grid(column=1, row=n)
# radio-button
# NOTE(review): duplicate of the wildcard ttk import above -- redundant.
from tkinter.ttk import *
n += 1
rad1 = Radiobutton(window,text='First', value=1)
rad2 = Radiobutton(window,text='Second', value=2)
rad3 = Radiobutton(window,text='Third', value=3)
Label(window, text="Example of radiobox:").grid(column=0,row=n)
rad1.grid(column=1, row=n)
rad2.grid(column=1, row=n+1)
rad3.grid(column=1, row=n+2)
# spin box
n += 3
Label(window, text="Example of spinbox:").grid(column=0,row=n)
spin = Spinbox(window, from_=0, to=100, width=5)
spin.grid(column=1,row=n)
# progress bar
n += 1
Label(window, text="Example of progressbar:").grid(column=0,row=n)
bar = Progressbar(window, length=100, style='black.Horizontal.TProgressbar')
bar['value'] = 30
bar.grid(column=1, row=n)
# file dialog
n += 1
from os import path
from tkinter import filedialog
Label(window, text="Example of file dialog:").grid(column=0,row=n)
#file = filedialog.askopenfilename()
#file = filedialog.askopenfilename(initialdir= path.dirname(__file__))
#file.grid(column = 1, row = n)
# menu
# Build a two-entry menu bar: "File" -> New and "Save" -> png/jpg exporters.
from tkinter import Menu
menu = Menu(window)
new_item = Menu(menu)
sav_item = Menu(menu)
new_item.add_command(label='New')
sav_item.add_command(label="save_as_png")
sav_item.add_separator()
sav_item.add_command(label="save_as_jpg")
menu.add_cascade(label='File', menu=new_item)
menu.add_cascade(label="Save", menu=sav_item)
window.config(menu=menu)
# NOTE(review): the triple-quoted string below is "notebook" example code that
# was deliberately disabled by turning it into a bare string literal; it is
# kept verbatim here.
'''
# notebook
from tkinter import ttk
n = n + 3
tab_control = ttk.Notebook(window)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab_control.add(tab1, text='First')
tab_control.add(tab2, text='Second')
lbl1 = Label(tab1, text= 'label1')
lbl1.grid(column=0, row=n)
lbl2 = Label(tab2, text= 'label2')
lbl2.grid(column=1, row=n)
tab_control.pack(expand=1, fill='both')
'''
# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()
| [
"tkinter.Menu",
"numpy.sign"
] | [((3736, 3748), 'tkinter.Menu', 'Menu', (['window'], {}), '(window)\n', (3740, 3748), False, 'from tkinter import Menu\n'), ((3762, 3772), 'tkinter.Menu', 'Menu', (['menu'], {}), '(menu)\n', (3766, 3772), False, 'from tkinter import Menu\n'), ((3786, 3796), 'tkinter.Menu', 'Menu', (['menu'], {}), '(menu)\n', (3790, 3796), False, 'from tkinter import Menu\n'), ((686, 702), 'numpy.sign', 'np.sign', (['(n1 - n0)'], {}), '(n1 - n0)\n', (693, 702), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class UNet(nn.Module):
def __init__(self, nefilters=24):
super(UNet, self).__init__()
print('random unet')
nlayers = 12
self.num_layers = nlayers
self.nefilters = nefilters
filter_size = 5
merge_filter_size = 5
self.context = True
self.encoder0 = nn.ModuleList()
self.encoder1 = nn.ModuleList()
self.encoder2 = nn.ModuleList()
self.decoder0 = nn.ModuleList()
self.decoder1 = nn.ModuleList()
self.decoder2 = nn.ModuleList()
self.ebatch = nn.ModuleList()
self.dbatch = nn.ModuleList()
echannelin = [1] + [(i + 1) * nefilters for i in range(nlayers - 1)]
echannelout = [(i + 1) * nefilters for i in range(nlayers)]
dchannelout = echannelout[::-1]
dchannelin = [dchannelout[0] * 2] + [(i) * nefilters + (i - 1) * nefilters for i in range(nlayers, 1, -1)]
for i in range(self.num_layers):
self.encoder0.append(
nn.Conv1d(echannelin[i], echannelout[i], filter_size, dilation=1, padding=filter_size // 2 * 1))
self.encoder1.append(
nn.Conv1d(echannelin[i], echannelout[i], filter_size, dilation=2, padding=filter_size // 2 * 2))
self.encoder2.append(
nn.Conv1d(echannelin[i], echannelout[i], filter_size, dilation=3, padding=filter_size // 2 * 3))
self.decoder0.append(nn.Conv1d(dchannelin[i], dchannelout[i], merge_filter_size, dilation=1,
padding=merge_filter_size // 2 * 1))
self.decoder1.append(nn.Conv1d(dchannelin[i], dchannelout[i], merge_filter_size, dilation=2,
padding=merge_filter_size // 2 * 2))
self.decoder2.append(nn.Conv1d(dchannelin[i], dchannelout[i], merge_filter_size, dilation=3,
padding=merge_filter_size // 2 * 3))
self.ebatch.append(nn.BatchNorm1d(echannelout[i]))
self.dbatch.append(nn.BatchNorm1d(dchannelout[i]))
self.encoder = [self.encoder0, self.encoder1, self.encoder2]
# self.encoder.append(self.encoder0)
# self.encoder.append(self.encoder1)
# self.encoder.append(self.encoder2)
self.decoder = [self.decoder0, self.decoder1, self.decoder2]
# self.decoder.append(self.decoder0)
# self.decoder.append(self.decoder1)
# self.decoder.append(self.decoder2)
self.middle = nn.Sequential(
nn.Conv1d(echannelout[-1], echannelout[-1], filter_size, padding=filter_size // 2),
nn.BatchNorm1d(echannelout[-1]),
nn.LeakyReLU(0.1)
)
self.out = nn.Sequential(
nn.Conv1d(nefilters + 1, 1, 1),
nn.Tanh()
)
def forward(self, x, randint=None):
if not randint:
randint = np.random.randint(0, 3)
input = x
encoder = list()
for i in range(self.num_layers):
# print(randint)
x = self.encoder[int(randint[i])][i](x)
# x = self.encoder[i](x)
x = self.ebatch[i](x)
x = F.leaky_relu(x, 0.1)
encoder.append(x)
x = x[:, :, ::2]
x = self.middle(x)
for i in range(self.num_layers):
x = F.upsample(x, scale_factor=2, mode='linear')
x = torch.cat([x, encoder[self.num_layers - i - 1]], dim=1)
x = self.decoder[int(randint[i + self.num_layers])][i](x)
x = self.dbatch[i](x)
x = F.leaky_relu(x, 0.1)
x = torch.cat([x, input], dim=1)
x = self.out(x)
return x
| [
"torch.nn.functional.upsample",
"torch.nn.functional.leaky_relu",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.ModuleList",
"torch.nn.BatchNorm1d",
"numpy.random.randint",
"torch.nn.Conv1d",
"torch.cat"
] | [((427, 442), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (440, 442), True, 'import torch.nn as nn\n'), ((468, 483), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (481, 483), True, 'import torch.nn as nn\n'), ((509, 524), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (522, 524), True, 'import torch.nn as nn\n'), ((550, 565), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (563, 565), True, 'import torch.nn as nn\n'), ((591, 606), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (604, 606), True, 'import torch.nn as nn\n'), ((632, 647), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (645, 647), True, 'import torch.nn as nn\n'), ((671, 686), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (684, 686), True, 'import torch.nn as nn\n'), ((710, 725), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (723, 725), True, 'import torch.nn as nn\n'), ((3800, 3828), 'torch.cat', 'torch.cat', (['[x, input]'], {'dim': '(1)'}), '([x, input], dim=1)\n', (3809, 3828), False, 'import torch\n'), ((2687, 2774), 'torch.nn.Conv1d', 'nn.Conv1d', (['echannelout[-1]', 'echannelout[-1]', 'filter_size'], {'padding': '(filter_size // 2)'}), '(echannelout[-1], echannelout[-1], filter_size, padding=\n filter_size // 2)\n', (2696, 2774), True, 'import torch.nn as nn\n'), ((2784, 2815), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['echannelout[-1]'], {}), '(echannelout[-1])\n', (2798, 2815), True, 'import torch.nn as nn\n'), ((2830, 2847), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2842, 2847), True, 'import torch.nn as nn\n'), ((2907, 2937), 'torch.nn.Conv1d', 'nn.Conv1d', (['(nefilters + 1)', '(1)', '(1)'], {}), '(nefilters + 1, 1, 1)\n', (2916, 2937), True, 'import torch.nn as nn\n'), ((2952, 2961), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2959, 2961), True, 'import torch.nn as nn\n'), ((3066, 3089), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', 
(3083, 3089), True, 'import numpy as np\n'), ((3352, 3372), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.1)'], {}), '(x, 0.1)\n', (3364, 3372), True, 'import torch.nn.functional as F\n'), ((3525, 3569), 'torch.nn.functional.upsample', 'F.upsample', (['x'], {'scale_factor': '(2)', 'mode': '"""linear"""'}), "(x, scale_factor=2, mode='linear')\n", (3535, 3569), True, 'import torch.nn.functional as F\n'), ((3587, 3642), 'torch.cat', 'torch.cat', (['[x, encoder[self.num_layers - i - 1]]'], {'dim': '(1)'}), '([x, encoder[self.num_layers - i - 1]], dim=1)\n', (3596, 3642), False, 'import torch\n'), ((3766, 3786), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.1)'], {}), '(x, 0.1)\n', (3778, 3786), True, 'import torch.nn.functional as F\n'), ((1128, 1228), 'torch.nn.Conv1d', 'nn.Conv1d', (['echannelin[i]', 'echannelout[i]', 'filter_size'], {'dilation': '(1)', 'padding': '(filter_size // 2 * 1)'}), '(echannelin[i], echannelout[i], filter_size, dilation=1, padding=\n filter_size // 2 * 1)\n', (1137, 1228), True, 'import torch.nn as nn\n'), ((1277, 1377), 'torch.nn.Conv1d', 'nn.Conv1d', (['echannelin[i]', 'echannelout[i]', 'filter_size'], {'dilation': '(2)', 'padding': '(filter_size // 2 * 2)'}), '(echannelin[i], echannelout[i], filter_size, dilation=2, padding=\n filter_size // 2 * 2)\n', (1286, 1377), True, 'import torch.nn as nn\n'), ((1426, 1526), 'torch.nn.Conv1d', 'nn.Conv1d', (['echannelin[i]', 'echannelout[i]', 'filter_size'], {'dilation': '(3)', 'padding': '(filter_size // 2 * 3)'}), '(echannelin[i], echannelout[i], filter_size, dilation=3, padding=\n filter_size // 2 * 3)\n', (1435, 1526), True, 'import torch.nn as nn\n'), ((1559, 1670), 'torch.nn.Conv1d', 'nn.Conv1d', (['dchannelin[i]', 'dchannelout[i]', 'merge_filter_size'], {'dilation': '(1)', 'padding': '(merge_filter_size // 2 * 1)'}), '(dchannelin[i], dchannelout[i], merge_filter_size, dilation=1,\n padding=merge_filter_size // 2 * 1)\n', (1568, 1670), True, 'import torch.nn as 
nn\n'), ((1746, 1857), 'torch.nn.Conv1d', 'nn.Conv1d', (['dchannelin[i]', 'dchannelout[i]', 'merge_filter_size'], {'dilation': '(2)', 'padding': '(merge_filter_size // 2 * 2)'}), '(dchannelin[i], dchannelout[i], merge_filter_size, dilation=2,\n padding=merge_filter_size // 2 * 2)\n', (1755, 1857), True, 'import torch.nn as nn\n'), ((1933, 2044), 'torch.nn.Conv1d', 'nn.Conv1d', (['dchannelin[i]', 'dchannelout[i]', 'merge_filter_size'], {'dilation': '(3)', 'padding': '(merge_filter_size // 2 * 3)'}), '(dchannelin[i], dchannelout[i], merge_filter_size, dilation=3,\n padding=merge_filter_size // 2 * 3)\n', (1942, 2044), True, 'import torch.nn as nn\n'), ((2120, 2150), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['echannelout[i]'], {}), '(echannelout[i])\n', (2134, 2150), True, 'import torch.nn as nn\n'), ((2184, 2214), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['dchannelout[i]'], {}), '(dchannelout[i])\n', (2198, 2214), True, 'import torch.nn as nn\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
from re import L
import numpy as np
import torch
from logging import warning
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap values of a periodic quantity into a single period.

    Args:
        val (torch.Tensor): values to wrap.
        offset (float, optional): fraction of the period subtracted from the
            lower bound. Defaults to 0.5.
        period (float, optional): length of the period. Defaults to np.pi.

    Returns:
        torch.Tensor: values wrapped into
        [-offset * period, (1 - offset) * period).
    """
    whole_periods = torch.floor(val / period + offset)
    return val - whole_periods * period
def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate batched points about one coordinate axis.

    Args:
        points (torch.Tensor): points of shape (N, M, 3).
        angles (torch.Tensor): rotation angles of shape (N,).
        axis (int, optional): axis to rotate about; 0, 1, 2 (or -1 as an
            alias for 2). Defaults to 0.

    Returns:
        torch.Tensor: rotated points of shape (N, M, 3).

    Raises:
        ValueError: if ``axis`` is not one of 0, 1, 2 (or -1).
    """
    sin_a = torch.sin(angles)
    cos_a = torch.cos(angles)
    one = torch.ones_like(cos_a)
    zero = torch.zeros_like(cos_a)
    if axis == 1:
        rows = [[cos_a, zero, -sin_a],
                [zero, one, zero],
                [sin_a, zero, cos_a]]
    elif axis == 2 or axis == -1:
        rows = [[cos_a, -sin_a, zero],
                [sin_a, cos_a, zero],
                [zero, zero, one]]
    elif axis == 0:
        rows = [[zero, cos_a, -sin_a],
                [zero, sin_a, cos_a],
                [one, zero, zero]]
    else:
        raise ValueError(f'axis should in range [0, 1, 2], got {axis}')
    # rot_mat_T has shape (3, 3, N): one transposed rotation matrix per angle.
    rot_mat_T = torch.stack([torch.stack(row) for row in rows])
    return torch.einsum('aij,jka->aik', (points, rot_mat_T))
def xywhr2xyxyr(boxes_xywhr):
    """Convert rotated boxes from (x, y, w, h, r) to (x1, y1, x2, y2, r).

    Args:
        boxes_xywhr (torch.Tensor): boxes of shape (N, 5) in XYWHR format.

    Returns:
        torch.Tensor: boxes of shape (N, 5) in XYXYR format.
    """
    out = torch.zeros_like(boxes_xywhr)
    cx, cy = boxes_xywhr[:, 0], boxes_xywhr[:, 1]
    half_w = boxes_xywhr[:, 2] / 2
    half_h = boxes_xywhr[:, 3] / 2
    out[:, 0] = cx - half_w
    out[:, 1] = cy - half_h
    out[:, 2] = cx + half_w
    out[:, 3] = cy + half_h
    out[:, 4] = boxes_xywhr[:, 4]  # rotation passes through unchanged
    return out
def get_box_type(box_type):
    """Map a box-type name to its box class and box mode.

    Args:
        box_type (str): one of "LiDAR", "Camera" or "Depth"
            (case-insensitive).

    Returns:
        tuple: (box class, :obj:`Box3DMode` member).

    Raises:
        ValueError: for any unsupported ``box_type``.
    """
    from .box_3d_mode import (Box3DMode, CameraInstance3DBoxes,
                              DepthInstance3DBoxes, LiDARInstance3DBoxes)
    dispatch = {
        'lidar': (LiDARInstance3DBoxes, Box3DMode.LIDAR),
        'camera': (CameraInstance3DBoxes, Box3DMode.CAM),
        'depth': (DepthInstance3DBoxes, Box3DMode.DEPTH),
    }
    key = box_type.lower()
    if key not in dispatch:
        raise ValueError('Only "box_type" of "camera", "lidar", "depth"'
                         f' are supported, got {box_type}')
    return dispatch[key]
def points_cam2img(points_3d, proj_mat, with_depth=False):
    """Project 3D points in camera coordinates onto the image plane.

    Args:
        points_3d (torch.Tensor): points of shape (..., 3).
        proj_mat (torch.Tensor): projection matrix of shape 3x3, 3x4 or 4x4.
        with_depth (bool, optional): if True, append the depth as a third
            output channel. Defaults to False.

    Returns:
        torch.Tensor: image coordinates of shape (..., 2), or (..., 3)
        when ``with_depth`` is True.
    """
    ones_shape = list(points_3d.shape[:-1]) + [1]
    assert len(proj_mat.shape) == 2, 'The dimension of the projection'\
        f' matrix should be 2 instead of {len(proj_mat.shape)}.'
    d1, d2 = proj_mat.shape[:2]
    assert (d1 == 3 and d2 == 3) or (d1 == 3 and d2 == 4) or (
        d1 == 4 and d2 == 4), 'The shape of the projection matrix'\
        f' ({d1}*{d2}) is not supported.'
    if d1 == 3:
        # Embed a 3x3 / 3x4 matrix into the top-left of a 4x4 identity so one
        # homogeneous multiply handles every supported shape.
        full_mat = torch.eye(4, device=proj_mat.device, dtype=proj_mat.dtype)
        full_mat[:d1, :d2] = proj_mat
        proj_mat = full_mat
    # new_ones keeps device/dtype of the input (new_zeros behaved worse here
    # in a previous implementation, per the original comment).
    homogeneous = torch.cat(
        [points_3d, points_3d.new_ones(*ones_shape)], dim=-1)
    projected = torch.matmul(homogeneous, proj_mat.t())
    uv = projected[..., :2] / projected[..., 2:3]
    if with_depth:
        uv = torch.cat([uv, projected[..., 2:3]], dim=-1)
    return uv
def points_cam2img_jrdb(points_3d):
    """Project camera-frame points to JRDB panorama image coordinates.

    Args:
        points_3d (torch.Tensor): batched points; the indexing below implies
            a 3-D shape (B, N, 3) -- TODO confirm against callers, the
            original docstring claimed (N, 3).

    Returns:
        torch.Tensor: image coordinates of shape (B, N, 2).
    """
    # NOTE(review): torch.zeros(...) always allocates the default float dtype
    # on CPU, regardless of points_3d's dtype/device -- confirm intended.
    point_2d_res = torch.zeros(points_3d.shape)[:,:,:2]
    for i in range(point_2d_res.shape[0]):
        # projection() returns the (x, y) pixel coordinates per batch item.
        point_2d_res[i,:,0],point_2d_res[i,:,1] = projection(points_3d[i,:,:])
    return point_2d_res
def projection(pts_3d_rect):
    """Project 3D points onto JRDB's 3760x480 cylindrical panorama.

    (Projection code originally received from the JRDB authors by email.)

    Args:
        pts_3d_rect: array of shape (N, 3) with columns x, y, z.

    Returns:
        tuple: (result_x, result_y) pixel coordinates, each of shape (N,).
    """
    x, y, z = pts_3d_rect[:, 0], pts_3d_rect[:, 1], pts_3d_rect[:, 2]
    # Azimuth of each point; arctan(x/z) is shifted by pi for points behind
    # the camera (z < 0) so the angle covers the full circle.
    theta = np.arctan(x / z)
    theta += (z < 0) * np.pi
    azimuth_frac = theta / (2 * np.pi)
    result_x = (azimuth_frac * 3760 + 1880) % 3760
    result_y = 485.78 * (y / ((1 / np.cos(theta)) * z)) + 0.4375 * 480
    return result_x, result_y
def mono_cam_box2vis(cam_box):
    """Post-process Mono-3D bboxes so they can be projected onto 2D images.

    The conversion:
    1. rotates the box along x-axis for np.pi / 2 (roll),
    2. changes orientation from local yaw to global yaw,
    3. converts yaw by (-yaw - np.pi / 2).

    Args:
        cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate
            system before conversion. Could be gt bbox loaded from dataset
            or network prediction output.

    Returns:
        :obj:`CameraInstance3DBoxes`: Box after conversion.
    """
    import warnings
    # Bug fix: this module does ``from logging import warning`` -- that name
    # is a *function*, so ``warning.warn(...)`` raised AttributeError. Use
    # the ``warnings`` module instead.
    warnings.warn('DeprecationWarning: The hack of yaw and dimension in the '
                  'monocular 3D detection on nuScenes has been removed. The '
                  'function mono_cam_box2vis will be deprecated.')
    from . import CameraInstance3DBoxes
    assert isinstance(cam_box, CameraInstance3DBoxes), \
        'input bbox should be CameraInstance3DBoxes!'
    loc = cam_box.gravity_center
    dim = cam_box.dims
    yaw = cam_box.yaw
    feats = cam_box.tensor[:, 7:]
    # rotate along x-axis for np.pi / 2
    # see also here: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L557 # noqa
    dim[:, [1, 2]] = dim[:, [2, 1]]
    # change local yaw to global yaw for visualization
    # refer to https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L164-L166 # noqa
    yaw += torch.atan2(loc[:, 0], loc[:, 2])
    # convert yaw by (-yaw - np.pi / 2)
    # this is because mono 3D box class such as `NuScenesBox` has different
    # definition of rotation with our `CameraInstance3DBoxes`
    yaw = -yaw - np.pi / 2
    cam_box = torch.cat([loc, dim, yaw[:, None], feats], dim=1)
    cam_box = CameraInstance3DBoxes(
        cam_box, box_dim=cam_box.shape[-1], origin=(0.5, 0.5, 0.5))
    return cam_box
def get_proj_mat_by_coord_type(img_meta, coord_type):
    """Look up the projection matrix for a coordinate system in image meta.

    Args:
        img_meta (dict): meta info holding the projection matrices.
        coord_type (str): 'DEPTH', 'CAMERA' or 'LIDAR' (case-insensitive).

    Returns:
        torch.Tensor: transformation matrix stored under the matching key.
    """
    meta_keys = {
        'LIDAR': 'lidar2img',
        'DEPTH': 'depth2img',
        'CAMERA': 'cam2img',
    }
    coord_type = coord_type.upper()
    assert coord_type in meta_keys
    return img_meta[meta_keys[coord_type]]
| [
"torch.ones_like",
"logging.warning.warn",
"torch.eye",
"torch.atan2",
"torch.sin",
"torch.floor",
"torch.stack",
"torch.cos",
"torch.einsum",
"numpy.cos",
"numpy.concatenate",
"torch.zeros_like",
"torch.zeros",
"torch.cat",
"numpy.arctan"
] | [((1162, 1179), 'torch.sin', 'torch.sin', (['angles'], {}), '(angles)\n', (1171, 1179), False, 'import torch\n'), ((1194, 1211), 'torch.cos', 'torch.cos', (['angles'], {}), '(angles)\n', (1203, 1211), False, 'import torch\n'), ((1223, 1247), 'torch.ones_like', 'torch.ones_like', (['rot_cos'], {}), '(rot_cos)\n', (1238, 1247), False, 'import torch\n'), ((1260, 1285), 'torch.zeros_like', 'torch.zeros_like', (['rot_cos'], {}), '(rot_cos)\n', (1276, 1285), False, 'import torch\n'), ((2040, 2089), 'torch.einsum', 'torch.einsum', (['"""aij,jka->aik"""', '(points, rot_mat_T)'], {}), "('aij,jka->aik', (points, rot_mat_T))\n", (2052, 2089), False, 'import torch\n'), ((2353, 2382), 'torch.zeros_like', 'torch.zeros_like', (['boxes_xywhr'], {}), '(boxes_xywhr)\n', (2369, 2382), False, 'import torch\n'), ((5992, 6008), 'numpy.arctan', 'np.arctan', (['(x / z)'], {}), '(x / z)\n', (6001, 6008), True, 'import numpy as np\n'), ((6996, 7181), 'logging.warning.warn', 'warning.warn', (['"""DeprecationWarning: The hack of yaw and dimension in the monocular 3D detection on nuScenes has been removed. The function mono_cam_box2vis will be deprecated."""'], {}), "(\n 'DeprecationWarning: The hack of yaw and dimension in the monocular 3D detection on nuScenes has been removed. 
The function mono_cam_box2vis will be deprecated.'\n )\n", (7008, 7181), False, 'from logging import warning\n'), ((7881, 7914), 'torch.atan2', 'torch.atan2', (['loc[:, 0]', 'loc[:, 2]'], {}), '(loc[:, 0], loc[:, 2])\n', (7892, 7914), False, 'import torch\n'), ((8134, 8183), 'torch.cat', 'torch.cat', (['[loc, dim, yaw[:, None], feats]'], {'dim': '(1)'}), '([loc, dim, yaw[:, None], feats], dim=1)\n', (8143, 8183), False, 'import torch\n'), ((4625, 4683), 'torch.eye', 'torch.eye', (['(4)'], {'device': 'proj_mat.device', 'dtype': 'proj_mat.dtype'}), '(4, device=proj_mat.device, dtype=proj_mat.dtype)\n', (4634, 4683), False, 'import torch\n'), ((5092, 5145), 'torch.cat', 'torch.cat', (['[point_2d_res, point_2d[..., 2:3]]'], {'dim': '(-1)'}), '([point_2d_res, point_2d[..., 2:3]], dim=-1)\n', (5101, 5145), False, 'import torch\n'), ((5641, 5669), 'torch.zeros', 'torch.zeros', (['points_3d.shape'], {}), '(points_3d.shape)\n', (5652, 5669), False, 'import torch\n'), ((610, 644), 'torch.floor', 'torch.floor', (['(val / period + offset)'], {}), '(val / period + offset)\n', (621, 644), False, 'import torch\n'), ((4188, 4229), 'numpy.concatenate', 'np.concatenate', (['[points_num, [1]]'], {'axis': '(0)'}), '([points_num, [1]], axis=0)\n', (4202, 4229), True, 'import numpy as np\n'), ((1350, 1389), 'torch.stack', 'torch.stack', (['[rot_cos, zeros, -rot_sin]'], {}), '([rot_cos, zeros, -rot_sin])\n', (1361, 1389), False, 'import torch\n'), ((1403, 1436), 'torch.stack', 'torch.stack', (['[zeros, ones, zeros]'], {}), '([zeros, ones, zeros])\n', (1414, 1436), False, 'import torch\n'), ((1450, 1488), 'torch.stack', 'torch.stack', (['[rot_sin, zeros, rot_cos]'], {}), '([rot_sin, zeros, rot_cos])\n', (1461, 1488), False, 'import torch\n'), ((1580, 1619), 'torch.stack', 'torch.stack', (['[rot_cos, -rot_sin, zeros]'], {}), '([rot_cos, -rot_sin, zeros])\n', (1591, 1619), False, 'import torch\n'), ((1633, 1671), 'torch.stack', 'torch.stack', (['[rot_sin, rot_cos, zeros]'], {}), '([rot_sin, 
rot_cos, zeros])\n', (1644, 1671), False, 'import torch\n'), ((1685, 1718), 'torch.stack', 'torch.stack', (['[zeros, zeros, ones]'], {}), '([zeros, zeros, ones])\n', (1696, 1718), False, 'import torch\n'), ((1796, 1835), 'torch.stack', 'torch.stack', (['[zeros, rot_cos, -rot_sin]'], {}), '([zeros, rot_cos, -rot_sin])\n', (1807, 1835), False, 'import torch\n'), ((1849, 1887), 'torch.stack', 'torch.stack', (['[zeros, rot_sin, rot_cos]'], {}), '([zeros, rot_sin, rot_cos])\n', (1860, 1887), False, 'import torch\n'), ((1901, 1934), 'torch.stack', 'torch.stack', (['[ones, zeros, zeros]'], {}), '([ones, zeros, zeros])\n', (1912, 1934), False, 'import torch\n'), ((6198, 6222), 'numpy.cos', 'np.cos', (['horizontal_theta'], {}), '(horizontal_theta)\n', (6204, 6222), True, 'import numpy as np\n')] |
import os
from typing import Any, Dict, List
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchmetrics.functional.text.bert import bert_score as metrics_bert_score
from torchmetrics.text.bert import BERTScore
from torchmetrics.utilities.imports import _BERTSCORE_AVAILABLE
# The reference implementation is optional; tests are skipped without it.
if _BERTSCORE_AVAILABLE:
    from bert_score import score as original_bert_score
os.environ["TOKENIZERS_PARALLELISM"] = "1"
# Examples and expected values taken from:
# https://github.com/Tiiiger/bert_score/blob/master/tests/test_scorer.py
preds = [
    "28-year-old chef found dead in San Francisco mall",
    "A 28-year-old chef who recently moved to San Francisco was "
    "found dead in the staircase of a local shopping center.",
    "The victim's brother said he cannot imagine anyone who would want to harm him,\"Finally, it went uphill again at "
    'him."',
]
targets = [
    "28-Year-Old Chef Found Dead at San Francisco Mall",
    "A 28-year-old chef who had recently moved to San Francisco was found dead in the stairwell of a local mall this "
    "week.",
    "But the victim's brother says he can't think of anyone who would want to hurt him, saying, \"Things were finally "
    'going well for him."',
]
# Metric names in the order the original package returns them.
_METRICS = ["precision", "recall", "f1"]
MODEL_NAME = "albert-base-v2"
def _assert_list(preds: Any, targets: Any, threshold: float = 1e-8):
"""Assert two lists are equal."""
assert np.allclose(preds, targets, atol=threshold, equal_nan=True)
def _parse_original_bert_score(score: torch.Tensor) -> Dict[str, List[float]]:
    """Turn the (precision, recall, f1) tensors returned by the original
    `bert-score` package into a metric-name -> list-of-floats dict."""
    return {name: tensor.tolist() for name, tensor in zip(_METRICS, score)}
# Batched variants of the fixtures, used by the accumulation test below.
preds_batched = [preds[0:2], preds[2:]]
targets_batched = [targets[0:2], targets[2:]]
# Each functional test below compares torchmetrics' bert_score against the
# reference `bert_score` package for one configuration (layers / idf /
# all_layers / baseline rescaling).
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_fn(preds, targets):
    """Tests for functional."""
    original_score = original_bert_score(preds, targets, model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3)
    original_score = _parse_original_bert_score(original_score)
    metrics_score = metrics_bert_score(
        preds, targets, model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3
    )
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_fn_with_idf(preds, targets):
    """Tests for functional with IDF rescaling."""
    original_score = original_bert_score(preds, targets, model_type=MODEL_NAME, num_layers=12, idf=True, batch_size=3)
    original_score = _parse_original_bert_score(original_score)
    metrics_score = metrics_bert_score(
        preds, targets, model_name_or_path=MODEL_NAME, num_layers=12, idf=True, batch_size=3
    )
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_fn_all_layers(preds, targets):
    """Tests for functional and all layers."""
    original_score = original_bert_score(
        preds, targets, model_type=MODEL_NAME, all_layers=True, idf=False, batch_size=3
    )
    original_score = _parse_original_bert_score(original_score)
    metrics_score = metrics_bert_score(
        preds, targets, model_name_or_path=MODEL_NAME, all_layers=True, idf=False, batch_size=3
    )
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_fn_all_layers_with_idf(preds, targets):
    """Tests for functional and all layers with IDF rescaling."""
    original_score = original_bert_score(preds, targets, model_type=MODEL_NAME, all_layers=True, idf=True, batch_size=3)
    original_score = _parse_original_bert_score(original_score)
    metrics_score = metrics_bert_score(
        preds, targets, model_name_or_path=MODEL_NAME, all_layers=True, idf=True, batch_size=3
    )
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_fn_all_layers_rescale_with_baseline(preds, targets):
    """Tests for functional with baseline rescaling."""
    original_score = original_bert_score(
        preds,
        targets,
        model_type=MODEL_NAME,
        lang="en",
        num_layers=8,
        idf=False,
        batch_size=3,
        rescale_with_baseline=True,
    )
    original_score = _parse_original_bert_score(original_score)
    metrics_score = metrics_bert_score(
        preds,
        targets,
        model_name_or_path=MODEL_NAME,
        lang="en",
        num_layers=8,
        idf=False,
        batch_size=3,
        rescale_with_baseline=True,
    )
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_fn_rescale_with_baseline(preds, targets):
    """Tests for functional with baseline rescaling with all layers."""
    original_score = original_bert_score(
        preds,
        targets,
        model_type=MODEL_NAME,
        lang="en",
        all_layers=True,
        idf=False,
        batch_size=3,
        rescale_with_baseline=True,
    )
    original_score = _parse_original_bert_score(original_score)
    metrics_score = metrics_bert_score(
        preds,
        targets,
        model_name_or_path=MODEL_NAME,
        lang="en",
        all_layers=True,
        idf=False,
        batch_size=3,
        rescale_with_baseline=True,
    )
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
# The tests below exercise the BERTScore *metric class* (update/compute API)
# against the reference package, mirroring the functional tests above.
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score(preds, targets):
    """Tests for metric."""
    original_score = original_bert_score(preds, targets, model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3)
    original_score = _parse_original_bert_score(original_score)
    scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3)
    scorer.update(preds=preds, target=targets)
    metrics_score = scorer.compute()
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_with_idf(preds, targets):
    """Tests for metric with IDF rescaling."""
    original_score = original_bert_score(preds, targets, model_type=MODEL_NAME, num_layers=8, idf=True, batch_size=3)
    original_score = _parse_original_bert_score(original_score)
    scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=True, batch_size=3)
    scorer.update(preds=preds, target=targets)
    metrics_score = scorer.compute()
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_all_layers(preds, targets):
    """Tests for metric and all layers."""
    original_score = original_bert_score(
        preds, targets, model_type=MODEL_NAME, all_layers=True, idf=False, batch_size=3
    )
    original_score = _parse_original_bert_score(original_score)
    scorer = BERTScore(model_name_or_path=MODEL_NAME, all_layers=True, idf=False, batch_size=3)
    scorer.update(preds=preds, target=targets)
    metrics_score = scorer.compute()
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_score_all_layers_with_idf(preds, targets):
    """Tests for metric and all layers with IDF rescaling."""
    original_score = original_bert_score(preds, targets, model_type=MODEL_NAME, all_layers=True, idf=True, batch_size=3)
    original_score = _parse_original_bert_score(original_score)
    scorer = BERTScore(model_name_or_path=MODEL_NAME, all_layers=True, idf=True, batch_size=3)
    scorer.update(preds=preds, target=targets)
    metrics_score = scorer.compute()
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
@pytest.mark.parametrize(
    "preds,targets",
    [(preds_batched, targets_batched)],
)
@pytest.mark.skipif(not _BERTSCORE_AVAILABLE, reason="test requires bert_score")
def test_accumulation(preds, targets):
    """Tests for metric works with accumulation."""
    original_score = original_bert_score(
        sum(preds, []), sum(targets, []), model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3
    )
    original_score = _parse_original_bert_score(original_score)
    scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3)
    # Feed the batches one at a time; compute() must match the single-shot
    # reference score over the concatenated data.
    for p, r in zip(preds, targets):
        scorer.update(preds=p, target=r)
    metrics_score = scorer.compute()
    for metric in _METRICS:
        _assert_list(metrics_score[metric], original_score[metric])
def _bert_score_ddp(rank, world_size, preds, targets, original_score):
"""Define a DDP process for BERTScore."""
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
dist.init_process_group("gloo", rank=rank, world_size=world_size)
scorer = BERTScore(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3, max_length=128)
scorer.update(preds, targets)
metrics_score = scorer.compute()
for metric in _METRICS:
_assert_list(metrics_score[metric], original_score[metric])
dist.destroy_process_group()
def _test_score_ddp_fn(rank, world_size, preds, targets):
"""Core functionality for the `test_score_ddp` test."""
original_score = original_bert_score(preds, targets, model_type=MODEL_NAME, num_layers=8, idf=False, batch_size=3)
original_score = _parse_original_bert_score(original_score)
_bert_score_ddp(rank, world_size, preds, targets, original_score)
@pytest.mark.parametrize(
    "preds,targets",
    [(preds, targets)],
)
@pytest.mark.skipif(not (_BERTSCORE_AVAILABLE and dist.is_available()), reason="test requires bert_score")
def test_score_ddp(preds, targets):
    """Verify BERTScore under distributed (DDP) execution with two workers."""
    n_procs = 2
    # join=False: let the spawned workers run without blocking this process.
    mp.spawn(_test_score_ddp_fn, args=(n_procs, preds, targets), nprocs=n_procs, join=False)
| [
"torchmetrics.text.bert.BERTScore",
"numpy.allclose",
"torch.distributed.destroy_process_group",
"torch.multiprocessing.spawn",
"pytest.mark.parametrize",
"bert_score.score",
"pytest.mark.skipif",
"torch.distributed.is_available",
"torch.distributed.init_process_group",
"torchmetrics.functional.te... | [((1884, 1944), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (1907, 1944), False, 'import pytest\n'), ((1957, 2036), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (1975, 2036), False, 'import pytest\n'), ((2527, 2587), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (2550, 2587), False, 'import pytest\n'), ((2600, 2679), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (2618, 2679), False, 'import pytest\n'), ((3198, 3258), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (3221, 3258), False, 'import pytest\n'), ((3271, 3350), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (3289, 3350), False, 'import pytest\n'), ((3887, 3947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (3910, 3947), False, 'import pytest\n'), ((3960, 4039), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (3978, 4039), False, 'import pytest\n'), ((4588, 4648), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, 
targets)])\n", (4611, 4648), False, 'import pytest\n'), ((4661, 4740), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (4679, 4740), False, 'import pytest\n'), ((5494, 5554), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (5517, 5554), False, 'import pytest\n'), ((5567, 5646), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (5585, 5646), False, 'import pytest\n'), ((6411, 6471), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (6434, 6471), False, 'import pytest\n'), ((6484, 6563), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (6502, 6563), False, 'import pytest\n'), ((7085, 7145), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (7108, 7145), False, 'import pytest\n'), ((7158, 7237), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (7176, 7237), False, 'import pytest\n'), ((7785, 7845), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (7808, 7845), False, 'import pytest\n'), ((7858, 7937), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires 
bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (7876, 7937), False, 'import pytest\n'), ((8505, 8565), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (8528, 8565), False, 'import pytest\n'), ((8578, 8657), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (8596, 8657), False, 'import pytest\n'), ((9237, 9313), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds_batched, targets_batched)]'], {}), "('preds,targets', [(preds_batched, targets_batched)])\n", (9260, 9313), False, 'import pytest\n'), ((9326, 9405), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _BERTSCORE_AVAILABLE)'], {'reason': '"""test requires bert_score"""'}), "(not _BERTSCORE_AVAILABLE, reason='test requires bert_score')\n", (9344, 9405), False, 'import pytest\n'), ((10976, 11036), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preds,targets"""', '[(preds, targets)]'], {}), "('preds,targets', [(preds, targets)])\n", (10999, 11036), False, 'import pytest\n'), ((1468, 1527), 'numpy.allclose', 'np.allclose', (['preds', 'targets'], {'atol': 'threshold', 'equal_nan': '(True)'}), '(preds, targets, atol=threshold, equal_nan=True)\n', (1479, 1527), True, 'import numpy as np\n'), ((2125, 2226), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, num_layers=8,\n idf=False, batch_size=3)\n', (2144, 2226), True, 'from bert_score import score as original_bert_score\n'), ((2308, 2416), 'torchmetrics.functional.text.bert.bert_score', 'metrics_bert_score', (['preds', 'targets'], {'model_name_or_path': 'MODEL_NAME', 'num_layers': '(8)', 'idf': 
'(False)', 'batch_size': '(3)'}), '(preds, targets, model_name_or_path=MODEL_NAME,\n num_layers=8, idf=False, batch_size=3)\n', (2326, 2416), True, 'from torchmetrics.functional.text.bert import bert_score as metrics_bert_score\n'), ((2796, 2897), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'num_layers': '(12)', 'idf': '(True)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, num_layers=12,\n idf=True, batch_size=3)\n', (2815, 2897), True, 'from bert_score import score as original_bert_score\n'), ((2979, 3087), 'torchmetrics.functional.text.bert.bert_score', 'metrics_bert_score', (['preds', 'targets'], {'model_name_or_path': 'MODEL_NAME', 'num_layers': '(12)', 'idf': '(True)', 'batch_size': '(3)'}), '(preds, targets, model_name_or_path=MODEL_NAME,\n num_layers=12, idf=True, batch_size=3)\n', (2997, 3087), True, 'from torchmetrics.functional.text.bert import bert_score as metrics_bert_score\n'), ((3465, 3569), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(False)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, all_layers=True,\n idf=False, batch_size=3)\n', (3484, 3569), True, 'from bert_score import score as original_bert_score\n'), ((3665, 3776), 'torchmetrics.functional.text.bert.bert_score', 'metrics_bert_score', (['preds', 'targets'], {'model_name_or_path': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(False)', 'batch_size': '(3)'}), '(preds, targets, model_name_or_path=MODEL_NAME,\n all_layers=True, idf=False, batch_size=3)\n', (3683, 3776), True, 'from torchmetrics.functional.text.bert import bert_score as metrics_bert_score\n'), ((4182, 4285), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(True)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, all_layers=True,\n idf=True, batch_size=3)\n', (4201, 
4285), True, 'from bert_score import score as original_bert_score\n'), ((4367, 4477), 'torchmetrics.functional.text.bert.bert_score', 'metrics_bert_score', (['preds', 'targets'], {'model_name_or_path': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(True)', 'batch_size': '(3)'}), '(preds, targets, model_name_or_path=MODEL_NAME,\n all_layers=True, idf=True, batch_size=3)\n', (4385, 4477), True, 'from torchmetrics.functional.text.bert import bert_score as metrics_bert_score\n'), ((4886, 5026), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'lang': '"""en"""', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)', 'rescale_with_baseline': '(True)'}), "(preds, targets, model_type=MODEL_NAME, lang='en',\n num_layers=8, idf=False, batch_size=3, rescale_with_baseline=True)\n", (4905, 5026), True, 'from bert_score import score as original_bert_score\n'), ((5179, 5326), 'torchmetrics.functional.text.bert.bert_score', 'metrics_bert_score', (['preds', 'targets'], {'model_name_or_path': 'MODEL_NAME', 'lang': '"""en"""', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)', 'rescale_with_baseline': '(True)'}), "(preds, targets, model_name_or_path=MODEL_NAME, lang='en',\n num_layers=8, idf=False, batch_size=3, rescale_with_baseline=True)\n", (5197, 5326), True, 'from torchmetrics.functional.text.bert import bert_score as metrics_bert_score\n'), ((5797, 5940), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'lang': '"""en"""', 'all_layers': '(True)', 'idf': '(False)', 'batch_size': '(3)', 'rescale_with_baseline': '(True)'}), "(preds, targets, model_type=MODEL_NAME, lang='en',\n all_layers=True, idf=False, batch_size=3, rescale_with_baseline=True)\n", (5816, 5940), True, 'from bert_score import score as original_bert_score\n'), ((6093, 6243), 'torchmetrics.functional.text.bert.bert_score', 'metrics_bert_score', (['preds', 'targets'], {'model_name_or_path': 'MODEL_NAME', 'lang': 
'"""en"""', 'all_layers': '(True)', 'idf': '(False)', 'batch_size': '(3)', 'rescale_with_baseline': '(True)'}), "(preds, targets, model_name_or_path=MODEL_NAME, lang='en',\n all_layers=True, idf=False, batch_size=3, rescale_with_baseline=True)\n", (6111, 6243), True, 'from torchmetrics.functional.text.bert import bert_score as metrics_bert_score\n'), ((6645, 6746), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, num_layers=8,\n idf=False, batch_size=3)\n', (6664, 6746), True, 'from bert_score import score as original_bert_score\n'), ((6821, 6900), 'torchmetrics.text.bert.BERTScore', 'BERTScore', ([], {'model_name_or_path': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)'}), '(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3)\n', (6830, 6900), False, 'from torchmetrics.text.bert import BERTScore\n'), ((7347, 7447), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(True)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, num_layers=8,\n idf=True, batch_size=3)\n', (7366, 7447), True, 'from bert_score import score as original_bert_score\n'), ((7522, 7600), 'torchmetrics.text.bert.BERTScore', 'BERTScore', ([], {'model_name_or_path': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(True)', 'batch_size': '(3)'}), '(model_name_or_path=MODEL_NAME, num_layers=8, idf=True, batch_size=3)\n', (7531, 7600), False, 'from torchmetrics.text.bert import BERTScore\n'), ((8045, 8149), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(False)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, all_layers=True,\n idf=False, batch_size=3)\n', (8064, 8149), True, 'from bert_score import score as original_bert_score\n'), 
((8238, 8324), 'torchmetrics.text.bert.BERTScore', 'BERTScore', ([], {'model_name_or_path': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(False)', 'batch_size': '(3)'}), '(model_name_or_path=MODEL_NAME, all_layers=True, idf=False,\n batch_size=3)\n', (8247, 8324), False, 'from torchmetrics.text.bert import BERTScore\n'), ((8793, 8896), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(True)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, all_layers=True,\n idf=True, batch_size=3)\n', (8812, 8896), True, 'from bert_score import score as original_bert_score\n'), ((8971, 9056), 'torchmetrics.text.bert.BERTScore', 'BERTScore', ([], {'model_name_or_path': 'MODEL_NAME', 'all_layers': '(True)', 'idf': '(True)', 'batch_size': '(3)'}), '(model_name_or_path=MODEL_NAME, all_layers=True, idf=True,\n batch_size=3)\n', (8980, 9056), False, 'from torchmetrics.text.bert import BERTScore\n'), ((9726, 9805), 'torchmetrics.text.bert.BERTScore', 'BERTScore', ([], {'model_name_or_path': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)'}), '(model_name_or_path=MODEL_NAME, num_layers=8, idf=False, batch_size=3)\n', (9735, 9805), False, 'from torchmetrics.text.bert import BERTScore\n'), ((10225, 10290), 'torch.distributed.init_process_group', 'dist.init_process_group', (['"""gloo"""'], {'rank': 'rank', 'world_size': 'world_size'}), "('gloo', rank=rank, world_size=world_size)\n", (10248, 10290), True, 'import torch.distributed as dist\n'), ((10304, 10403), 'torchmetrics.text.bert.BERTScore', 'BERTScore', ([], {'model_name_or_path': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)', 'max_length': '(128)'}), '(model_name_or_path=MODEL_NAME, num_layers=8, idf=False,\n batch_size=3, max_length=128)\n', (10313, 10403), False, 'from torchmetrics.text.bert import BERTScore\n'), ((10571, 10599), 'torch.distributed.destroy_process_group', 
'dist.destroy_process_group', ([], {}), '()\n', (10597, 10599), True, 'import torch.distributed as dist\n'), ((10741, 10842), 'bert_score.score', 'original_bert_score', (['preds', 'targets'], {'model_type': 'MODEL_NAME', 'num_layers': '(8)', 'idf': '(False)', 'batch_size': '(3)'}), '(preds, targets, model_type=MODEL_NAME, num_layers=8,\n idf=False, batch_size=3)\n', (10760, 10842), True, 'from bert_score import score as original_bert_score\n'), ((11252, 11351), 'torch.multiprocessing.spawn', 'mp.spawn', (['_test_score_ddp_fn'], {'args': '(world_size, preds, targets)', 'nprocs': 'world_size', 'join': '(False)'}), '(_test_score_ddp_fn, args=(world_size, preds, targets), nprocs=\n world_size, join=False)\n', (11260, 11351), True, 'import torch.multiprocessing as mp\n'), ((11098, 11117), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (11115, 11117), True, 'import torch.distributed as dist\n')] |
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
import os.path as osp
import numpy as np
import itertools
# Width of the '=' separator lines printed around the logdir listing.
DIV_LINE_WIDTH = 50

# Global vars for tracking and labeling data at load time.
exp_idx = 0  # monotonically increasing id appended to each 'Condition2' label
units = dict()  # per-condition run counter used to fill the 'Unit' column
def smoothed(data, window):
    """Return *data* smoothed with a centered moving-average window.

    Implements smoothed_y[t] = average(y[t-k], ..., y[t+k]) where
    *window* is the width of that window (2k+1). Near the edges the
    average is taken over only the part of the window that overlaps
    the data, so edge values are not damped toward zero.

    If ``window <= 1`` the input is returned unchanged.
    """
    if window <= 1:
        return data
    flat = np.asarray(data).flatten()
    kernel = np.ones(window)
    # Per-position window coverage: divides out the shorter effective
    # window at the boundaries.
    coverage = np.convolve(np.ones(flat.size), kernel, 'same')
    return np.convolve(flat, kernel, 'same') / coverage
def plot_data(data, xaxis='Epoch', value="AverageEpRet",
              yerr=None, condition="Condition1",
              smooth=1, paper=False, hidelegend=False,
              title=None, savedir=None, clear_xticks=False,
              color=None, **kwargs):
    """Draw one smoothed curve (with optional error band) onto the current figure.

    Args:
        data: Mapping/DataFrame with columns for *xaxis*, *value*,
            *condition* and (optionally) *yerr*.
        xaxis: Column plotted on the x-axis.
        value: Column plotted on the y-axis.
        yerr: Optional column name holding the error-band half-width.
        condition: Column whose first entry labels this curve.
        smooth: Moving-average window width passed to ``smoothed``.
        paper: If True, use a smaller font scale and a fixed figure size.
        hidelegend: If True, remove the legend after drawing it.
        clear_xticks: If True, blank out x tick labels and the x label.
        color: Line/band color.
        **kwargs: Ignored (allows callers to pass extra options through).
    """
    x, y = data[xaxis], data[value]
    if yerr is not None:
        err = data[yerr]
        lower = smoothed(y - err, smooth)
        upper = smoothed(y + err, smooth)
    y = smoothed(y, smooth)
    sns.set(style="darkgrid", font_scale=1. if paper else 1.5)
    plt.plot(x, y, color=color, label=data[condition][0])
    if yerr is not None:
        plt.fill_between(x, upper, lower, color=color, alpha=0.1)
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
               ncol=2, mode="expand", borderaxespad=0., fontsize=8)
    plt.xlabel(xaxis)
    plt.ylabel(value)
    # Switch to scientific notation once the x range gets large.
    if np.max(np.asarray(data[xaxis])) > 5e3:
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    if paper:
        plt.gcf().set_size_inches(4.00, 3.75)
    plt.tight_layout(pad=0.5)
    if clear_xticks:
        ticks, _ = plt.xticks()
        plt.xticks(ticks, [])
        plt.xlabel('')
    if hidelegend:
        plt.legend().remove()
def get_datasets(logdir, condition=None):
    """
    Recursively look through logdir for output files produced by
    spinup.logx.Logger.

    Assumes that any file "progress.txt" is a valid hit.

    Args:
        logdir (str): Root directory to walk for experiment output.
        condition (str): Optional label that overrides the experiment
            name read from config.json.

    Returns:
        list of pandas.DataFrame, one per run found, each augmented with
        'Unit', 'Condition1', 'Condition2' and 'Performance' columns.
    """
    global exp_idx
    global units
    datasets = []
    for root, _, files in os.walk(logdir):
        if 'progress.txt' not in files:
            continue
        exp_name = None
        try:
            # BUG FIX: the original left the config file handle open
            # (resource leak); a context manager guarantees closure.
            with open(os.path.join(root, 'config.json')) as config_file:
                config = json.load(config_file)
            if 'exp_name' in config:
                exp_name = config['exp_name']
        # BUG FIX: narrow the original bare `except:` so that e.g.
        # KeyboardInterrupt is not swallowed.
        except (OSError, json.JSONDecodeError):
            print('No file named config.json')
        condition1 = condition or exp_name or 'exp'
        condition2 = condition1 + '-' + str(exp_idx)
        exp_idx += 1
        # Tag repeat runs of the same condition with an increasing unit id.
        if condition1 not in units:
            units[condition1] = 0
        unit = units[condition1]
        units[condition1] += 1
        try:
            exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
        except Exception:
            print('Could not read from %s' % os.path.join(root, 'progress.txt'))
            continue
        # Off-policy logs report 'AverageTestEpRet'; on-policy report
        # 'AverageEpRet'. Alias whichever exists as 'Performance'.
        performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'
        exp_data.insert(len(exp_data.columns), 'Unit', unit)
        exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
        exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
        exp_data.insert(len(exp_data.columns), 'Performance', exp_data[performance])
        datasets.append(exp_data)
    return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
    """
    For every entry in all_logdirs,
        1) check if the entry is a real directory and if it is,
           pull data from it;
        2) if not, check to see if the entry is a prefix for a
           real directory, and pull data from that.
    """
    logdirs = []
    for entry in all_logdirs:
        if osp.isdir(entry) and entry[-1] == '/':
            # An exact directory (with trailing slash) is used directly.
            logdirs.append(entry)
        else:
            # Otherwise treat the last path component as a prefix and
            # autocomplete it against the parent directory's contents.
            basedir = osp.dirname(entry)
            prefix = entry.split('/')[-1]
            matches = [name for name in os.listdir(basedir) if prefix in name]
            logdirs += sorted(osp.join(basedir, name) for name in matches)

    # Enforce selection rules, which check logdirs for certain substrings.
    # Makes it easier to look at graphs from particular ablations, if you
    # launch many jobs at once with similar names.
    if select is not None:
        logdirs = [log for log in logdirs if all(s in log for s in select)]
    if exclude is not None:
        logdirs = [log for log in logdirs if not any(s in log for s in exclude)]

    # Verify logdirs
    print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
    for log in logdirs:
        print(log)
    print('\n' + '=' * DIV_LINE_WIDTH)

    # Make sure the legend is compatible with the logdirs
    assert not legend or (len(legend) == len(logdirs)), \
        "Must give a legend title for each set of experiments."

    # Load data from logdirs
    data = []
    if legend:
        for log, leg in zip(logdirs, legend):
            data += get_datasets(log, leg)
    else:
        for log in logdirs:
            data += get_datasets(log)
    return data
def make_plots(all_logdirs, legend=None, xaxis=None, value=None, count=False,
               font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean',
               paper=False, hidelegend=False, title=None, savedir=None, show=True,
               clear_xticks=False, y_horiz=None):
    """Load data from *all_logdirs* and render one figure with a curve per run.

    Args:
        all_logdirs: Log directories (or prefixes) to pull data from.
        legend: Optional legend labels, one per resolved logdir.
        xaxis: Column used for the x-axis.
        value: Column used for the y-axis.
        count: If True, plot each run separately ('Condition2') instead
            of averaging runs that share an experiment name.
        smooth: Moving-average window width for the curves.
        select / exclude: Substring rules filtering the resolved logdirs.
        estimator: Name of a numpy reduction (e.g. 'mean') applied by
            the plotting backend.
        savedir: If non-empty, save the figure there as '<title>.pdf'.
        show: If True, display the figure interactively.
        y_horiz: Optional y value at which to draw a dashed 'limit' line.
    """
    all_data = get_all_datasets(all_logdirs, legend, select, exclude)
    condition = 'Condition2' if count else 'Condition1'
    estimator = getattr(np, estimator)  # e.g. np.mean, np.median
    palette = itertools.cycle(sns.color_palette())
    # Plot a matching 'Std*' column as an error band when one exists.
    yerr = value.replace('Average', 'Std') if 'Average' in value else None
    plt.figure()
    for data in all_data:
        color = next(palette)
        plot_data(data, xaxis=xaxis, value=value, condition=condition,
                  smooth=smooth, estimator=estimator,
                  paper=paper, hidelegend=hidelegend,
                  title=title, savedir=savedir, yerr=yerr,
                  clear_xticks=clear_xticks, color=color)
        if y_horiz is not None:
            xmax = np.max(np.asarray(data[xaxis]))
            plt.hlines(y_horiz, 0, xmax, colors='red', linestyles='dashed', label='limit')
    # BUG FIX: the original tested `savedir is not ''`, an identity
    # comparison with a string literal (SyntaxWarning on modern Python,
    # and crashes via osp.join when savedir is None). Truthiness handles
    # both '' and None correctly.
    if savedir:
        fname = osp.join(savedir, title).lower()
        if clear_xticks:
            fname += '_nox'
        if hidelegend:
            fname += '_nolegend'
        os.makedirs(savedir, exist_ok=True)
        plt.savefig(fname + '.pdf', format='pdf')
    if show:
        plt.show()
def main():
    """Command-line entry point: parse arguments and dispatch to make_plots."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('logdir', nargs='*')
    parser.add_argument('--legend', '-l', nargs='*')
    parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts')
    parser.add_argument('--value', '-y', type=str, default='AverageEpRet')
    parser.add_argument('--count', action='store_true')
    parser.add_argument('--smooth', '-s', type=int, default=1)
    parser.add_argument('--select', nargs='*')
    parser.add_argument('--exclude', nargs='*')
    parser.add_argument('--est', default='mean')
    # Presentation options: paper-sized figure, legend/tick suppression,
    # output title/location, and an optional horizontal limit line.
    parser.add_argument('--paper', action='store_true')
    parser.add_argument('--hidelegend', '-hl', action='store_true')
    parser.add_argument('--title', type=str, default='')
    parser.add_argument('--savedir', type=str, default='')
    parser.add_argument('--dont_show', action='store_true')
    parser.add_argument('--clearx', action='store_true')
    parser.add_argument('--climit', type=float, default=None)
    args = parser.parse_args()
    """

    Args: 
        logdir (strings): As many log directories (or prefixes to log 
            directories, which the plotter will autocomplete internally) as 
            you'd like to plot from.

        legend (strings): Optional way to specify legend for the plot. The 
            plotter legend will automatically use the ``exp_name`` from the
            config.json file, unless you tell it otherwise through this flag.
            This only works if you provide a name for each directory that
            will get plotted. (Note: this may not be the same as the number
            of logdir args you provide! Recall that the plotter looks for
            autocompletes of the logdir args: there may be more than one 
            match for a given logdir prefix, and you will need to provide a 
            legend string for each one of those matches---unless you have 
            removed some of them as candidates via selection or exclusion 
            rules (below).)

        xaxis (string): Pick what column from data is used for the x-axis.
             Defaults to ``TotalEnvInteracts``.

        value (strings): Pick what columns from data to graph on the y-axis. 
            Submitting multiple values will produce multiple graphs. Defaults
            to ``Performance``, which is not an actual output of any algorithm.
            Instead, ``Performance`` refers to either ``AverageEpRet``, the 
            correct performance measure for the on-policy algorithms, or
            ``AverageTestEpRet``, the correct performance measure for the 
            off-policy algorithms. The plotter will automatically figure out 
            which of ``AverageEpRet`` or ``AverageTestEpRet`` to report for 
            each separate logdir.

        count: Optional flag. By default, the plotter shows y-values which
            are averaged across all results that share an ``exp_name``, 
            which is typically a set of identical experiments that only vary
            in random seed. But if you'd like to see all of those curves 
            separately, use the ``--count`` flag.

        smooth (int): Smooth data by averaging it over a fixed window. This 
            parameter says how wide the averaging window will be.

        select (strings): Optional selection rule: the plotter will only show
            curves from logdirs that contain all of these substrings.

        exclude (strings): Optional exclusion rule: plotter will only show 
            curves from logdirs that do not contain these substrings.

    """
    # NOTE: the triple-quoted block above is a no-op string literal used as
    # in-place documentation of the CLI flags; it is preserved verbatim.
    make_plots(args.logdir, args.legend, args.xaxis, args.value, args.count,
               smooth=args.smooth, select=args.select, exclude=args.exclude,
               estimator=args.est, paper=args.paper, hidelegend=args.hidelegend,
               title=args.title, savedir=args.savedir, show=not(args.dont_show),
               clear_xticks=args.clearx, y_horiz=args.climit)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"numpy.convolve",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"os.walk",
"seaborn.set",
"os.listdir",
"argparse.ArgumentParser",
"seaborn.color_palette",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"os.path.isdir",
"matplotlib.pyplot.savefig",
"... | [((582, 597), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (589, 597), True, 'import numpy as np\n'), ((1214, 1262), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""', 'font_scale': 'font_scale'}), "(style='darkgrid', font_scale=font_scale)\n", (1221, 1262), True, 'import seaborn as sns\n'), ((1270, 1323), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'color', 'label': 'data[condition][0]'}), '(x, y, color=color, label=data[condition][0])\n', (1278, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1545), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '"""lower left"""', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)', 'fontsize': '(8)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='lower left', ncol=2,\n mode='expand', borderaxespad=0.0, fontsize=8)\n", (1429, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1554, 1571), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xaxis'], {}), '(xaxis)\n', (1564, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1593), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['value'], {}), '(value)\n', (1586, 1593), True, 'import matplotlib.pyplot as plt\n'), ((2322, 2337), 'os.walk', 'os.walk', (['logdir'], {}), '(logdir)\n', (2329, 2337), False, 'import os\n'), ((6049, 6061), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6059, 6061), True, 'import matplotlib.pyplot as plt\n'), ((7012, 7037), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7035, 7037), False, 'import argparse\n'), ((639, 664), 'numpy.convolve', 'np.convolve', (['x', 'y', '"""same"""'], {}), "(x, y, 'same')\n", (650, 664), True, 'import numpy as np\n'), ((665, 690), 'numpy.convolve', 'np.convolve', (['z', 'y', '"""same"""'], {}), "(z, y, 'same')\n", (676, 690), True, 'import numpy as np\n'), ((1358, 1413), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'ymax', 'ymin'], {'color': 'color', 
'alpha': '(0.1)'}), '(x, ymax, ymin, color=color, alpha=0.1)\n', (1374, 1413), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1636), 'numpy.asarray', 'np.asarray', (['data[xaxis]'], {}), '(data[xaxis])\n', (1623, 1636), True, 'import numpy as np\n'), ((1685, 1746), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (1705, 1746), True, 'import matplotlib.pyplot as plt\n'), ((1814, 1839), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (1830, 1839), True, 'import matplotlib.pyplot as plt\n'), ((1858, 1883), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (1874, 1883), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1933), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (1931, 1933), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1959), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', '[]'], {}), '(x, [])\n', (1952, 1959), True, 'import matplotlib.pyplot as plt\n'), ((1968, 1982), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (1978, 1982), True, 'import matplotlib.pyplot as plt\n'), ((5918, 5937), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (5935, 5937), True, 'import seaborn as sns\n'), ((6569, 6647), 'matplotlib.pyplot.hlines', 'plt.hlines', (['y_horiz', '(0)', 'xmax'], {'colors': '"""red"""', 'linestyles': '"""dashed"""', 'label': '"""limit"""'}), "(y_horiz, 0, xmax, colors='red', linestyles='dashed', label='limit')\n", (6579, 6647), True, 'import matplotlib.pyplot as plt\n'), ((6848, 6883), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (6859, 6883), False, 'import os\n'), ((6892, 6933), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '.pdf')"], {'format': '"""pdf"""'}), "(fname + '.pdf', format='pdf')\n", (6903, 6933), 
True, 'import matplotlib.pyplot as plt\n'), ((6954, 6964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6962, 6964), True, 'import matplotlib.pyplot as plt\n'), ((547, 563), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (557, 563), True, 'import numpy as np\n'), ((4066, 4083), 'os.path.isdir', 'osp.isdir', (['logdir'], {}), '(logdir)\n', (4075, 4083), True, 'import os.path as osp\n'), ((4173, 4192), 'os.path.dirname', 'osp.dirname', (['logdir'], {}), '(logdir)\n', (4184, 4192), True, 'import os.path as osp\n'), ((4311, 4330), 'os.listdir', 'os.listdir', (['basedir'], {}), '(basedir)\n', (4321, 4330), False, 'import os\n'), ((6536, 6559), 'numpy.asarray', 'np.asarray', (['data[xaxis]'], {}), '(data[xaxis])\n', (6546, 6559), True, 'import numpy as np\n'), ((1769, 1778), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1776, 1778), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2023), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2021, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2514, 2536), 'json.load', 'json.load', (['config_path'], {}), '(config_path)\n', (2523, 2536), False, 'import json\n'), ((4226, 4246), 'os.path.join', 'osp.join', (['basedir', 'x'], {}), '(basedir, x)\n', (4234, 4246), True, 'import os.path as osp\n'), ((6691, 6715), 'os.path.join', 'osp.join', (['savedir', 'title'], {}), '(savedir, title)\n', (6699, 6715), True, 'import os.path as osp\n'), ((2455, 2488), 'os.path.join', 'os.path.join', (['root', '"""config.json"""'], {}), "(root, 'config.json')\n", (2467, 2488), False, 'import os\n'), ((3046, 3080), 'os.path.join', 'os.path.join', (['root', '"""progress.txt"""'], {}), "(root, 'progress.txt')\n", (3058, 3080), False, 'import os\n'), ((3148, 3182), 'os.path.join', 'os.path.join', (['root', '"""progress.txt"""'], {}), "(root, 'progress.txt')\n", (3160, 3182), False, 'import os\n')] |
"""
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
Author: <NAME> <<EMAIL>>
"""
import numpy as np
import numba
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
def ncp_hals(
        X, rank, mask=None, random_state=None, init='rand',
        skip_modes=(), negative_modes=(), **options):
    """
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
    Squares (HALS) Method.
    Parameters
    ----------
    X : (I_1, ..., I_N) array_like
        A real array with nonnegative entries and ``X.ndim >= 3``.
    rank : integer
        The `rank` sets the number of components to be computed.
    mask : (I_1, ..., I_N) array_like
        Binary tensor, same shape as X, specifying censored or missing data values
        at locations where (mask == 0) and observed data where (mask == 1).
    random_state : integer, RandomState instance or None, optional (default ``None``)
        If integer, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by np.random.
    init : str, or KTensor, optional (default ``'rand'``).
        Specifies initial guess for KTensor factor matrices.
        If ``'randn'``, Gaussian random numbers are used to initialize.
        If ``'rand'``, uniform random numbers are used to initialize.
        If KTensor instance, a copy is made to initialize the optimization.
    skip_modes : iterable, optional (default ``()``).
        Specifies modes of the tensor that are not fit. This can be
        used to fix certain factor matrices that have been previously
        fit.
    negative_modes : iterable, optional (default ``()``).
        Specifies modes of the tensor whose factors are not constrained
        to be nonnegative.
    options : dict, specifying fitting options.
        tol : float, optional (default ``tol=1E-5``)
            Stopping tolerance for reconstruction error.
        max_iter : integer, optional (default ``max_iter = 500``)
            Maximum number of iterations to perform before exiting.
        min_iter : integer, optional (default ``min_iter = 1``)
            Minimum number of iterations to perform before exiting.
        max_time : integer, optional (default ``max_time = np.inf``)
            Maximum computational time before exiting.
        verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
            Display progress.
    Returns
    -------
    result : FitResult instance
        Object which holds the fitted results. It provides the factor matrices
        in form of a KTensor, ``result.factors``.
    Notes
    -----
    This implementation is using the Hierarchical Alternating Least Squares Method.
    The defaults for ``skip_modes`` and ``negative_modes`` are now empty tuples
    instead of mutable lists (only membership tests are performed, so this is
    fully backward-compatible).
    References
    ----------
    Cichocki, Andrzej, and <NAME>. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """
    # Impute missing elements with the mean of the observed entries so the
    # first factor updates see a complete tensor.
    if mask is not None:
        X = np.copy(X)
        X[~mask] = np.mean(X[mask])
    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)
    # Initialize problem.
    U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
    result = FitResult(U, 'NCP_HALS', **options)
    # Store problem dimensions.
    # NOTE(review): this overwrites the normX returned by
    # _get_initial_ktensor above; presumably the two agree — confirm.
    normX = np.linalg.norm(X)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Iterate the HALS algorithm until convergence or maxiter is reached
    # i)   compute the N gram matrices and multiply
    # ii)  Compute Khatri-Rao product
    # iii) Update component U_1, U_2, ... U_N
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    while result.still_optimizing:
        for n in range(X.ndim):
            # Skip modes that are specified as fixed.
            if n in skip_modes:
                continue
            # Select all components, but U_n
            components = [U[j] for j in range(X.ndim) if j != n]
            # i) compute the N-1 gram matrices
            grams = np.prod([arr.T @ arr for arr in components], axis=0)
            # ii) Compute Khatri-Rao product
            kr = khatri_rao(components)
            Xmkr = unfold(X, n).dot(kr)
            # iii) Update component U_n in place
            _hals_update(U[n], grams, Xmkr, n not in negative_modes)
        # iv) Update masked elements with the current model prediction.
        if mask is not None:
            pred = U.full()
            X[~mask] = pred[~mask]
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Update the optimization result, checks for convergence.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if mask is None:
            # Determine mode that was fit last (grams/Xmkr still refer to it).
            n = np.setdiff1d(np.arange(X.ndim), skip_modes).max()
            # Add contribution of last fit factors to gram matrix.
            grams *= U[n].T @ U[n]
            residsq = np.sum(grams) - 2 * np.sum(U[n] * Xmkr) + (normX ** 2)
            result.update(np.sqrt(residsq) / normX)
        else:
            result.update(np.linalg.norm(X - pred) / normX)
    # end optimization loop, return result.
    return result.finalize()
@numba.jit(nopython=True)
def _hals_update(factors, grams, Xmkr, nonneg):
    # In-place HALS update of a single factor matrix.
    # factors: (dim, rank) factor matrix, overwritten in place.
    # grams: (rank, rank) product of the other factors' Gram matrices
    #        (computed by the caller as prod(arr.T @ arr)).
    # Xmkr: unfolded tensor times the Khatri-Rao product of the other factors.
    # nonneg: when True, updates are clipped at zero.
    dim = factors.shape[0]
    rank = factors.shape[1]
    indices = np.arange(rank)
    # Handle special case of rank-1 model.
    if rank == 1:
        if nonneg:
            factors[:] = np.maximum(0.0, Xmkr / grams[0, 0])
        else:
            factors[:] = Xmkr / grams[0, 0]
    # Do a few inner iterations.
    else:
        for itr in range(3):
            for p in range(rank):
                # Residual for column p, excluding its own contribution.
                idx = (indices != p)
                Cp = factors[:, idx] @ grams[idx][:, p]
                # Guard against division by a (near-)zero diagonal entry.
                r = (Xmkr[:, p] - Cp) / np.maximum(grams[p, p], 1e-6)
                if nonneg:
                    factors[:, p] = np.maximum(r, 0.0)
                else:
                    factors[:, p] = r
| [
"tensortools.optimize.optim_utils._check_cpd_inputs",
"numpy.copy",
"numpy.mean",
"numpy.prod",
"tensortools.operations.khatri_rao",
"tensortools.operations.unfold",
"numpy.sqrt",
"tensortools.optimize.FitResult",
"tensortools.optimize.optim_utils._get_initial_ktensor",
"numpy.sum",
"numba.jit",... | [((5575, 5599), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5584, 5599), False, 'import numba\n'), ((3403, 3441), 'tensortools.optimize.optim_utils._check_cpd_inputs', 'optim_utils._check_cpd_inputs', (['X', 'rank'], {}), '(X, rank)\n', (3432, 3441), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((3484, 3545), 'tensortools.optimize.optim_utils._get_initial_ktensor', 'optim_utils._get_initial_ktensor', (['init', 'X', 'rank', 'random_state'], {}), '(init, X, rank, random_state)\n', (3516, 3545), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((3559, 3594), 'tensortools.optimize.FitResult', 'FitResult', (['U', '"""NCP_HALS"""'], {}), "(U, 'NCP_HALS', **options)\n", (3568, 3594), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((3640, 3657), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {}), '(X)\n', (3654, 3657), True, 'import numpy as np\n'), ((5718, 5733), 'numpy.arange', 'np.arange', (['rank'], {}), '(rank)\n', (5727, 5733), True, 'import numpy as np\n'), ((3331, 3341), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (3338, 3341), True, 'import numpy as np\n'), ((3361, 3377), 'numpy.mean', 'np.mean', (['X[mask]'], {}), '(X[mask])\n', (3368, 3377), True, 'import numpy as np\n'), ((4390, 4444), 'numpy.prod', 'np.prod', (['[(arr.T @ arr) for arr in components]'], {'axis': '(0)'}), '([(arr.T @ arr) for arr in components], axis=0)\n', (4397, 4444), True, 'import numpy as np\n'), ((4507, 4529), 'tensortools.operations.khatri_rao', 'khatri_rao', (['components'], {}), '(components)\n', (4517, 4529), False, 'from tensortools.operations import unfold, khatri_rao\n'), ((5840, 5875), 'numpy.maximum', 'np.maximum', (['(0.0)', '(Xmkr / grams[0, 0])'], {}), '(0.0, Xmkr / grams[0, 0])\n', (5850, 5875), True, 'import numpy as np\n'), ((4549, 4561), 'tensortools.operations.unfold', 'unfold', (['X', 'n'], {}), '(X, n)\n', (4555, 4561), False, 'from 
tensortools.operations import unfold, khatri_rao\n'), ((5316, 5329), 'numpy.sum', 'np.sum', (['grams'], {}), '(grams)\n', (5322, 5329), True, 'import numpy as np\n'), ((5397, 5413), 'numpy.sqrt', 'np.sqrt', (['residsq'], {}), '(residsq)\n', (5404, 5413), True, 'import numpy as np\n'), ((5464, 5488), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - pred)'], {}), '(X - pred)\n', (5478, 5488), True, 'import numpy as np\n'), ((6174, 6204), 'numpy.maximum', 'np.maximum', (['grams[p, p]', '(1e-06)'], {}), '(grams[p, p], 1e-06)\n', (6184, 6204), True, 'import numpy as np\n'), ((6268, 6286), 'numpy.maximum', 'np.maximum', (['r', '(0.0)'], {}), '(r, 0.0)\n', (6278, 6286), True, 'import numpy as np\n'), ((5154, 5171), 'numpy.arange', 'np.arange', (['X.ndim'], {}), '(X.ndim)\n', (5163, 5171), True, 'import numpy as np\n'), ((5336, 5355), 'numpy.sum', 'np.sum', (['(U[n] * Xmkr)'], {}), '(U[n] * Xmkr)\n', (5342, 5355), True, 'import numpy as np\n')] |
#pythran export conv(float[][], float[][])
#runas import numpy as np ; x = np.tri(300,300)*0.5 ; w = np.tri(5,5)*0.25 ; conv(x,w)
#bench import numpy as np ; x = np.tri(150,150)*0.5 ; w = np.tri(5,5)*0.25 ; conv(x,w)
import numpy as np
def clamp(i, offset, maxval):
    """Shift ``i`` by ``offset`` and clamp the result into ``[0, maxval]``."""
    shifted = i + offset
    if shifted < 0:
        shifted = 0
    return maxval if shifted > maxval else shifted
def reflect(pos, offset, bound):
    """Mirror an offset index back into range (reflective border handling)."""
    idx = pos + offset
    mirrored_low = max(idx, -idx)
    mirrored_high = 2 * (bound - 1) - idx
    return min(mirrored_high, mirrored_low)
def conv(x, weights):
    """Convolution-like kernel over a 2-D array with clamped indexing.

    x: 2-D input array.
    weights: 2-D kernel.
    Returns an array with the same shape as ``x``.

    Fix: the original used Python-2 ``xrange``, a NameError on Python 3;
    replaced with ``range``. The tiny ``clamp`` helper is inlined so the
    kernel is self-contained.

    NOTE(review): indices are clamped to ``sw[0]`` (the kernel size), not to
    the bounds of ``x`` — reproduced as-is from the original; verify intent.
    """
    sx = x.shape
    sw = weights.shape
    result = np.zeros_like(x)
    half = sw[0] // 2
    for i in range(sx[0]):
        for j in range(sx[1]):
            for ii in range(sw[0]):
                for jj in range(sw[1]):
                    # clamp(i, off, m) == min(max(0, i + off), m), inlined
                    ci = min(max(0, i + ii - half), sw[0])
                    cj = min(max(0, j + jj - half), sw[0])
                    result[i, j] += x[ci, cj] * weights[ii, jj]
    return result
| [
"numpy.zeros_like"
] | [((499, 515), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (512, 515), True, 'import numpy as np\n')] |
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import itertools
class SOM(object):
    """Self-Organizing Map trained with the classic online Kohonen rule."""

    def __init__(self, h, w, dim_feat):
        """
        Construction of a zero-filled SOM.
        h,w,dim_feat: constructs a (h,w,dim_feat) SOM.
        """
        self.shape = (h, w, dim_feat)
        self.som = np.zeros((h, w, dim_feat))
        # Training parameters (set by train()).
        self.L0 = 0.0
        self.lam = 0.0
        self.sigma0 = 0.0
        self.data = []
        # Per-cell count of how often each cell was the BMU.
        self.hit_score = np.zeros((h, w))

    def train(self, data, L0, lam, sigma0, initializer=np.random.rand, frames=None):
        """
        Training procedure for a SOM.
        data: a N*d matrix, N the number of examples,
              d the same as dim_feat=self.shape[2].
        L0,lam,sigma0: training parameters.
        initializer: a function taking h,w and dim_feat (*self.shape) as
                     parameters and returning an initial (h,w,dim_feat) tensor.
        frames: saves intermediate frames if not None.
        """
        self.L0 = L0
        self.lam = lam
        self.sigma0 = sigma0
        self.som = initializer(*self.shape)
        self.data = data
        for t in itertools.count():
            if frames is not None:
                frames.append(self.som.copy())
            # Stop once the neighbourhood radius shrinks below half a cell.
            if self.sigma(t) < 0.5:
                print("final t:", t)
                #print("quantization error:", self.quant_err())
                break
            i_data = np.random.choice(range(len(data)))
            bmu = self.find_bmu(data[i_data])
            self.hit_score[bmu] += 1
            self.update_som(bmu, data[i_data], t)

    def quant_err(self):
        """
        Computes the quantization error of the SOM.
        It uses the data fed at last training.
        """
        bmu_dists = []
        for input_vector in self.data:
            bmu = self.find_bmu(input_vector)
            bmu_feat = self.som[bmu]
            bmu_dists.append(np.linalg.norm(input_vector - bmu_feat))
        return np.array(bmu_dists).mean()

    def find_bmu(self, input_vec):
        """
        Find the BMU of a given input vector.
        input_vec: a d=dim_feat=self.shape[2] input vector.

        Fix: a single O(h*w) min scan replaces building and sorting the full
        distance list (ties resolve to the same cell as the stable sort did).
        """
        best_pos, best_dist = None, np.inf
        for y in range(self.shape[0]):
            for x in range(self.shape[1]):
                dist = np.linalg.norm(input_vec - self.som[y, x])
                if dist < best_dist:
                    best_pos, best_dist = (y, x), dist
        return best_pos

    def update_som(self, bmu, input_vector, t):
        """
        Calls the update rule on each cell.
        bmu: (y,x) BMU's coordinates.
        input_vector: current data vector.
        t: current time.
        """
        for y in range(self.shape[0]):
            for x in range(self.shape[1]):
                dist_to_bmu = np.linalg.norm((np.array(bmu) - np.array((y, x))))
                self.update_cell((y, x), dist_to_bmu, input_vector, t)

    def update_cell(self, cell, dist_to_bmu, input_vector, t):
        """
        Computes the update rule on a cell.
        cell: (y,x) cell's coordinates.
        dist_to_bmu: L2 distance from cell to bmu.
        input_vector: current data vector.
        t: current time.
        """
        self.som[cell] += self.N(dist_to_bmu, t) * self.L(t) * (input_vector - self.som[cell])

    def update_bmu(self, bmu, input_vector, t):
        """
        Update rule for the BMU.
        bmu: (y,x) BMU's coordinates.
        input_vector: current data vector.
        t: current time.
        """
        self.som[bmu] += self.L(t) * (input_vector - self.som[bmu])

    def L(self, t):
        """
        Learning rate formula (exponential decay).
        t: current time.
        """
        return self.L0 * np.exp(-t / self.lam)

    def N(self, dist_to_bmu, t):
        """
        Computes the neighbouring penalty (Gaussian in grid distance).
        dist_to_bmu: L2 distance to bmu.
        t: current time.
        """
        curr_sigma = self.sigma(t)
        return np.exp(-(dist_to_bmu ** 2) / (2 * curr_sigma ** 2))

    def sigma(self, t):
        """
        Neighbouring radius formula (exponential decay).
        t: current time.
        """
        return self.sigma0 * np.exp(-t / self.lam)
"numpy.exp",
"numpy.array",
"numpy.zeros",
"itertools.count",
"numpy.linalg.norm"
] | [((1351, 1377), 'numpy.zeros', 'np.zeros', (['(h, w, dim_feat)'], {}), '((h, w, dim_feat))\n', (1359, 1377), True, 'import numpy as np\n'), ((1544, 1560), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (1552, 1560), True, 'import numpy as np\n'), ((2277, 2294), 'itertools.count', 'itertools.count', ([], {}), '()\n', (2292, 2294), False, 'import itertools\n'), ((5153, 5202), 'numpy.exp', 'np.exp', (['(-dist_to_bmu ** 2 / (2 * curr_sigma ** 2))'], {}), '(-dist_to_bmu ** 2 / (2 * curr_sigma ** 2))\n', (5159, 5202), True, 'import numpy as np\n'), ((4904, 4925), 'numpy.exp', 'np.exp', (['(-t / self.lam)'], {}), '(-t / self.lam)\n', (4910, 4925), True, 'import numpy as np\n'), ((5343, 5364), 'numpy.exp', 'np.exp', (['(-t / self.lam)'], {}), '(-t / self.lam)\n', (5349, 5364), True, 'import numpy as np\n'), ((3081, 3120), 'numpy.linalg.norm', 'np.linalg.norm', (['(input_vector - bmu_feat)'], {}), '(input_vector - bmu_feat)\n', (3095, 3120), True, 'import numpy as np\n'), ((3135, 3154), 'numpy.array', 'np.array', (['bmu_dists'], {}), '(bmu_dists)\n', (3143, 3154), True, 'import numpy as np\n'), ((3463, 3505), 'numpy.linalg.norm', 'np.linalg.norm', (['(input_vec - self.som[y, x])'], {}), '(input_vec - self.som[y, x])\n', (3477, 3505), True, 'import numpy as np\n'), ((3988, 4001), 'numpy.array', 'np.array', (['bmu'], {}), '(bmu)\n', (3996, 4001), True, 'import numpy as np\n'), ((4002, 4018), 'numpy.array', 'np.array', (['(y, x)'], {}), '((y, x))\n', (4010, 4018), True, 'import numpy as np\n')] |
import numpy as np
from .other import clip_boxes
from .text_proposal_graph_builder import TextProposalGraphBuilder
class TextProposalConnector:
    """Groups text proposals into text lines and fits their bounding quads."""

    def __init__(self):
        self.graph_builder = TextProposalGraphBuilder()

    def group_text_proposals(self, text_proposals, scores, im_size):
        """Cluster proposals into connected sub-graphs (one group per line)."""
        graph = self.graph_builder.build_graph(text_proposals, scores, im_size)
        return graph.sub_graphs_connected()

    def fit_y(self, X, Y, x1, x2):
        """Fit y as a function of x on (X, Y) and evaluate at x1 and x2.

        If every X is identical, the horizontal line y = Y[0] is returned;
        otherwise a degree-1 polynomial fit is used.
        """
        # Fix: the original line `len(X)!=0` was a no-op expression —
        # clearly an intended assertion.
        assert len(X) != 0
        # if X only include one point, the function will get line y=Y[0]
        if np.sum(X == X[0]) == len(X):
            return Y[0], Y[0]
        p = np.poly1d(np.polyfit(X, Y, 1))
        return p(x1), p(x2)

    def get_text_lines(self, text_proposals, scores, im_size):
        """Merge grouped proposals into (n, 9) text-line records:
        the four corner points (x, y) of each line followed by its score."""
        # tp=text proposal
        tp_groups = self.group_text_proposals(text_proposals, scores, im_size)
        text_lines = np.zeros((len(tp_groups), 5), np.float32)
        for index, tp_indices in enumerate(tp_groups):
            text_line_boxes = text_proposals[list(tp_indices)]
            x0 = np.min(text_line_boxes[:, 0])
            x1 = np.max(text_line_boxes[:, 2])
            # Shrink the fit range by half a proposal width on each side.
            offset = (text_line_boxes[0, 2] - text_line_boxes[0, 0]) * 0.5
            lt_y, rt_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0 + offset, x1 - offset)
            lb_y, rb_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0 + offset, x1 - offset)
            # the score of a text line is the average score of the scores
            # of all text proposals contained in the text line
            score = scores[list(tp_indices)].sum() / float(len(tp_indices))
            text_lines[index, 0] = x0
            text_lines[index, 1] = min(lt_y, rt_y)
            text_lines[index, 2] = x1
            text_lines[index, 3] = max(lb_y, rb_y)
            text_lines[index, 4] = score
        text_lines = clip_boxes(text_lines, im_size)
        # Fix: `np.float` was deprecated and removed in NumPy >= 1.24;
        # the builtin `float` is the documented replacement.
        text_recs = np.zeros((len(text_lines), 9), float)
        for index, line in enumerate(text_lines):
            xmin, ymin, xmax, ymax = line[0], line[1], line[2], line[3]
            text_recs[index, 0] = xmin
            text_recs[index, 1] = ymin
            text_recs[index, 2] = xmax
            text_recs[index, 3] = ymin
            text_recs[index, 4] = xmin
            text_recs[index, 5] = ymax
            text_recs[index, 6] = xmax
            text_recs[index, 7] = ymax
            text_recs[index, 8] = line[4]
        return text_recs
| [
"numpy.max",
"numpy.sum",
"numpy.min",
"numpy.polyfit"
] | [((568, 585), 'numpy.sum', 'np.sum', (['(X == X[0])'], {}), '(X == X[0])\n', (574, 585), True, 'import numpy as np\n'), ((645, 664), 'numpy.polyfit', 'np.polyfit', (['X', 'Y', '(1)'], {}), '(X, Y, 1)\n', (655, 664), True, 'import numpy as np\n'), ((1067, 1096), 'numpy.min', 'np.min', (['text_line_boxes[:, 0]'], {}), '(text_line_boxes[:, 0])\n', (1073, 1096), True, 'import numpy as np\n'), ((1113, 1142), 'numpy.max', 'np.max', (['text_line_boxes[:, 2]'], {}), '(text_line_boxes[:, 2])\n', (1119, 1142), True, 'import numpy as np\n')] |
from typing import List
import numpy as np
import pandas as pd
from category_encoders.backward_difference import BackwardDifferenceEncoder
from category_encoders.cat_boost import CatBoostEncoder
from category_encoders.helmert import HelmertEncoder
from category_encoders.james_stein import JamesSteinEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from category_encoders.m_estimate import MEstimateEncoder
from category_encoders.one_hot import OneHotEncoder
from category_encoders.ordinal import OrdinalEncoder
from category_encoders.sum_coding import SumEncoder
from category_encoders.target_encoder import TargetEncoder
from category_encoders.woe import WOEEncoder
from sklearn.model_selection import RepeatedStratifiedKFold
def get_single_encoder(encoder_name: str, cat_cols: list):
    """
    Get encoder by its name.

    :param encoder_name: Name of desired encoder
    :param cat_cols: Cat columns for encoding
    :return: Categorical encoder
    :raises NotImplementedError: if ``encoder_name`` is not a known encoder

    Fix: the original chain of plain ``if`` statements never initialized
    ``encoder``, so an unknown name raised UnboundLocalError at the final
    ``if encoder is None`` check instead of the intended NotImplementedError.
    A duplicated ``MEstimateEncoder`` branch was also removed.
    """
    encoder = None
    if encoder_name == "FrequencyEncoder":
        encoder = FrequencyEncoder(cols=cat_cols)
    elif encoder_name == "WOEEncoder":
        encoder = WOEEncoder(cols=cat_cols)
    elif encoder_name == "TargetEncoder":
        encoder = TargetEncoder(cols=cat_cols)
    elif encoder_name == "SumEncoder":
        encoder = SumEncoder(cols=cat_cols)
    elif encoder_name == "MEstimateEncoder":
        encoder = MEstimateEncoder(cols=cat_cols)
    elif encoder_name == "LeaveOneOutEncoder":
        encoder = LeaveOneOutEncoder(cols=cat_cols)
    elif encoder_name == "HelmertEncoder":
        encoder = HelmertEncoder(cols=cat_cols)
    elif encoder_name == "BackwardDifferenceEncoder":
        encoder = BackwardDifferenceEncoder(cols=cat_cols)
    elif encoder_name == "JamesSteinEncoder":
        encoder = JamesSteinEncoder(cols=cat_cols)
    elif encoder_name == "OrdinalEncoder":
        encoder = OrdinalEncoder(cols=cat_cols)
    elif encoder_name == "CatBoostEncoder":
        encoder = CatBoostEncoder(cols=cat_cols)
    elif encoder_name == "OneHotEncoder":
        encoder = OneHotEncoder(cols=cat_cols)
    if encoder is None:
        raise NotImplementedError("To be implemented")
    return encoder
class DoubleValidationEncoderNumerical:
    """
    Encoder with validation within
    """
    def __init__(self, cols, encoders_names_tuple=()):
        """
        :param cols: Categorical columns
        :param encoders_names_tuple: Tuple of str with encoders
        """
        self.cols, self.num_cols = cols, None
        self.encoders_names_tuple = encoders_names_tuple
        # Out-of-fold scheme: 5 folds x 3 repeats, fixed seed for reproducibility.
        self.n_folds, self.n_repeats = 5, 3
        self.model_validation = RepeatedStratifiedKFold(
            n_splits=self.n_folds, n_repeats=self.n_repeats, random_state=0
        )
        # Maps encoder name -> list of fitted encoders (one per fold split).
        self.encoders_dict = {}
        # Holds one DataFrame of encoded columns per encoder name.
        self.storage = None
    def fit_transform(self, X: pd.DataFrame, y: np.array) -> pd.DataFrame:
        # Fit each encoder out-of-fold: train on the fold's train part,
        # encode its validation part, and average over the repeats.
        self.num_cols = [col for col in X.columns if col not in self.cols]
        self.storage = []
        for encoder_name in self.encoders_names_tuple:
            for n_fold, (train_idx, val_idx) in enumerate(
                self.model_validation.split(X, y)
            ):
                encoder = get_single_encoder(encoder_name, self.cols)
                X_train, X_val = (
                    X.loc[train_idx].reset_index(drop=True),
                    X.loc[val_idx].reset_index(drop=True),
                )
                y_train, y_val = y[train_idx], y[val_idx]
                _ = encoder.fit_transform(X_train, y_train)
                # transform validation part and get all necessary cols
                val_t = encoder.transform(X_val)
                val_t = val_t[
                    [col for col in val_t.columns if col not in self.num_cols]
                ].values
                # First split for this encoder: allocate the accumulator and
                # start the per-encoder list of fitted fold encoders.
                if encoder_name not in self.encoders_dict.keys():
                    cols_representation = np.zeros((X.shape[0], val_t.shape[1]))
                    self.encoders_dict[encoder_name] = [encoder]
                else:
                    self.encoders_dict[encoder_name].append(encoder)
                # Each row is a validation row in exactly n_repeats splits,
                # so dividing by n_repeats yields the average encoding.
                cols_representation[val_idx, :] += val_t / self.n_repeats
            cols_representation = pd.DataFrame(cols_representation)
            cols_representation.columns = [
                f"encoded_{encoder_name}_{i}"
                for i in range(cols_representation.shape[1])
            ]
            self.storage.append(cols_representation)
        # Append all encoded representations, then drop the raw cat columns.
        for df in self.storage:
            X = pd.concat([X, df], axis=1)
        X.drop(self.cols, axis=1, inplace=True)
        return X
    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        # Encode new data by averaging the predictions of every fold encoder
        # fitted during fit_transform (n_folds * n_repeats of them).
        self.storage = []
        for encoder_name in self.encoders_names_tuple:
            cols_representation = None
            for encoder in self.encoders_dict[encoder_name]:
                test_tr = encoder.transform(X)
                test_tr = test_tr[
                    [col for col in test_tr.columns if col not in self.num_cols]
                ].values
                if cols_representation is None:
                    cols_representation = np.zeros(test_tr.shape)
                cols_representation = (
                    cols_representation + test_tr / self.n_folds / self.n_repeats
                )
            cols_representation = pd.DataFrame(cols_representation)
            cols_representation.columns = [
                f"encoded_{encoder_name}_{i}"
                for i in range(cols_representation.shape[1])
            ]
            self.storage.append(cols_representation)
        for df in self.storage:
            X = pd.concat([X, df], axis=1)
        X.drop(self.cols, axis=1, inplace=True)
        return X
class MultipleEncoder:
    """
    Applies several categorical encoders and concatenates their outputs.
    """

    def __init__(self, cols: List[str], encoders_names_tuple=()):
        """
        :param cols: List of categorical columns
        :param encoders_names_tuple: Tuple of categorical encoders names. Possible values in tuple are:
        "FrequencyEncoder", "WOEEncoder", "TargetEncoder", "SumEncoder", "MEstimateEncoder", "LeaveOneOutEncoder",
        "HelmertEncoder", "BackwardDifferenceEncoder", "JamesSteinEncoder", "OrdinalEncoder""CatBoostEncoder"
        """
        self.cols = cols
        self.num_cols = None
        self.encoders_names_tuple = encoders_names_tuple
        self.encoders_dict = {}
        # list for storing results of transformation from each encoder
        self.storage = None

    def fit_transform(self, X: pd.DataFrame, y: np.array) -> pd.DataFrame:
        self.num_cols = [c for c in X.columns if c not in self.cols]
        self.storage = []
        for name in self.encoders_names_tuple:
            # Fit a fresh encoder and keep it for later transform() calls.
            enc = get_single_encoder(encoder_name=name, cat_cols=self.cols)
            encoded = enc.fit_transform(X, y)
            self.encoders_dict[name] = enc
            encoded = encoded[
                [c for c in encoded.columns if c not in self.num_cols]
            ].values
            frame = pd.DataFrame(encoded)
            frame.columns = [f"encoded_{name}_{i}" for i in range(frame.shape[1])]
            self.storage.append(frame)
        # Attach every encoder's representation, then drop the raw columns.
        for frame in self.storage:
            X = pd.concat([X, frame], axis=1)
        X.drop(self.cols, axis=1, inplace=True)
        return X

    def transform(self, X) -> pd.DataFrame:
        self.storage = []
        for name in self.encoders_names_tuple:
            # get representation of cat columns and form a pd.DataFrame for it
            encoded = self.encoders_dict[name].transform(X)
            encoded = encoded[
                [c for c in encoded.columns if c not in self.num_cols]
            ].values
            frame = pd.DataFrame(encoded)
            frame.columns = [f"encoded_{name}_{i}" for i in range(frame.shape[1])]
            self.storage.append(frame)
        # Attach every encoder's representation, then drop the raw columns.
        for frame in self.storage:
            X = pd.concat([X, frame], axis=1)
        X.drop(self.cols, axis=1, inplace=True)
        return X
class FrequencyEncoder:
    """Replaces each categorical value with its occurrence count."""

    def __init__(self, cols):
        self.cols = cols
        self.counts_dict = None

    def fit(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
        """Learn per-column value counts from the training frame."""
        learned = {}
        for col in self.cols:
            uniques, freqs = np.unique(X[col], return_counts=True)
            learned[col] = dict(zip(uniques, freqs))
        self.counts_dict = learned

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Map each value to its count, preferring train-time counts for
        values that were seen during fit."""
        encoded = []
        for col in self.cols:
            uniques, freqs = np.unique(X[col], return_counts=True)
            mapping = dict(zip(uniques, freqs))
            # if value is in "train" keys - replace "test" counts with "train" counts
            for value in mapping:
                if value in self.counts_dict[col]:
                    mapping[value] = self.counts_dict[col][value]
            encoded.append(X[col].map(mapping).values.reshape(-1, 1))
        X[self.cols] = np.hstack(encoded)
        return X

    def fit_transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
        """Fit on ``X`` and immediately encode it."""
        self.fit(X, y)
        return self.transform(X)
if __name__ == "__main__":
    # Smoke test: encode a tiny toy frame with each wrapper and print results.
    df = pd.DataFrame({})
    df["cat_col"] = [1, 2, 3, 1, 2, 3, 1, 1, 1]
    df["target"] = [0, 1, 0, 1, 0, 1, 0, 1, 0]
    # Plain category_encoders CatBoostEncoder for reference output.
    temp = df.copy()
    enc = CatBoostEncoder(cols=["cat_col"])
    print(enc.fit_transform(temp, temp["target"]))
    # Same encoder through the MultipleEncoder wrapper.
    temp = df.copy()
    enc = MultipleEncoder(cols=["cat_col"], encoders_names_tuple=("CatBoostEncoder",))
    print(enc.fit_transform(temp, temp["target"]))
    # Same encoder with out-of-fold double validation.
    temp = df.copy()
    enc = DoubleValidationEncoderNumerical(
        cols=["cat_col"], encoders_names_tuple=("CatBoostEncoder",)
    )
    print(enc.fit_transform(temp, temp["target"]))
| [
"category_encoders.target_encoder.TargetEncoder",
"category_encoders.backward_difference.BackwardDifferenceEncoder",
"category_encoders.one_hot.OneHotEncoder",
"category_encoders.james_stein.JamesSteinEncoder",
"numpy.unique",
"category_encoders.cat_boost.CatBoostEncoder",
"numpy.hstack",
"category_en... | [((10098, 10114), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (10110, 10114), True, 'import pandas as pd\n'), ((10248, 10281), 'category_encoders.cat_boost.CatBoostEncoder', 'CatBoostEncoder', ([], {'cols': "['cat_col']"}), "(cols=['cat_col'])\n", (10263, 10281), False, 'from category_encoders.cat_boost import CatBoostEncoder\n'), ((1133, 1158), 'category_encoders.woe.WOEEncoder', 'WOEEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1143, 1158), False, 'from category_encoders.woe import WOEEncoder\n'), ((1218, 1246), 'category_encoders.target_encoder.TargetEncoder', 'TargetEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1231, 1246), False, 'from category_encoders.target_encoder import TargetEncoder\n'), ((1303, 1328), 'category_encoders.sum_coding.SumEncoder', 'SumEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1313, 1328), False, 'from category_encoders.sum_coding import SumEncoder\n'), ((1391, 1422), 'category_encoders.m_estimate.MEstimateEncoder', 'MEstimateEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1407, 1422), False, 'from category_encoders.m_estimate import MEstimateEncoder\n'), ((1487, 1520), 'category_encoders.leave_one_out.LeaveOneOutEncoder', 'LeaveOneOutEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1505, 1520), False, 'from category_encoders.leave_one_out import LeaveOneOutEncoder\n'), ((1581, 1610), 'category_encoders.helmert.HelmertEncoder', 'HelmertEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1595, 1610), False, 'from category_encoders.helmert import HelmertEncoder\n'), ((1682, 1722), 'category_encoders.backward_difference.BackwardDifferenceEncoder', 'BackwardDifferenceEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1707, 1722), False, 'from category_encoders.backward_difference import BackwardDifferenceEncoder\n'), ((1786, 1818), 'category_encoders.james_stein.JamesSteinEncoder', 'JamesSteinEncoder', ([], {'cols': 
'cat_cols'}), '(cols=cat_cols)\n', (1803, 1818), False, 'from category_encoders.james_stein import JamesSteinEncoder\n'), ((1879, 1908), 'category_encoders.ordinal.OrdinalEncoder', 'OrdinalEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1893, 1908), False, 'from category_encoders.ordinal import OrdinalEncoder\n'), ((1970, 2000), 'category_encoders.cat_boost.CatBoostEncoder', 'CatBoostEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (1985, 2000), False, 'from category_encoders.cat_boost import CatBoostEncoder\n'), ((2063, 2094), 'category_encoders.m_estimate.MEstimateEncoder', 'MEstimateEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (2079, 2094), False, 'from category_encoders.m_estimate import MEstimateEncoder\n'), ((2153, 2181), 'category_encoders.one_hot.OneHotEncoder', 'OneHotEncoder', ([], {'cols': 'cat_cols'}), '(cols=cat_cols)\n', (2166, 2181), False, 'from category_encoders.one_hot import OneHotEncoder\n'), ((2738, 2830), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': 'self.n_folds', 'n_repeats': 'self.n_repeats', 'random_state': '(0)'}), '(n_splits=self.n_folds, n_repeats=self.n_repeats,\n random_state=0)\n', (2761, 2830), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((9859, 9873), 'numpy.hstack', 'np.hstack', (['res'], {}), '(res)\n', (9868, 9873), True, 'import numpy as np\n'), ((4299, 4332), 'pandas.DataFrame', 'pd.DataFrame', (['cols_representation'], {}), '(cols_representation)\n', (4311, 4332), True, 'import pandas as pd\n'), ((4600, 4626), 'pandas.concat', 'pd.concat', (['[X, df]'], {'axis': '(1)'}), '([X, df], axis=1)\n', (4609, 4626), True, 'import pandas as pd\n'), ((5413, 5446), 'pandas.DataFrame', 'pd.DataFrame', (['cols_representation'], {}), '(cols_representation)\n', (5425, 5446), True, 'import pandas as pd\n'), ((5714, 5740), 'pandas.concat', 'pd.concat', (['[X, df]'], {'axis': '(1)'}), '([X, df], axis=1)\n', (5723, 5740), True, 
'import pandas as pd\n'), ((7243, 7276), 'pandas.DataFrame', 'pd.DataFrame', (['cols_representation'], {}), '(cols_representation)\n', (7255, 7276), True, 'import pandas as pd\n'), ((7609, 7635), 'pandas.concat', 'pd.concat', (['[X, df]'], {'axis': '(1)'}), '([X, df], axis=1)\n', (7618, 7635), True, 'import pandas as pd\n'), ((8255, 8288), 'pandas.DataFrame', 'pd.DataFrame', (['cols_representation'], {}), '(cols_representation)\n', (8267, 8288), True, 'import pandas as pd\n'), ((8621, 8647), 'pandas.concat', 'pd.concat', (['[X, df]'], {'axis': '(1)'}), '([X, df], axis=1)\n', (8630, 8647), True, 'import pandas as pd\n'), ((9041, 9078), 'numpy.unique', 'np.unique', (['X[col]'], {'return_counts': '(True)'}), '(X[col], return_counts=True)\n', (9050, 9078), True, 'import numpy as np\n'), ((9340, 9377), 'numpy.unique', 'np.unique', (['X[col]'], {'return_counts': '(True)'}), '(X[col], return_counts=True)\n', (9349, 9377), True, 'import numpy as np\n'), ((3994, 4032), 'numpy.zeros', 'np.zeros', (['(X.shape[0], val_t.shape[1])'], {}), '((X.shape[0], val_t.shape[1]))\n', (4002, 4032), True, 'import numpy as np\n'), ((5213, 5236), 'numpy.zeros', 'np.zeros', (['test_tr.shape'], {}), '(test_tr.shape)\n', (5221, 5236), True, 'import numpy as np\n')] |
import pickle
import struct
from unittest import mock
import numpy as np
import pytest
import pygeos
from .common import all_types, empty_point, point, point_z
# fmt: off
# Hand-built WKB fixtures. The leading b"\x01" byte selects little-endian
# encoding, matching the "<" format used with struct.pack below.
POINT11_WKB = b"\x01\x01\x00\x00\x00" + struct.pack("<2d", 1.0, 1.0)
# A single little-endian IEEE-754 double NaN, repeated to build the
# coordinate payloads of "empty"/NaN geometries below.
NAN = struct.pack("<d", float("nan"))
# POINT (nan nan): 2D variant and Z variant (the 0x80 type byte flags Z).
POINT_NAN_WKB = b'\x01\x01\x00\x00\x00' + (NAN * 2)
POINTZ_NAN_WKB = b'\x01\x01\x00\x00\x80' + (NAN * 3)
# MULTIPOINT holding one all-NaN point (2D and Z).
MULTIPOINT_NAN_WKB = b'\x01\x04\x00\x00\x00\x01\x00\x00\x00\x01\x01\x00\x00\x00' + (NAN * 2)
MULTIPOINTZ_NAN_WKB = b'\x01\x04\x00\x00\x80\x01\x00\x00\x00\x01\x01\x00\x00\x80' + (NAN * 3)
# GEOMETRYCOLLECTION holding one all-NaN point (2D and Z).
GEOMETRYCOLLECTION_NAN_WKB = b'\x01\x07\x00\x00\x00\x01\x00\x00\x00\x01\x01\x00\x00\x00' + (NAN * 2)
GEOMETRYCOLLECTIONZ_NAN_WKB = b'\x01\x07\x00\x00\x80\x01\x00\x00\x00\x01\x01\x00\x00\x80' + (NAN * 3)
# GEOMETRYCOLLECTION containing a MULTIPOINT with one all-NaN point.
NESTED_COLLECTION_NAN_WKB = b'\x01\x07\x00\x00\x00\x01\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x01\x01\x00\x00\x00' + (NAN * 2)
NESTED_COLLECTIONZ_NAN_WKB = b'\x01\x07\x00\x00\x80\x01\x00\x00\x00\x01\x04\x00\x00\x80\x01\x00\x00\x00\x01\x01\x00\x00\x80' + (NAN * 3)
# fmt: on
class ShapelyGeometryMock:
    """Minimal stand-in for a shapely geometry, backed by a pygeos geometry."""

    def __init__(self, g):
        self.g = g
        # shapely exposes the raw GEOS pointer as __geom__; fall back to g itself
        self.__geom__ = getattr(g, "_ptr", g)

    @property
    def __array_interface__(self):
        # this should not be called
        # (starting with numpy 1.20 it is called, but not used)
        return np.array([1.0, 2.0]).__array_interface__

    @property
    def wkb(self):
        """WKB serialization of the wrapped geometry."""
        return pygeos.to_wkb(self.g)

    @property
    def geom_type(self):
        """Shapely-style geometry type name for the wrapped geometry."""
        type_names = (
            "None",
            "Point",
            "LineString",
            "LinearRing",
            "Polygon",
            "MultiPoint",
            "MultiLineString",
            "MultiPolygon",
            "GeometryCollection",
        )
        return type_names[pygeos.get_type_id(self.g)]

    @property
    def is_empty(self):
        """True if the wrapped geometry is empty."""
        return pygeos.is_empty(self.g)
class ShapelyPreparedMock:
    """Minimal stand-in for a shapely PreparedGeometry; wraps the source geometry."""
    def __init__(self, g):
        # prepared geometries expose their source geometry as .context
        self.context = ShapelyGeometryMock(g)
def shapely_wkb_loads_mock(wkb):
    """Mock of shapely.wkb.loads: parse via pygeos and wrap in the mock type."""
    return ShapelyGeometryMock(pygeos.from_wkb(wkb))
def test_from_wkt():
    """from_wkt accepts both str and bytes WKT input."""
    want = pygeos.points(1, 1)
    assert pygeos.equals(pygeos.from_wkt("POINT (1 1)"), want)
    # also accept bytes
    assert pygeos.equals(pygeos.from_wkt(b"POINT (1 1)"), want)
def test_from_wkt_none():
    """None input propagates to a None result."""
    result = pygeos.from_wkt(None)
    assert result is None
def test_from_wkt_exceptions():
    """Invalid input types and malformed WKT raise the appropriate errors."""
    # non-string/bytes input is rejected before parsing
    with pytest.raises(TypeError, match="Expected bytes, got int"):
        pygeos.from_wkt(1)
    # an empty string is not valid WKT
    with pytest.raises(
        pygeos.GEOSException, match="Expected word but encountered end of stream"
    ):
        pygeos.from_wkt("")
    # an unknown geometry type keyword is rejected
    with pytest.raises(pygeos.GEOSException, match="Unknown type: 'NOT'"):
        pygeos.from_wkt("NOT A WKT STRING")
def test_from_wkt_warn_on_invalid():
    """With on_invalid="warn", malformed WKT warns instead of raising."""
    with pytest.warns(Warning, match="Invalid WKT"):
        pygeos.from_wkt("", on_invalid="warn")
    with pytest.warns(Warning, match="Invalid WKT"):
        pygeos.from_wkt("NOT A WKT STRING", on_invalid="warn")
def test_from_wkt_ignore_on_invalid():
    """With on_invalid="ignore", malformed WKT is silently skipped.

    Renamed from ``test_from_wkb_ignore_on_invalid``: this test exercises
    ``from_wkt``, not ``from_wkb`` (the from_wkb variant is
    ``test_from_wkb_ignore_on_invalid_ignore`` below).
    """
    with pytest.warns(None):
        pygeos.from_wkt("", on_invalid="ignore")
    with pytest.warns(None):
        pygeos.from_wkt("NOT A WKT STRING", on_invalid="ignore")
def test_from_wkt_on_invalid_unsupported_option():
    """An unknown on_invalid value raises ValueError."""
    with pytest.raises(ValueError, match="not a valid option"):
        # the (non-WKT) payload is irrelevant; the option is validated first
        pygeos.from_wkt(b"\x01\x01\x00\x00\x00\x00", on_invalid="unsupported_option")
@pytest.mark.parametrize("geom", all_types)
def test_from_wkt_all_types(geom):
    """Every geometry type survives a WKT round trip."""
    roundtripped = pygeos.from_wkt(pygeos.to_wkt(geom))
    assert pygeos.equals(roundtripped, geom)
@pytest.mark.parametrize(
    "wkt",
    ("POINT EMPTY", "LINESTRING EMPTY", "POLYGON EMPTY", "GEOMETRYCOLLECTION EMPTY"),
)
def test_from_wkt_empty(wkt):
    """Empty-geometry WKT parses to an empty geometry and round-trips exactly."""
    parsed = pygeos.from_wkt(wkt)
    assert pygeos.is_geometry(parsed).all()
    assert pygeos.is_empty(parsed).all()
    assert pygeos.to_wkt(parsed) == wkt
def test_from_wkb():
    """from_wkb parses the hand-built little-endian POINT (1 1) encoding."""
    result = pygeos.from_wkb(POINT11_WKB)
    assert pygeos.equals(result, pygeos.points(1, 1))
def test_from_wkb_hex():
    """from_wkb also accepts hex-encoded WKB, as either str or bytes."""
    want = pygeos.points(1, 1)
    for hex_wkb in (
        "0101000000000000000000F03F000000000000F03F",
        b"0101000000000000000000F03F000000000000F03F",
    ):
        assert pygeos.equals(pygeos.from_wkb(hex_wkb), want)
def test_from_wkb_none():
    """None input propagates to a None result."""
    result = pygeos.from_wkb(None)
    assert result is None
def test_from_wkb_exceptions():
    """Invalid input types and malformed WKB raise the appropriate errors."""
    # non-bytes/str input is rejected before parsing
    with pytest.raises(TypeError, match="Expected bytes, got int"):
        pygeos.from_wkb(1)
    # invalid WKB
    with pytest.raises(pygeos.GEOSException, match="Unexpected EOF parsing WKB"):
        result = pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00")
        assert result is None
    # invalid ring in WKB
    with pytest.raises(
        pygeos.GEOSException,
        match="Invalid number of points in LinearRing found 3 - must be 0 or >= 4",
    ):
        result = pygeos.from_wkb(
            b"\x01\x03\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00P}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A0n\xa3!\xfc\xb05A\xa0\x11\xa5=\x90^=AP}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A"
        )
        assert result is None
def test_from_wkb_warn_on_invalid_warn():
    """With on_invalid="warn", malformed WKB warns and yields None."""
    # invalid WKB
    with pytest.warns(Warning, match="Invalid WKB"):
        result = pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00", on_invalid="warn")
        assert result is None
    # invalid ring in WKB
    with pytest.warns(Warning, match="Invalid WKB"):
        result = pygeos.from_wkb(
            b"\x01\x03\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00P}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A0n\xa3!\xfc\xb05A\xa0\x11\xa5=\x90^=AP}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A",
            on_invalid="warn",
        )
        assert result is None
def test_from_wkb_ignore_on_invalid_ignore():
    """With on_invalid="ignore", malformed WKB yields None without warning."""
    # invalid WKB
    with pytest.warns(None) as w:
        result = pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00", on_invalid="ignore")
        assert result is None
    assert len(w) == 0  # no warning
    # invalid ring in WKB
    with pytest.warns(None) as w:
        result = pygeos.from_wkb(
            b"\x01\x03\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00P}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A0n\xa3!\xfc\xb05A\xa0\x11\xa5=\x90^=AP}\xae\xc6\x00\xb15A\x00\xde\x02I\x8e^=A",
            on_invalid="ignore",
        )
        assert result is None
    assert len(w) == 0  # no warning
def test_from_wkb_on_invalid_unsupported_option():
    """An unknown on_invalid value raises ValueError."""
    with pytest.raises(ValueError, match="not a valid option"):
        pygeos.from_wkb(b"\x01\x01\x00\x00\x00\x00", on_invalid="unsupported_option")
@pytest.mark.parametrize("geom", all_types)
@pytest.mark.parametrize("use_hex", [False, True])
@pytest.mark.parametrize("byte_order", [0, 1])
def test_from_wkb_all_types(geom, use_hex, byte_order):
    """Every geometry type survives a WKB round trip for all encodings."""
    roundtripped = pygeos.from_wkb(
        pygeos.to_wkb(geom, hex=use_hex, byte_order=byte_order)
    )
    assert pygeos.equals(roundtripped, geom)
@pytest.mark.parametrize(
    "wkt",
    ("POINT EMPTY", "LINESTRING EMPTY", "POLYGON EMPTY", "GEOMETRYCOLLECTION EMPTY"),
)
def test_from_wkb_empty(wkt):
    """Empty geometries survive a WKB round trip byte-for-byte."""
    wkb = pygeos.to_wkb(pygeos.Geometry(wkt))
    parsed = pygeos.from_wkb(wkb)
    assert pygeos.is_geometry(parsed).all()
    assert pygeos.is_empty(parsed).all()
    assert pygeos.to_wkb(parsed) == wkb
def test_to_wkt():
    """to_wkt trims by default; trim/rounding_precision control formatting."""
    pt = pygeos.points(1, 1)
    assert pygeos.to_wkt(pt) == "POINT (1 1)"
    assert pygeos.to_wkt(pt, trim=False) == "POINT (1.000000 1.000000)"
    assert pygeos.to_wkt(pt, rounding_precision=3, trim=False) == "POINT (1.000 1.000)"
def test_to_wkt_3D():
    """Z coordinates are written per output_dimension and old_3d settings."""
    pt3 = pygeos.points(1, 1, 1)
    assert pygeos.to_wkt(pt3) == "POINT Z (1 1 1)"
    assert pygeos.to_wkt(pt3, output_dimension=3) == "POINT Z (1 1 1)"
    # limiting output to 2 dimensions drops the Z value
    assert pygeos.to_wkt(pt3, output_dimension=2) == "POINT (1 1)"
    # old-style 3D omits the "Z" marker
    assert pygeos.to_wkt(pt3, old_3d=True) == "POINT (1 1 1)"
def test_to_wkt_none():
    """None input propagates to a None result."""
    result = pygeos.to_wkt(None)
    assert result is None
def test_to_wkt_exceptions():
    """Non-geometry input and unsupported dimensions raise."""
    with pytest.raises(TypeError):
        pygeos.to_wkt(1)
    # GEOS supports at most 3 output dimensions
    with pytest.raises(pygeos.GEOSException):
        pygeos.to_wkt(point, output_dimension=4)
def test_to_wkt_point_empty():
    """An empty point is written as POINT EMPTY."""
    result = pygeos.to_wkt(empty_point)
    assert result == "POINT EMPTY"
def test_to_wkt_geometrycollection_with_point_empty():
    """A collection containing an empty point is still representable as WKT."""
    collection = pygeos.geometrycollections([empty_point, point])
    # do not check the full value as some GEOS versions give
    # GEOMETRYCOLLECTION Z (...) and others give GEOMETRYCOLLECTION (...)
    wkt = pygeos.to_wkt(collection)
    assert wkt.endswith("(POINT EMPTY, POINT (2 3))")
def test_to_wkt_multipoint_with_point_empty_errors():
    """Writing a multipoint containing an empty point raises (no segfault)."""
    # Test if segfault is prevented
    bad = pygeos.multipoints([empty_point, point])
    with pytest.raises(ValueError):
        pygeos.to_wkt(bad)
def test_repr():
    """repr of a geometry embeds its WKT."""
    representation = repr(point)
    assert representation == "<pygeos.Geometry POINT (2 3)>"
def test_repr_max_length():
    """Long geometry reprs are truncated to 80 characters with an ellipsis."""
    # the repr is limited to 80 characters
    big = pygeos.linestrings(np.arange(1000), np.arange(1000))
    text = repr(big)
    assert len(text) == 80
    assert text.endswith("...>")
def test_repr_multipoint_with_point_empty():
    """repr survives a WKT-writer failure instead of segfaulting."""
    # Test if segfault is prevented
    bad = pygeos.multipoints([point, empty_point])
    assert repr(bad) == "<pygeos.Geometry Exception in WKT writer>"
def test_to_wkb():
    """to_wkb of POINT (1 1) matches the hand-built little-endian encoding."""
    pt = pygeos.points(1, 1)
    assert pygeos.to_wkb(pt, byte_order=1) == POINT11_WKB
def test_to_wkb_hex():
    """hex=True yields the hex encoding of the little-endian WKB bytes."""
    pt = pygeos.points(1, 1)
    le = "01"
    point_type = "01000000"
    coord = "000000000000F03F"  # 1.0 as double (LE)
    expected = le + point_type + 2 * coord
    assert pygeos.to_wkb(pt, hex=True, byte_order=1) == expected
def test_to_wkb_3D():
    """Z coordinates are written for 3D output and dropped for 2D output."""
    point_z = pygeos.points(1, 1, 1)
    actual = pygeos.to_wkb(point_z, byte_order=1)
    # fmt: off
    # little-endian 3D point type word (0x80000001) followed by three 1.0 doubles
    assert actual == b"\x01\x01\x00\x00\x80\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\xf0?"  # noqa
    # fmt: on
    # limiting output to 2 dimensions drops the Z coordinate
    actual = pygeos.to_wkb(point_z, output_dimension=2, byte_order=1)
    assert actual == POINT11_WKB
def test_to_wkb_none():
    """None input propagates to a None result."""
    result = pygeos.to_wkb(None)
    assert result is None
def test_to_wkb_exceptions():
    """Non-geometry input and unsupported dimensions raise."""
    with pytest.raises(TypeError):
        pygeos.to_wkb(1)
    # GEOS supports at most 3 output dimensions
    with pytest.raises(pygeos.GEOSException):
        pygeos.to_wkb(point, output_dimension=4)
def test_to_wkb_byte_order():
    """byte_order selects little- (1) vs big-endian (0) output."""
    pt = pygeos.points(1.0, 1.0)
    be = b"\x00"
    le = b"\x01"
    point_type = b"\x01\x00\x00\x00"  # 1 as 32-bit uint (LE)
    coord = b"\x00\x00\x00\x00\x00\x00\xf0?"  # 1.0 as double (LE)
    assert pygeos.to_wkb(pt, byte_order=1) == le + point_type + 2 * coord
    # big-endian output is the byte-reversed form of each field
    assert pygeos.to_wkb(pt, byte_order=0) == be + point_type[::-1] + 2 * coord[::-1]
def test_to_wkb_srid():
    """include_srid writes EWKB with the SRID embedded after the type word."""
    # hex representation of POINT (0 0) with SRID=4
    ewkb = "01010000200400000000000000000000000000000000000000"
    wkb = "010100000000000000000000000000000000000000"
    geom = pygeos.from_wkb(ewkb)
    assert pygeos.to_wkt(geom, trim=True) == "POINT (0 0)"
    # without include_srid the SRID word is dropped
    assert pygeos.to_wkb(geom, hex=True, byte_order=1) == wkb
    assert pygeos.to_wkb(geom, hex=True, include_srid=True, byte_order=1) == ewkb
    pt = pygeos.points(1, 1)
    with_srid = pygeos.set_srid(pt, np.int32(4326))
    serialized = pygeos.to_wkb(with_srid, include_srid=True, byte_order=1)
    # bytes 5..9 hold the SRID as a little-endian uint32
    assert np.frombuffer(serialized[5:9], "<u4").item() == 4326
@pytest.mark.skipif(
    pygeos.geos_version >= (3, 8, 0), reason="Pre GEOS 3.8.0 has 3D empty points"
)
@pytest.mark.parametrize(
    "geom,dims,expected",
    [
        (empty_point, 2, POINT_NAN_WKB),
        (empty_point, 3, POINTZ_NAN_WKB),
        (pygeos.multipoints([empty_point]), 2, MULTIPOINT_NAN_WKB),
        (pygeos.multipoints([empty_point]), 3, MULTIPOINTZ_NAN_WKB),
        (pygeos.geometrycollections([empty_point]), 2, GEOMETRYCOLLECTION_NAN_WKB),
        (pygeos.geometrycollections([empty_point]), 3, GEOMETRYCOLLECTIONZ_NAN_WKB),
        (
            pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
            2,
            NESTED_COLLECTION_NAN_WKB,
        ),
        (
            pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
            3,
            NESTED_COLLECTIONZ_NAN_WKB,
        ),
    ],
)
def test_to_wkb_point_empty_pre_geos38(geom, dims, expected):
    """Empty points serialize as NaN coordinates; pre-3.8 GEOS honors 3D output."""
    actual = pygeos.to_wkb(geom, output_dimension=dims, byte_order=1)
    # compare the header bytes exactly, then check the coordinate payload is NaN
    # Use numpy.isnan; there are many byte representations for NaN
    assert actual[: -dims * 8] == expected[: -dims * 8]
    assert np.isnan(struct.unpack("<{}d".format(dims), actual[-dims * 8 :])).all()
@pytest.mark.skipif(
    pygeos.geos_version < (3, 8, 0), reason="Post GEOS 3.8.0 has 2D empty points"
)
@pytest.mark.parametrize(
    "geom,dims,expected",
    [
        (empty_point, 2, POINT_NAN_WKB),
        (empty_point, 3, POINT_NAN_WKB),
        (pygeos.multipoints([empty_point]), 2, MULTIPOINT_NAN_WKB),
        (pygeos.multipoints([empty_point]), 3, MULTIPOINT_NAN_WKB),
        (pygeos.geometrycollections([empty_point]), 2, GEOMETRYCOLLECTION_NAN_WKB),
        (pygeos.geometrycollections([empty_point]), 3, GEOMETRYCOLLECTION_NAN_WKB),
        (
            pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
            2,
            NESTED_COLLECTION_NAN_WKB,
        ),
        (
            pygeos.geometrycollections([pygeos.multipoints([empty_point])]),
            3,
            NESTED_COLLECTION_NAN_WKB,
        ),
    ],
)
def test_to_wkb_point_empty_post_geos38(geom, dims, expected):
    """Empty points always serialize as 2D NaN coordinates on GEOS >= 3.8."""
    # Post GEOS 3.8: empty point is 2D
    actual = pygeos.to_wkb(geom, output_dimension=dims, byte_order=1)
    # compare the header bytes exactly, then check the coordinate payload is NaN
    # Use numpy.isnan; there are many byte representations for NaN
    assert actual[: -2 * 8] == expected[: -2 * 8]
    assert np.isnan(struct.unpack("<2d", actual[-2 * 8 :])).all()
@pytest.mark.parametrize(
    "wkb,expected_type",
    [
        (POINT_NAN_WKB, 0),
        (POINTZ_NAN_WKB, 0),
        (MULTIPOINT_NAN_WKB, 4),
        (MULTIPOINTZ_NAN_WKB, 4),
        (GEOMETRYCOLLECTION_NAN_WKB, 7),
        (GEOMETRYCOLLECTIONZ_NAN_WKB, 7),
        (NESTED_COLLECTION_NAN_WKB, 7),
        (NESTED_COLLECTIONZ_NAN_WKB, 7),
    ],
)
def test_from_wkb_point_empty(wkb, expected_type):
    """NaN-coordinate point WKB parses to an empty geometry of the right type."""
    geom = pygeos.from_wkb(wkb)
    # POINT (nan nan) transforms to an empty point
    # Note that the dimensionality (2D/3D) is GEOS-version dependent
    assert pygeos.is_empty(geom)
    assert pygeos.get_type_id(geom) == expected_type
def test_to_wkb_point_empty_srid():
    """The SRID survives a WKB round trip of an empty point."""
    with_srid = pygeos.set_srid(empty_point, 4236)
    roundtripped = pygeos.from_wkb(pygeos.to_wkb(with_srid, include_srid=True))
    assert pygeos.get_srid(roundtripped) == 4236
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely(geom):
    """A mocked shapely geometry converts to an equal, newly owned Geometry."""
    result = pygeos.from_shapely(ShapelyGeometryMock(geom))
    assert isinstance(result, pygeos.Geometry)
    assert pygeos.equals(geom, result)
    # a new GEOS geometry is created, not a reference to the source
    assert geom._ptr != result._ptr
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_prepared(geom):
    """A mocked prepared geometry converts to an equal, newly owned Geometry."""
    result = pygeos.from_shapely(ShapelyPreparedMock(geom))
    assert isinstance(result, pygeos.Geometry)
    assert pygeos.equals(geom, result)
    # a new GEOS geometry is created, not a reference to the source
    assert geom._ptr != result._ptr
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_arr():
    """Array input converts element-wise."""
    result = pygeos.from_shapely([ShapelyGeometryMock(point), None])
    assert pygeos.equals(point, result[0])
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_none():
    """None input propagates to a None result."""
    result = pygeos.from_shapely(None)
    assert result is None
@pytest.mark.parametrize("geom", [1, 2.3, "x", ShapelyGeometryMock(None)])
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", True)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_error(geom):
    """Unsupported input (numbers, strings, a mock wrapping None) raises TypeError."""
    with pytest.raises(TypeError):
        pygeos.from_shapely(geom)
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_incompatible(geom):
    """Conversion still works when shapely is flagged incompatible."""
    result = pygeos.from_shapely(ShapelyGeometryMock(geom))
    assert isinstance(result, pygeos.Geometry)
    assert pygeos.equals(geom, result)
    # a new GEOS geometry is created, not a reference to the source
    assert geom._ptr != result._ptr
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_incompatible_prepared(geom):
    """Prepared-geometry conversion works when shapely is flagged incompatible."""
    result = pygeos.from_shapely(ShapelyPreparedMock(geom))
    assert isinstance(result, pygeos.Geometry)
    assert pygeos.equals(geom, result)
    # a new GEOS geometry is created, not a reference to the source
    assert geom._ptr != result._ptr
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_incompatible_none():
    """None input propagates to a None result."""
    result = pygeos.from_shapely(None)
    assert result is None
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.ShapelyPreparedGeometry", ShapelyPreparedMock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_from_shapely_incompatible_array():
    """Array input converts element-wise in incompatible mode."""
    result = pygeos.from_shapely([ShapelyGeometryMock(point), None])
    assert pygeos.equals(point, result[0])
@pytest.mark.parametrize("geom", all_types)
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.shapely_wkb_loads", shapely_wkb_loads_mock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_to_shapely_incompatible(geom):
    """to_shapely falls back to the WKB loader when flagged incompatible."""
    result = pygeos.to_shapely(geom)
    assert isinstance(result, ShapelyGeometryMock)
    assert pygeos.equals(geom, result.g)
    # the round trip produces a new geometry object
    assert geom._ptr != result.g._ptr
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.shapely_wkb_loads", shapely_wkb_loads_mock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_to_shapely_incompatible_none():
    """None input propagates to a None result."""
    result = pygeos.to_shapely(None)
    assert result is None
@mock.patch("pygeos.io.ShapelyGeometry", ShapelyGeometryMock)
@mock.patch("pygeos.io.shapely_wkb_loads", shapely_wkb_loads_mock)
@mock.patch("pygeos.io.shapely_compatible", False)
@mock.patch("pygeos.io._shapely_checked", True)
def test_to_shapely_incompatible_array():
    """Array input converts element-wise in incompatible mode."""
    result = pygeos.to_shapely([point, None])
    assert pygeos.equals(point, result[0].g)
@pytest.mark.parametrize("geom", all_types + (point_z, empty_point))
def test_pickle(geom):
    """Geometries survive a pickle round trip."""
    if pygeos.get_type_id(geom) == 2:
        # Linearrings get converted to linestrings
        expected = pygeos.linestrings(pygeos.get_coordinates(geom))
    else:
        expected = geom
    restored = pickle.loads(pickle.dumps(geom))
    assert pygeos.equals_exact(restored, expected)
def test_pickle_with_srid():
    """The SRID is preserved through a pickle round trip."""
    geom = pygeos.set_srid(point, 4326)
    restored = pickle.loads(pickle.dumps(geom))
    assert pygeos.get_srid(restored) == 4326
| [
"pygeos.equals",
"pickle.dumps",
"pygeos.set_srid",
"numpy.int32",
"numpy.array",
"pygeos.to_wkb",
"pygeos.is_geometry",
"pygeos.from_wkt",
"pickle.loads",
"pygeos.geometrycollections",
"unittest.mock.patch",
"numpy.arange",
"pygeos.to_wkt",
"pygeos.get_srid",
"pygeos.Geometry",
"pytes... | [((3519, 3561), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 'all_types'], {}), "('geom', all_types)\n", (3542, 3561), False, 'import pytest\n'), ((3703, 3819), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""wkt"""', "('POINT EMPTY', 'LINESTRING EMPTY', 'POLYGON EMPTY', 'GEOMETRYCOLLECTION EMPTY'\n )"], {}), "('wkt', ('POINT EMPTY', 'LINESTRING EMPTY',\n 'POLYGON EMPTY', 'GEOMETRYCOLLECTION EMPTY'))\n", (3726, 3819), False, 'import pytest\n'), ((6761, 6803), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 'all_types'], {}), "('geom', all_types)\n", (6784, 6803), False, 'import pytest\n'), ((6805, 6854), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_hex"""', '[False, True]'], {}), "('use_hex', [False, True])\n", (6828, 6854), False, 'import pytest\n'), ((6856, 6901), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""byte_order"""', '[0, 1]'], {}), "('byte_order', [0, 1])\n", (6879, 6901), False, 'import pytest\n'), ((7100, 7216), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""wkt"""', "('POINT EMPTY', 'LINESTRING EMPTY', 'POLYGON EMPTY', 'GEOMETRYCOLLECTION EMPTY'\n )"], {}), "('wkt', ('POINT EMPTY', 'LINESTRING EMPTY',\n 'POLYGON EMPTY', 'GEOMETRYCOLLECTION EMPTY'))\n", (7123, 7216), False, 'import pytest\n'), ((11768, 11870), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version >= (3, 8, 0))'], {'reason': '"""Pre GEOS 3.8.0 has 3D empty points"""'}), "(pygeos.geos_version >= (3, 8, 0), reason=\n 'Pre GEOS 3.8.0 has 3D empty points')\n", (11786, 11870), False, 'import pytest\n'), ((12974, 13076), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""Post GEOS 3.8.0 has 2D empty points"""'}), "(pygeos.geos_version < (3, 8, 0), reason=\n 'Post GEOS 3.8.0 has 2D empty points')\n", (12992, 13076), False, 'import pytest\n'), ((14193, 14477), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""wkb,expected_type"""', '[(POINT_NAN_WKB, 0), (POINTZ_NAN_WKB, 0), (MULTIPOINT_NAN_WKB, 4), (\n MULTIPOINTZ_NAN_WKB, 4), (GEOMETRYCOLLECTION_NAN_WKB, 7), (\n GEOMETRYCOLLECTIONZ_NAN_WKB, 7), (NESTED_COLLECTION_NAN_WKB, 7), (\n NESTED_COLLECTIONZ_NAN_WKB, 7)]'], {}), "('wkb,expected_type', [(POINT_NAN_WKB, 0), (\n POINTZ_NAN_WKB, 0), (MULTIPOINT_NAN_WKB, 4), (MULTIPOINTZ_NAN_WKB, 4),\n (GEOMETRYCOLLECTION_NAN_WKB, 7), (GEOMETRYCOLLECTIONZ_NAN_WKB, 7), (\n NESTED_COLLECTION_NAN_WKB, 7), (NESTED_COLLECTIONZ_NAN_WKB, 7)])\n", (14216, 14477), False, 'import pytest\n'), ((15056, 15098), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 'all_types'], {}), "('geom', all_types)\n", (15079, 15098), False, 'import pytest\n'), ((15100, 15160), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (15110, 15160), False, 'from unittest import mock\n'), ((15162, 15230), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (15172, 15230), False, 'from unittest import mock\n'), ((15232, 15280), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(True)'], {}), "('pygeos.io.shapely_compatible', True)\n", (15242, 15280), False, 'from unittest import mock\n'), ((15282, 15328), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (15292, 15328), False, 'from unittest import mock\n'), ((15543, 15585), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 'all_types'], {}), "('geom', all_types)\n", (15566, 15585), False, 'import pytest\n'), ((15587, 15647), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (15597, 
15647), False, 'from unittest import mock\n'), ((15649, 15717), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (15659, 15717), False, 'from unittest import mock\n'), ((15719, 15767), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(True)'], {}), "('pygeos.io.shapely_compatible', True)\n", (15729, 15767), False, 'from unittest import mock\n'), ((15769, 15815), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (15779, 15815), False, 'from unittest import mock\n'), ((16039, 16099), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (16049, 16099), False, 'from unittest import mock\n'), ((16101, 16169), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (16111, 16169), False, 'from unittest import mock\n'), ((16171, 16219), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(True)'], {}), "('pygeos.io.shapely_compatible', True)\n", (16181, 16219), False, 'from unittest import mock\n'), ((16221, 16267), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (16231, 16267), False, 'from unittest import mock\n'), ((16412, 16472), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (16422, 16472), False, 'from unittest import mock\n'), ((16474, 16542), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', 
ShapelyPreparedMock)\n", (16484, 16542), False, 'from unittest import mock\n'), ((16544, 16592), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(True)'], {}), "('pygeos.io.shapely_compatible', True)\n", (16554, 16592), False, 'from unittest import mock\n'), ((16594, 16640), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (16604, 16640), False, 'from unittest import mock\n'), ((16814, 16874), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (16824, 16874), False, 'from unittest import mock\n'), ((16876, 16944), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (16886, 16944), False, 'from unittest import mock\n'), ((16946, 16994), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(True)'], {}), "('pygeos.io.shapely_compatible', True)\n", (16956, 16994), False, 'from unittest import mock\n'), ((16996, 17042), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (17006, 17042), False, 'from unittest import mock\n'), ((17150, 17192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 'all_types'], {}), "('geom', all_types)\n", (17173, 17192), False, 'import pytest\n'), ((17194, 17254), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (17204, 17254), False, 'from unittest import mock\n'), ((17256, 17324), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (17266, 17324), 
False, 'from unittest import mock\n'), ((17326, 17375), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(False)'], {}), "('pygeos.io.shapely_compatible', False)\n", (17336, 17375), False, 'from unittest import mock\n'), ((17377, 17423), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (17387, 17423), False, 'from unittest import mock\n'), ((17651, 17693), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 'all_types'], {}), "('geom', all_types)\n", (17674, 17693), False, 'import pytest\n'), ((17695, 17755), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (17705, 17755), False, 'from unittest import mock\n'), ((17757, 17825), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (17767, 17825), False, 'from unittest import mock\n'), ((17827, 17876), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(False)'], {}), "('pygeos.io.shapely_compatible', False)\n", (17837, 17876), False, 'from unittest import mock\n'), ((17878, 17924), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (17888, 17924), False, 'from unittest import mock\n'), ((18161, 18221), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (18171, 18221), False, 'from unittest import mock\n'), ((18223, 18291), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (18233, 18291), False, 'from unittest import 
mock\n'), ((18293, 18342), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(False)'], {}), "('pygeos.io.shapely_compatible', False)\n", (18303, 18342), False, 'from unittest import mock\n'), ((18344, 18390), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (18354, 18390), False, 'from unittest import mock\n'), ((18502, 18562), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (18512, 18562), False, 'from unittest import mock\n'), ((18564, 18632), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyPreparedGeometry"""', 'ShapelyPreparedMock'], {}), "('pygeos.io.ShapelyPreparedGeometry', ShapelyPreparedMock)\n", (18574, 18632), False, 'from unittest import mock\n'), ((18634, 18683), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(False)'], {}), "('pygeos.io.shapely_compatible', False)\n", (18644, 18683), False, 'from unittest import mock\n'), ((18685, 18731), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (18695, 18731), False, 'from unittest import mock\n'), ((18891, 18933), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 'all_types'], {}), "('geom', all_types)\n", (18914, 18933), False, 'import pytest\n'), ((18935, 18995), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (18945, 18995), False, 'from unittest import mock\n'), ((18997, 19062), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_wkb_loads"""', 'shapely_wkb_loads_mock'], {}), "('pygeos.io.shapely_wkb_loads', shapely_wkb_loads_mock)\n", (19007, 19062), False, 'from unittest import mock\n'), ((19064, 19113), 
'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(False)'], {}), "('pygeos.io.shapely_compatible', False)\n", (19074, 19113), False, 'from unittest import mock\n'), ((19115, 19161), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (19125, 19161), False, 'from unittest import mock\n'), ((19372, 19432), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (19382, 19432), False, 'from unittest import mock\n'), ((19434, 19499), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_wkb_loads"""', 'shapely_wkb_loads_mock'], {}), "('pygeos.io.shapely_wkb_loads', shapely_wkb_loads_mock)\n", (19444, 19499), False, 'from unittest import mock\n'), ((19501, 19550), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(False)'], {}), "('pygeos.io.shapely_compatible', False)\n", (19511, 19550), False, 'from unittest import mock\n'), ((19552, 19598), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (19562, 19598), False, 'from unittest import mock\n'), ((19706, 19766), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.ShapelyGeometry"""', 'ShapelyGeometryMock'], {}), "('pygeos.io.ShapelyGeometry', ShapelyGeometryMock)\n", (19716, 19766), False, 'from unittest import mock\n'), ((19768, 19833), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_wkb_loads"""', 'shapely_wkb_loads_mock'], {}), "('pygeos.io.shapely_wkb_loads', shapely_wkb_loads_mock)\n", (19778, 19833), False, 'from unittest import mock\n'), ((19835, 19884), 'unittest.mock.patch', 'mock.patch', (['"""pygeos.io.shapely_compatible"""', '(False)'], {}), "('pygeos.io.shapely_compatible', False)\n", (19845, 19884), False, 'from unittest import mock\n'), ((19886, 19932), 
'unittest.mock.patch', 'mock.patch', (['"""pygeos.io._shapely_checked"""', '(True)'], {}), "('pygeos.io._shapely_checked', True)\n", (19896, 19932), False, 'from unittest import mock\n'), ((20069, 20136), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', '(all_types + (point_z, empty_point))'], {}), "('geom', all_types + (point_z, empty_point))\n", (20092, 20136), False, 'import pytest\n'), ((215, 243), 'struct.pack', 'struct.pack', (['"""<2d"""', '(1.0)', '(1.0)'], {}), "('<2d', 1.0, 1.0)\n", (226, 243), False, 'import struct\n'), ((2046, 2066), 'pygeos.from_wkb', 'pygeos.from_wkb', (['wkb'], {}), '(wkb)\n', (2061, 2066), False, 'import pygeos\n'), ((2142, 2161), 'pygeos.points', 'pygeos.points', (['(1)', '(1)'], {}), '(1, 1)\n', (2155, 2161), False, 'import pygeos\n'), ((2175, 2205), 'pygeos.from_wkt', 'pygeos.from_wkt', (['"""POINT (1 1)"""'], {}), "('POINT (1 1)')\n", (2190, 2205), False, 'import pygeos\n'), ((2217, 2248), 'pygeos.equals', 'pygeos.equals', (['actual', 'expected'], {}), '(actual, expected)\n', (2230, 2248), False, 'import pygeos\n'), ((2286, 2317), 'pygeos.from_wkt', 'pygeos.from_wkt', (["b'POINT (1 1)'"], {}), "(b'POINT (1 1)')\n", (2301, 2317), False, 'import pygeos\n'), ((2329, 2360), 'pygeos.equals', 'pygeos.equals', (['actual', 'expected'], {}), '(actual, expected)\n', (2342, 2360), False, 'import pygeos\n'), ((3607, 3626), 'pygeos.to_wkt', 'pygeos.to_wkt', (['geom'], {}), '(geom)\n', (3620, 3626), False, 'import pygeos\n'), ((3640, 3660), 'pygeos.from_wkt', 'pygeos.from_wkt', (['wkt'], {}), '(wkt)\n', (3655, 3660), False, 'import pygeos\n'), ((3672, 3699), 'pygeos.equals', 'pygeos.equals', (['actual', 'geom'], {}), '(actual, geom)\n', (3685, 3699), False, 'import pygeos\n'), ((3868, 3888), 'pygeos.from_wkt', 'pygeos.from_wkt', (['wkt'], {}), '(wkt)\n', (3883, 3888), False, 'import pygeos\n'), ((4046, 4065), 'pygeos.points', 'pygeos.points', (['(1)', '(1)'], {}), '(1, 1)\n', (4059, 4065), False, 'import pygeos\n'), 
((4079, 4107), 'pygeos.from_wkb', 'pygeos.from_wkb', (['POINT11_WKB'], {}), '(POINT11_WKB)\n', (4094, 4107), False, 'import pygeos\n'), ((4119, 4150), 'pygeos.equals', 'pygeos.equals', (['actual', 'expected'], {}), '(actual, expected)\n', (4132, 4150), False, 'import pygeos\n'), ((4208, 4227), 'pygeos.points', 'pygeos.points', (['(1)', '(1)'], {}), '(1, 1)\n', (4221, 4227), False, 'import pygeos\n'), ((4241, 4302), 'pygeos.from_wkb', 'pygeos.from_wkb', (['"""0101000000000000000000F03F000000000000F03F"""'], {}), "('0101000000000000000000F03F000000000000F03F')\n", (4256, 4302), False, 'import pygeos\n'), ((4314, 4345), 'pygeos.equals', 'pygeos.equals', (['actual', 'expected'], {}), '(actual, expected)\n', (4327, 4345), False, 'import pygeos\n'), ((4359, 4421), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'0101000000000000000000F03F000000000000F03F'"], {}), "(b'0101000000000000000000F03F000000000000F03F')\n", (4374, 4421), False, 'import pygeos\n'), ((4433, 4464), 'pygeos.equals', 'pygeos.equals', (['actual', 'expected'], {}), '(actual, expected)\n', (4446, 4464), False, 'import pygeos\n'), ((6968, 7023), 'pygeos.to_wkb', 'pygeos.to_wkb', (['geom'], {'hex': 'use_hex', 'byte_order': 'byte_order'}), '(geom, hex=use_hex, byte_order=byte_order)\n', (6981, 7023), False, 'import pygeos\n'), ((7037, 7057), 'pygeos.from_wkb', 'pygeos.from_wkb', (['wkb'], {}), '(wkb)\n', (7052, 7057), False, 'import pygeos\n'), ((7069, 7096), 'pygeos.equals', 'pygeos.equals', (['actual', 'geom'], {}), '(actual, geom)\n', (7082, 7096), False, 'import pygeos\n'), ((7311, 7331), 'pygeos.from_wkb', 'pygeos.from_wkb', (['wkb'], {}), '(wkb)\n', (7326, 7331), False, 'import pygeos\n'), ((7484, 7503), 'pygeos.points', 'pygeos.points', (['(1)', '(1)'], {}), '(1, 1)\n', (7497, 7503), False, 'import pygeos\n'), ((7517, 7537), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point'], {}), '(point)\n', (7530, 7537), False, 'import pygeos\n'), ((7587, 7619), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point'], {'trim': 
'(False)'}), '(point, trim=False)\n', (7600, 7619), False, 'import pygeos\n'), ((7683, 7737), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point'], {'rounding_precision': '(3)', 'trim': '(False)'}), '(point, rounding_precision=3, trim=False)\n', (7696, 7737), False, 'import pygeos\n'), ((7835, 7857), 'pygeos.points', 'pygeos.points', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7848, 7857), False, 'import pygeos\n'), ((7871, 7893), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point_z'], {}), '(point_z)\n', (7884, 7893), False, 'import pygeos\n'), ((7946, 7988), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point_z'], {'output_dimension': '(3)'}), '(point_z, output_dimension=3)\n', (7959, 7988), False, 'import pygeos\n'), ((8042, 8084), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point_z'], {'output_dimension': '(2)'}), '(point_z, output_dimension=2)\n', (8055, 8084), False, 'import pygeos\n'), ((8134, 8169), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point_z'], {'old_3d': '(True)'}), '(point_z, old_3d=True)\n', (8147, 8169), False, 'import pygeos\n'), ((8644, 8692), 'pygeos.geometrycollections', 'pygeos.geometrycollections', (['[empty_point, point]'], {}), '([empty_point, point])\n', (8670, 8692), False, 'import pygeos\n'), ((9007, 9047), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point, point]'], {}), '([empty_point, point])\n', (9025, 9047), False, 'import pygeos\n'), ((9532, 9572), 'pygeos.multipoints', 'pygeos.multipoints', (['[point, empty_point]'], {}), '([point, empty_point])\n', (9550, 9572), False, 'import pygeos\n'), ((9675, 9694), 'pygeos.points', 'pygeos.points', (['(1)', '(1)'], {}), '(1, 1)\n', (9688, 9694), False, 'import pygeos\n'), ((9708, 9742), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point'], {'byte_order': '(1)'}), '(point, byte_order=1)\n', (9721, 9742), False, 'import pygeos\n'), ((9813, 9832), 'pygeos.points', 'pygeos.points', (['(1)', '(1)'], {}), '(1, 1)\n', (9826, 9832), False, 'import pygeos\n'), ((9846, 9890), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point'], {'hex': 
'(True)', 'byte_order': '(1)'}), '(point, hex=True, byte_order=1)\n', (9859, 9890), False, 'import pygeos\n'), ((10073, 10095), 'pygeos.points', 'pygeos.points', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (10086, 10095), False, 'import pygeos\n'), ((10109, 10145), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point_z'], {'byte_order': '(1)'}), '(point_z, byte_order=1)\n', (10122, 10145), False, 'import pygeos\n'), ((10328, 10384), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point_z'], {'output_dimension': '(2)', 'byte_order': '(1)'}), '(point_z, output_dimension=2, byte_order=1)\n', (10341, 10384), False, 'import pygeos\n'), ((10737, 10760), 'pygeos.points', 'pygeos.points', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (10750, 10760), False, 'import pygeos\n'), ((11302, 11323), 'pygeos.from_wkb', 'pygeos.from_wkb', (['ewkb'], {}), '(ewkb)\n', (11317, 11323), False, 'import pygeos\n'), ((11547, 11566), 'pygeos.points', 'pygeos.points', (['(1)', '(1)'], {}), '(1, 1)\n', (11560, 11566), False, 'import pygeos\n'), ((11641, 11704), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point_with_srid'], {'include_srid': '(True)', 'byte_order': '(1)'}), '(point_with_srid, include_srid=True, byte_order=1)\n', (11654, 11704), False, 'import pygeos\n'), ((12708, 12764), 'pygeos.to_wkb', 'pygeos.to_wkb', (['geom'], {'output_dimension': 'dims', 'byte_order': '(1)'}), '(geom, output_dimension=dims, byte_order=1)\n', (12721, 12764), False, 'import pygeos\n'), ((13950, 14006), 'pygeos.to_wkb', 'pygeos.to_wkb', (['geom'], {'output_dimension': 'dims', 'byte_order': '(1)'}), '(geom, output_dimension=dims, byte_order=1)\n', (13963, 14006), False, 'import pygeos\n'), ((14608, 14628), 'pygeos.from_wkb', 'pygeos.from_wkb', (['wkb'], {}), '(wkb)\n', (14623, 14628), False, 'import pygeos\n'), ((14760, 14781), 'pygeos.is_empty', 'pygeos.is_empty', (['geom'], {}), '(geom)\n', (14775, 14781), False, 'import pygeos\n'), ((14888, 14922), 'pygeos.set_srid', 'pygeos.set_srid', (['empty_point', '(4236)'], {}), '(empty_point, 
4236)\n', (14903, 14922), False, 'import pygeos\n'), ((14933, 14975), 'pygeos.to_wkb', 'pygeos.to_wkb', (['expected'], {'include_srid': '(True)'}), '(expected, include_srid=True)\n', (14946, 14975), False, 'import pygeos\n'), ((14989, 15009), 'pygeos.from_wkb', 'pygeos.from_wkb', (['wkb'], {}), '(wkb)\n', (15004, 15009), False, 'import pygeos\n'), ((15476, 15503), 'pygeos.equals', 'pygeos.equals', (['geom', 'actual'], {}), '(geom, actual)\n', (15489, 15503), False, 'import pygeos\n'), ((15972, 15999), 'pygeos.equals', 'pygeos.equals', (['geom', 'actual'], {}), '(geom, actual)\n', (15985, 15999), False, 'import pygeos\n'), ((16377, 16408), 'pygeos.equals', 'pygeos.equals', (['point', 'actual[0]'], {}), '(point, actual[0])\n', (16390, 16408), False, 'import pygeos\n'), ((16684, 16709), 'pygeos.from_shapely', 'pygeos.from_shapely', (['None'], {}), '(None)\n', (16703, 16709), False, 'import pygeos\n'), ((17584, 17611), 'pygeos.equals', 'pygeos.equals', (['geom', 'actual'], {}), '(geom, actual)\n', (17597, 17611), False, 'import pygeos\n'), ((18094, 18121), 'pygeos.equals', 'pygeos.equals', (['geom', 'actual'], {}), '(geom, actual)\n', (18107, 18121), False, 'import pygeos\n'), ((18447, 18472), 'pygeos.from_shapely', 'pygeos.from_shapely', (['None'], {}), '(None)\n', (18466, 18472), False, 'import pygeos\n'), ((18856, 18887), 'pygeos.equals', 'pygeos.equals', (['point', 'actual[0]'], {}), '(point, actual[0])\n', (18869, 18887), False, 'import pygeos\n'), ((19215, 19238), 'pygeos.to_shapely', 'pygeos.to_shapely', (['geom'], {}), '(geom)\n', (19232, 19238), False, 'import pygeos\n'), ((19301, 19330), 'pygeos.equals', 'pygeos.equals', (['geom', 'actual.g'], {}), '(geom, actual.g)\n', (19314, 19330), False, 'import pygeos\n'), ((19653, 19676), 'pygeos.to_shapely', 'pygeos.to_shapely', (['None'], {}), '(None)\n', (19670, 19676), False, 'import pygeos\n'), ((19988, 20020), 'pygeos.to_shapely', 'pygeos.to_shapely', (['[point, None]'], {}), '([point, None])\n', (20005, 20020), 
False, 'import pygeos\n'), ((20032, 20065), 'pygeos.equals', 'pygeos.equals', (['point', 'actual[0].g'], {}), '(point, actual[0].g)\n', (20045, 20065), False, 'import pygeos\n'), ((20365, 20383), 'pickle.dumps', 'pickle.dumps', (['geom'], {}), '(geom)\n', (20377, 20383), False, 'import pickle\n'), ((20490, 20518), 'pygeos.set_srid', 'pygeos.set_srid', (['point', '(4326)'], {}), '(point, 4326)\n', (20505, 20518), False, 'import pygeos\n'), ((20533, 20551), 'pickle.dumps', 'pickle.dumps', (['geom'], {}), '(geom)\n', (20545, 20551), False, 'import pickle\n'), ((1450, 1471), 'pygeos.to_wkb', 'pygeos.to_wkb', (['self.g'], {}), '(self.g)\n', (1463, 1471), False, 'import pygeos\n'), ((1526, 1552), 'pygeos.get_type_id', 'pygeos.get_type_id', (['self.g'], {}), '(self.g)\n', (1544, 1552), False, 'import pygeos\n'), ((1874, 1897), 'pygeos.is_empty', 'pygeos.is_empty', (['self.g'], {}), '(self.g)\n', (1889, 1897), False, 'import pygeos\n'), ((2422, 2443), 'pygeos.from_wkt', 'pygeos.from_wkt', (['None'], {}), '(None)\n', (2437, 2443), False, 'import pygeos\n'), ((2495, 2552), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Expected bytes, got int"""'}), "(TypeError, match='Expected bytes, got int')\n", (2508, 2552), False, 'import pytest\n'), ((2562, 2580), 'pygeos.from_wkt', 'pygeos.from_wkt', (['(1)'], {}), '(1)\n', (2577, 2580), False, 'import pygeos\n'), ((2591, 2684), 'pytest.raises', 'pytest.raises', (['pygeos.GEOSException'], {'match': '"""Expected word but encountered end of stream"""'}), "(pygeos.GEOSException, match=\n 'Expected word but encountered end of stream')\n", (2604, 2684), False, 'import pytest\n'), ((2703, 2722), 'pygeos.from_wkt', 'pygeos.from_wkt', (['""""""'], {}), "('')\n", (2718, 2722), False, 'import pygeos\n'), ((2733, 2797), 'pytest.raises', 'pytest.raises', (['pygeos.GEOSException'], {'match': '"""Unknown type: \'NOT\'"""'}), '(pygeos.GEOSException, match="Unknown type: \'NOT\'")\n', (2746, 2797), False, 'import pytest\n'), ((2807, 
2842), 'pygeos.from_wkt', 'pygeos.from_wkt', (['"""NOT A WKT STRING"""'], {}), "('NOT A WKT STRING')\n", (2822, 2842), False, 'import pygeos\n'), ((2891, 2933), 'pytest.warns', 'pytest.warns', (['Warning'], {'match': '"""Invalid WKT"""'}), "(Warning, match='Invalid WKT')\n", (2903, 2933), False, 'import pytest\n'), ((2943, 2981), 'pygeos.from_wkt', 'pygeos.from_wkt', (['""""""'], {'on_invalid': '"""warn"""'}), "('', on_invalid='warn')\n", (2958, 2981), False, 'import pygeos\n'), ((2992, 3034), 'pytest.warns', 'pytest.warns', (['Warning'], {'match': '"""Invalid WKT"""'}), "(Warning, match='Invalid WKT')\n", (3004, 3034), False, 'import pytest\n'), ((3044, 3098), 'pygeos.from_wkt', 'pygeos.from_wkt', (['"""NOT A WKT STRING"""'], {'on_invalid': '"""warn"""'}), "('NOT A WKT STRING', on_invalid='warn')\n", (3059, 3098), False, 'import pygeos\n'), ((3149, 3167), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (3161, 3167), False, 'import pytest\n'), ((3177, 3217), 'pygeos.from_wkt', 'pygeos.from_wkt', (['""""""'], {'on_invalid': '"""ignore"""'}), "('', on_invalid='ignore')\n", (3192, 3217), False, 'import pygeos\n'), ((3228, 3246), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (3240, 3246), False, 'import pytest\n'), ((3256, 3312), 'pygeos.from_wkt', 'pygeos.from_wkt', (['"""NOT A WKT STRING"""'], {'on_invalid': '"""ignore"""'}), "('NOT A WKT STRING', on_invalid='ignore')\n", (3271, 3312), False, 'import pygeos\n'), ((3375, 3428), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""not a valid option"""'}), "(ValueError, match='not a valid option')\n", (3388, 3428), False, 'import pytest\n'), ((3438, 3515), 'pygeos.from_wkt', 'pygeos.from_wkt', (["b'\\x01\\x01\\x00\\x00\\x00\\x00'"], {'on_invalid': '"""unsupported_option"""'}), "(b'\\x01\\x01\\x00\\x00\\x00\\x00', on_invalid='unsupported_option')\n", (3453, 3515), False, 'import pygeos\n'), ((3981, 4000), 'pygeos.to_wkt', 'pygeos.to_wkt', (['geom'], {}), '(geom)\n', (3994, 4000), 
False, 'import pygeos\n'), ((4526, 4547), 'pygeos.from_wkb', 'pygeos.from_wkb', (['None'], {}), '(None)\n', (4541, 4547), False, 'import pygeos\n'), ((4599, 4656), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Expected bytes, got int"""'}), "(TypeError, match='Expected bytes, got int')\n", (4612, 4656), False, 'import pytest\n'), ((4666, 4684), 'pygeos.from_wkb', 'pygeos.from_wkb', (['(1)'], {}), '(1)\n', (4681, 4684), False, 'import pygeos\n'), ((4713, 4784), 'pytest.raises', 'pytest.raises', (['pygeos.GEOSException'], {'match': '"""Unexpected EOF parsing WKB"""'}), "(pygeos.GEOSException, match='Unexpected EOF parsing WKB')\n", (4726, 4784), False, 'import pytest\n'), ((4803, 4847), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'\\x01\\x01\\x00\\x00\\x00\\x00'"], {}), "(b'\\x01\\x01\\x00\\x00\\x00\\x00')\n", (4818, 4847), False, 'import pygeos\n'), ((4914, 5030), 'pytest.raises', 'pytest.raises', (['pygeos.GEOSException'], {'match': '"""Invalid number of points in LinearRing found 3 - must be 0 or >= 4"""'}), "(pygeos.GEOSException, match=\n 'Invalid number of points in LinearRing found 3 - must be 0 or >= 4')\n", (4927, 5030), False, 'import pytest\n'), ((5067, 5266), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'\\x01\\x03\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00P}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A0n\\xa3!\\xfc\\xb05A\\xa0\\x11\\xa5=\\x90^=AP}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A'"], {}), "(\n b'\\x01\\x03\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00P}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A0n\\xa3!\\xfc\\xb05A\\xa0\\x11\\xa5=\\x90^=AP}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A'\n )\n", (5082, 5266), False, 'import pygeos\n'), ((5380, 5422), 'pytest.warns', 'pytest.warns', (['Warning'], {'match': '"""Invalid WKB"""'}), "(Warning, match='Invalid WKB')\n", (5392, 5422), False, 'import pytest\n'), ((5441, 5504), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'\\x01\\x01\\x00\\x00\\x00\\x00'"], 
{'on_invalid': '"""warn"""'}), "(b'\\x01\\x01\\x00\\x00\\x00\\x00', on_invalid='warn')\n", (5456, 5504), False, 'import pygeos\n'), ((5571, 5613), 'pytest.warns', 'pytest.warns', (['Warning'], {'match': '"""Invalid WKB"""'}), "(Warning, match='Invalid WKB')\n", (5583, 5613), False, 'import pytest\n'), ((5632, 5850), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'\\x01\\x03\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00P}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A0n\\xa3!\\xfc\\xb05A\\xa0\\x11\\xa5=\\x90^=AP}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A'"], {'on_invalid': '"""warn"""'}), "(\n b'\\x01\\x03\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00P}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A0n\\xa3!\\xfc\\xb05A\\xa0\\x11\\xa5=\\x90^=AP}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A'\n , on_invalid='warn')\n", (5647, 5850), False, 'import pygeos\n'), ((5981, 5999), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (5993, 5999), False, 'import pytest\n'), ((6023, 6088), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'\\x01\\x01\\x00\\x00\\x00\\x00'"], {'on_invalid': '"""ignore"""'}), "(b'\\x01\\x01\\x00\\x00\\x00\\x00', on_invalid='ignore')\n", (6038, 6088), False, 'import pygeos\n'), ((6196, 6214), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (6208, 6214), False, 'import pytest\n'), ((6238, 6458), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'\\x01\\x03\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00P}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A0n\\xa3!\\xfc\\xb05A\\xa0\\x11\\xa5=\\x90^=AP}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A'"], {'on_invalid': '"""ignore"""'}), "(\n b'\\x01\\x03\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00P}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A0n\\xa3!\\xfc\\xb05A\\xa0\\x11\\xa5=\\x90^=AP}\\xae\\xc6\\x00\\xb15A\\x00\\xde\\x02I\\x8e^=A'\n , on_invalid='ignore')\n", (6253, 6458), False, 'import pygeos\n'), ((6617, 6670), 'pytest.raises', 'pytest.raises', (['ValueError'], 
{'match': '"""not a valid option"""'}), "(ValueError, match='not a valid option')\n", (6630, 6670), False, 'import pytest\n'), ((6680, 6757), 'pygeos.from_wkb', 'pygeos.from_wkb', (["b'\\x01\\x01\\x00\\x00\\x00\\x00'"], {'on_invalid': '"""unsupported_option"""'}), "(b'\\x01\\x01\\x00\\x00\\x00\\x00', on_invalid='unsupported_option')\n", (6695, 6757), False, 'import pygeos\n'), ((7278, 7298), 'pygeos.Geometry', 'pygeos.Geometry', (['wkt'], {}), '(wkt)\n', (7293, 7298), False, 'import pygeos\n'), ((7424, 7443), 'pygeos.to_wkb', 'pygeos.to_wkb', (['geom'], {}), '(geom)\n', (7437, 7443), False, 'import pygeos\n'), ((8266, 8285), 'pygeos.to_wkt', 'pygeos.to_wkt', (['None'], {}), '(None)\n', (8279, 8285), False, 'import pygeos\n'), ((8335, 8359), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8348, 8359), False, 'import pytest\n'), ((8369, 8385), 'pygeos.to_wkt', 'pygeos.to_wkt', (['(1)'], {}), '(1)\n', (8382, 8385), False, 'import pygeos\n'), ((8396, 8431), 'pytest.raises', 'pytest.raises', (['pygeos.GEOSException'], {}), '(pygeos.GEOSException)\n', (8409, 8431), False, 'import pytest\n'), ((8441, 8481), 'pygeos.to_wkt', 'pygeos.to_wkt', (['point'], {'output_dimension': '(4)'}), '(point, output_dimension=4)\n', (8454, 8481), False, 'import pygeos\n'), ((8526, 8552), 'pygeos.to_wkt', 'pygeos.to_wkt', (['empty_point'], {}), '(empty_point)\n', (8539, 8552), False, 'import pygeos\n'), ((9057, 9082), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9070, 9082), False, 'import pytest\n'), ((9092, 9111), 'pygeos.to_wkt', 'pygeos.to_wkt', (['geom'], {}), '(geom)\n', (9105, 9111), False, 'import pygeos\n'), ((9292, 9307), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (9301, 9307), True, 'import numpy as np\n'), ((9309, 9324), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (9318, 9324), True, 'import numpy as np\n'), ((10477, 10496), 'pygeos.to_wkb', 'pygeos.to_wkb', (['None'], {}), '(None)\n', (10490, 
10496), False, 'import pygeos\n'), ((10546, 10570), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10559, 10570), False, 'import pytest\n'), ((10580, 10596), 'pygeos.to_wkb', 'pygeos.to_wkb', (['(1)'], {}), '(1)\n', (10593, 10596), False, 'import pygeos\n'), ((10607, 10642), 'pytest.raises', 'pytest.raises', (['pygeos.GEOSException'], {}), '(pygeos.GEOSException)\n', (10620, 10642), False, 'import pytest\n'), ((10652, 10692), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point'], {'output_dimension': '(4)'}), '(point, output_dimension=4)\n', (10665, 10692), False, 'import pygeos\n'), ((10936, 10970), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point'], {'byte_order': '(1)'}), '(point, byte_order=1)\n', (10949, 10970), False, 'import pygeos\n'), ((11013, 11047), 'pygeos.to_wkb', 'pygeos.to_wkb', (['point'], {'byte_order': '(0)'}), '(point, byte_order=0)\n', (11026, 11047), False, 'import pygeos\n'), ((11335, 11367), 'pygeos.to_wkt', 'pygeos.to_wkt', (['actual'], {'trim': '(True)'}), '(actual, trim=True)\n', (11348, 11367), False, 'import pygeos\n'), ((11397, 11442), 'pygeos.to_wkb', 'pygeos.to_wkb', (['actual'], {'hex': '(True)', 'byte_order': '(1)'}), '(actual, hex=True, byte_order=1)\n', (11410, 11442), False, 'import pygeos\n'), ((11461, 11525), 'pygeos.to_wkb', 'pygeos.to_wkb', (['actual'], {'hex': '(True)', 'include_srid': '(True)', 'byte_order': '(1)'}), '(actual, hex=True, include_srid=True, byte_order=1)\n', (11474, 11525), False, 'import pygeos\n'), ((11612, 11626), 'numpy.int32', 'np.int32', (['(4326)'], {}), '(4326)\n', (11620, 11626), True, 'import numpy as np\n'), ((14793, 14817), 'pygeos.get_type_id', 'pygeos.get_type_id', (['geom'], {}), '(geom)\n', (14811, 14817), False, 'import pygeos\n'), ((15021, 15044), 'pygeos.get_srid', 'pygeos.get_srid', (['actual'], {}), '(actual)\n', (15036, 15044), False, 'import pygeos\n'), ((17087, 17111), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (17100, 17111), False, 'import 
pytest\n'), ((17121, 17146), 'pygeos.from_shapely', 'pygeos.from_shapely', (['geom'], {}), '(geom)\n', (17140, 17146), False, 'import pygeos\n'), ((20167, 20191), 'pygeos.get_type_id', 'pygeos.get_type_id', (['geom'], {}), '(geom)\n', (20185, 20191), False, 'import pygeos\n'), ((20415, 20436), 'pickle.loads', 'pickle.loads', (['pickled'], {}), '(pickled)\n', (20427, 20436), False, 'import pickle\n'), ((1360, 1380), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (1368, 1380), True, 'import numpy as np\n'), ((3900, 3924), 'pygeos.is_geometry', 'pygeos.is_geometry', (['geom'], {}), '(geom)\n', (3918, 3924), False, 'import pygeos\n'), ((3942, 3963), 'pygeos.is_empty', 'pygeos.is_empty', (['geom'], {}), '(geom)\n', (3957, 3963), False, 'import pygeos\n'), ((7343, 7367), 'pygeos.is_geometry', 'pygeos.is_geometry', (['geom'], {}), '(geom)\n', (7361, 7367), False, 'import pygeos\n'), ((7385, 7406), 'pygeos.is_empty', 'pygeos.is_empty', (['geom'], {}), '(geom)\n', (7400, 7406), False, 'import pygeos\n'), ((8839, 8864), 'pygeos.to_wkt', 'pygeos.to_wkt', (['collection'], {}), '(collection)\n', (8852, 8864), False, 'import pygeos\n'), ((12022, 12055), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (12040, 12055), False, 'import pygeos\n'), ((12090, 12123), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (12108, 12123), False, 'import pygeos\n'), ((12159, 12200), 'pygeos.geometrycollections', 'pygeos.geometrycollections', (['[empty_point]'], {}), '([empty_point])\n', (12185, 12200), False, 'import pygeos\n'), ((12243, 12284), 'pygeos.geometrycollections', 'pygeos.geometrycollections', (['[empty_point]'], {}), '([empty_point])\n', (12269, 12284), False, 'import pygeos\n'), ((13227, 13260), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (13245, 13260), False, 'import pygeos\n'), ((13295, 13328), 'pygeos.multipoints', 
'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (13313, 13328), False, 'import pygeos\n'), ((13363, 13404), 'pygeos.geometrycollections', 'pygeos.geometrycollections', (['[empty_point]'], {}), '([empty_point])\n', (13389, 13404), False, 'import pygeos\n'), ((13447, 13488), 'pygeos.geometrycollections', 'pygeos.geometrycollections', (['[empty_point]'], {}), '([empty_point])\n', (13473, 13488), False, 'import pygeos\n'), ((20287, 20315), 'pygeos.get_coordinates', 'pygeos.get_coordinates', (['geom'], {}), '(geom)\n', (20309, 20315), False, 'import pygeos\n'), ((20579, 20600), 'pickle.loads', 'pickle.loads', (['pickled'], {}), '(pickled)\n', (20591, 20600), False, 'import pickle\n'), ((11716, 11749), 'numpy.frombuffer', 'np.frombuffer', (['result[5:9]', '"""<u4"""'], {}), "(result[5:9], '<u4')\n", (11729, 11749), True, 'import numpy as np\n'), ((14144, 14181), 'struct.unpack', 'struct.unpack', (['"""<2d"""', 'actual[-2 * 8:]'], {}), "('<2d', actual[-2 * 8:])\n", (14157, 14181), False, 'import struct\n'), ((12369, 12402), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (12387, 12402), False, 'import pygeos\n'), ((12521, 12554), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (12539, 12554), False, 'import pygeos\n'), ((13572, 13605), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (13590, 13605), False, 'import pygeos\n'), ((13724, 13757), 'pygeos.multipoints', 'pygeos.multipoints', (['[empty_point]'], {}), '([empty_point])\n', (13742, 13757), False, 'import pygeos\n')] |
#!/usr/bin/env python3
# coding: utf8
"""
Normalizes real and imagninary matrix values, used for the leakyrelu model.
"""
__author__ = '<NAME>, <NAME>, <NAME>'
__email__ = "<EMAIL>"
import numpy as np
import math
name = 'norm_real_imag'
def normalize(track_complex):
    """
    Normalize training data to its magnitude (amplitude) spectrum.

    Phase information is discarded via the absolute value, and a trailing
    channel axis is appended so the output fits (..., 1) model inputs.
    """
    amplitudes = np.abs(track_complex)
    return np.expand_dims(amplitudes, axis=-1)
def denormalize(magnitude_predicted, mix_complex):
    """
    Convert predicted magnitudes back to complex STFT values.

    The trailing channel axis is dropped, negative magnitudes are clamped
    to zero, and the phase of the mixture spectrogram is re-applied.
    """
    amplitudes = np.reshape(magnitude_predicted, magnitude_predicted.shape[0:2])
    amplitudes = np.clip(amplitudes, 0, None)
    phase = np.exp(1j * np.angle(mix_complex))
    return amplitudes * phase
| [
"numpy.abs",
"numpy.reshape",
"numpy.angle"
] | [((357, 378), 'numpy.abs', 'np.abs', (['track_complex'], {}), '(track_complex)\n', (363, 378), True, 'import numpy as np\n'), ((395, 440), 'numpy.reshape', 'np.reshape', (['magnitude', '(magnitude.shape + (1,))'], {}), '(magnitude, magnitude.shape + (1,))\n', (405, 440), True, 'import numpy as np\n'), ((609, 672), 'numpy.reshape', 'np.reshape', (['magnitude_predicted', 'magnitude_predicted.shape[0:2]'], {}), '(magnitude_predicted, magnitude_predicted.shape[0:2])\n', (619, 672), True, 'import numpy as np\n'), ((738, 759), 'numpy.angle', 'np.angle', (['mix_complex'], {}), '(mix_complex)\n', (746, 759), True, 'import numpy as np\n')] |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.ndimage
from parameterized import parameterized
from tests.utils import NumpyImageTestCase2D, NumpyImageTestCase3D
from monai.transforms import RandRotate
class TestRandRotate2D(NumpyImageTestCase2D):
    @parameterized.expand(
        [
            (90, True, "bilinear", "border", False),
            (45, True, "nearest", "border", False),
            (180, False, "nearest", "zeros", True),
            ((-45, 0), False, "nearest", "zeros", True),
        ]
    )
    def test_correct_results(self, degrees, keep_size, mode, padding_mode, align_corners):
        """Rotate a 2D image with RandRotate and compare against scipy.ndimage.rotate.

        Args:
            degrees: rotation range for the x-plane (scalar or (min, max)).
            keep_size: whether the output keeps the input spatial size.
            mode: interpolation mode ("bilinear" or "nearest").
            padding_mode: out-of-bounds fill mode ("border" or "zeros").
            align_corners: grid-sample corner alignment flag.
        """
        rotate_fn = RandRotate(
            range_x=degrees,
            prob=1.0,
            keep_size=keep_size,
            mode=mode,
            padding_mode=padding_mode,
            align_corners=align_corners,
        )
        # Fixed seed so the randomly drawn angle is reproducible.
        rotate_fn.set_random_state(243)
        rotated = rotate_fn(self.imt[0])
        # scipy spline order 0 matches "nearest" interpolation, order 1 "bilinear".
        _order = 0 if mode == "nearest" else 1
        # Bug fix: the scipy boundary mode must be derived from padding_mode, not
        # from the interpolation mode -- the original compared `mode` (which is
        # only ever "bilinear"/"nearest") against padding-mode values, so the
        # "border"/"reflection" branches were unreachable.
        if padding_mode == "border":
            _mode = "nearest"
        elif padding_mode == "reflection":
            _mode = "reflect"
        else:
            _mode = "constant"
        angle = rotate_fn.x
        # scipy rotates counter-clockwise for positive angles, hence -angle.
        expected = scipy.ndimage.rotate(
            self.imt[0, 0], -angle, (0, 1), not keep_size, order=_order, mode=_mode, prefilter=False
        )
        expected = np.stack(expected).astype(np.float32)
        np.testing.assert_allclose(expected, rotated[0])
class TestRandRotate3D(NumpyImageTestCase3D):
    @parameterized.expand(
        [
            (90, -30, (0.0, 180), False, "bilinear", "border", False, (1, 87, 104, 109)),
            (45, (-20, 40), (20, 30), False, "nearest", "border", True, (1, 89, 105, 104)),
            (0.0, (360, 370), (-1, 1), True, "nearest", "zeros", True, (1, 48, 64, 80)),
            ((-45, 0), 0, 0, False, "nearest", "zeros", False, (1, 48, 77, 90)),
        ]
    )
    def test_correct_results(self, x, y, z, keep_size, mode, padding_mode, align_corners, expected):
        """Apply a random 3D rotation and check only the output shape."""
        options = dict(
            range_x=x,
            range_y=y,
            range_z=z,
            prob=1.0,
            keep_size=keep_size,
            mode=mode,
            padding_mode=padding_mode,
            align_corners=align_corners,
        )
        transform = RandRotate(**options)
        # Fixed seed so the drawn angles (and thus the output shape) are stable.
        transform.set_random_state(243)
        result = transform(self.imt[0])
        np.testing.assert_allclose(result.shape, expected)
if __name__ == "__main__":
    # Discover and run all test cases in this module when executed as a script.
    unittest.main()
| [
"parameterized.parameterized.expand",
"numpy.testing.assert_allclose",
"numpy.stack",
"unittest.main",
"monai.transforms.RandRotate"
] | [((833, 1030), 'parameterized.parameterized.expand', 'parameterized.expand', (["[(90, True, 'bilinear', 'border', False), (45, True, 'nearest', 'border', \n False), (180, False, 'nearest', 'zeros', True), ((-45, 0), False,\n 'nearest', 'zeros', True)]"], {}), "([(90, True, 'bilinear', 'border', False), (45, True,\n 'nearest', 'border', False), (180, False, 'nearest', 'zeros', True), ((\n -45, 0), False, 'nearest', 'zeros', True)])\n", (853, 1030), False, 'from parameterized import parameterized\n'), ((2060, 2403), 'parameterized.parameterized.expand', 'parameterized.expand', (["[(90, -30, (0.0, 180), False, 'bilinear', 'border', False, (1, 87, 104, 109\n )), (45, (-20, 40), (20, 30), False, 'nearest', 'border', True, (1, 89,\n 105, 104)), (0.0, (360, 370), (-1, 1), True, 'nearest', 'zeros', True,\n (1, 48, 64, 80)), ((-45, 0), 0, 0, False, 'nearest', 'zeros', False, (1,\n 48, 77, 90))]"], {}), "([(90, -30, (0.0, 180), False, 'bilinear', 'border', \n False, (1, 87, 104, 109)), (45, (-20, 40), (20, 30), False, 'nearest',\n 'border', True, (1, 89, 105, 104)), (0.0, (360, 370), (-1, 1), True,\n 'nearest', 'zeros', True, (1, 48, 64, 80)), ((-45, 0), 0, 0, False,\n 'nearest', 'zeros', False, (1, 48, 77, 90))])\n", (2080, 2403), False, 'from parameterized import parameterized\n'), ((3004, 3019), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3017, 3019), False, 'import unittest\n'), ((1206, 1335), 'monai.transforms.RandRotate', 'RandRotate', ([], {'range_x': 'degrees', 'prob': '(1.0)', 'keep_size': 'keep_size', 'mode': 'mode', 'padding_mode': 'padding_mode', 'align_corners': 'align_corners'}), '(range_x=degrees, prob=1.0, keep_size=keep_size, mode=mode,\n padding_mode=padding_mode, align_corners=align_corners)\n', (1216, 1335), False, 'from monai.transforms import RandRotate\n'), ((1958, 2006), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected', 'rotated[0]'], {}), '(expected, rotated[0])\n', (1984, 2006), True, 'import numpy as np\n'), 
((2581, 2726), 'monai.transforms.RandRotate', 'RandRotate', ([], {'range_x': 'x', 'range_y': 'y', 'range_z': 'z', 'prob': '(1.0)', 'keep_size': 'keep_size', 'mode': 'mode', 'padding_mode': 'padding_mode', 'align_corners': 'align_corners'}), '(range_x=x, range_y=y, range_z=z, prob=1.0, keep_size=keep_size,\n mode=mode, padding_mode=padding_mode, align_corners=align_corners)\n', (2591, 2726), False, 'from monai.transforms import RandRotate\n'), ((2919, 2970), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rotated.shape', 'expected'], {}), '(rotated.shape, expected)\n', (2945, 2970), True, 'import numpy as np\n'), ((1912, 1930), 'numpy.stack', 'np.stack', (['expected'], {}), '(expected)\n', (1920, 1930), True, 'import numpy as np\n')] |
'''
Example of VRAE on text data
VRAE, like VAE, has a modular design. encoder, decoder, and VRAE are 3 models that share weights. After training the VRAE model,
the encoder can be used to generate latent vectors of text data(sentences/documents).
The decoder can be used to generate embedding vector of text by sampling the latent vector from a Gaussian distribution with mean = 0 and std = 1.
# Reference
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"Generating Sentences from a Continuous Space."
https://arxiv.org/abs/1511.06349
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.preprocessing import sequence
from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers
from keras.models import Model
from keras.datasets import imdb
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
# reparameterization trick
# instead of sampling from Q(z|X), sample epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    Draws epsilon ~ N(0, I) and returns z = z_mean + sigma * epsilon so the
    sampling step stays differentiable with respect to the distribution
    parameters.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch_size = K.shape(z_mean)[0]
    latent_dim = K.int_shape(z_mean)[1]
    # random_normal defaults to mean = 0 and std = 1.0.
    noise = K.random_normal(shape=(batch_size, latent_dim))
    sigma = K.exp(0.5 * z_log_var)
    return z_mean + sigma * noise
def plot_results(models,
data,
batch_size=128,
model_name="vae_mnist"):
"""Plots labels and MNIST digits as a function of the 2D latent vector
# Arguments
models (tuple): encoder and decoder models
data (tuple): test data and label
batch_size (int): prediction batch size
model_name (string): which model is using this function
"""
encoder, decoder = models
x_test, y_test = data
os.makedirs(model_name, exist_ok=True)
filename = os.path.join(model_name, "vae_mean.png")
# display a 2D plot of the digit classes in the latent space
z_mean, _, _ = encoder.predict(x_test,
batch_size=batch_size)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.savefig(filename)
plt.show()
filename = os.path.join(model_name, "digits_over_latent.png")
# display a 30x30 2D manifold of digits
n = 30
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates corresponding to the 2D plot
# of digit classes in the latent space
grid_x = np.linspace(-4, 4, n)
grid_y = np.linspace(-4, 4, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z_sample = np.array([[xi, yi]])
x_decoded = decoder.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
start_range = digit_size // 2
end_range = (n - 1) * digit_size + start_range + 1
pixel_range = np.arange(start_range, end_range, digit_size)
sample_range_x = np.round(grid_x, 1)
sample_range_y = np.round(grid_y, 1)
plt.xticks(pixel_range, sample_range_x)
plt.yticks(pixel_range, sample_range_y)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.imshow(figure, cmap='Greys_r')
plt.savefig(filename)
plt.show()
# IMDB dataset
max_features = 20000
# cut texts after this number of words
# (among top max_features most common words)
maxlen = 100
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
y_train = np.array(y_train)
y_test = np.array(y_test)
# network parameters
input_shape = (maxlen, )
embed_dim = 32
intermediate_dim = 512
latent_dim = 256
batch_size = 512
epochs = 50
# VAE model = encoder + decoder
# build encoder model
inputs = Input(shape=input_shape, name='encoder_inputs')
embedding_layer = Embedding(max_features, embed_dim, input_length=maxlen, trainable=True)
encoder_inputs = embedding_layer(inputs)
x, h, c = LSTM(intermediate_dim, return_state=True)(encoder_inputs)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z, h, c], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vrae_encoder.png', show_shapes=True)
# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z')
latent_repeat = RepeatVector(maxlen)(latent_inputs)
h = Input(shape=(intermediate_dim, ), name='encoder_state_h')
c = Input(shape=(intermediate_dim, ), name='encoder_state_c')
x, _, _ = LSTM(intermediate_dim, return_sequences=True, return_state=True)(latent_repeat, initial_state=[h, c])
x, _, _ = LSTM(embed_dim, return_sequences=True, return_state=True)(x)
outputs = wrappers.TimeDistributed(Dense(embed_dim))(x)
# instantiate decoder model
decoder = Model([latent_inputs, h, c], outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vrae_decoder.png', show_shapes=True)
# instantiate VAE model
outputs = decoder(encoder(inputs)[2:])
vrae = Model(inputs, outputs, name='vrae')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
help_ = "Load h5 model trained weights"
parser.add_argument("-w", "--weights", help=help_)
args = parser.parse_args()
models = (encoder, decoder)
data = (x_test, y_test)
# VRAE loss = kl_loss + mse_loss
reconstruction_loss = mse(encoder_inputs, outputs)
reconstruction_loss = K.sum(reconstruction_loss, axis=-1)
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vrae_loss = K.mean(reconstruction_loss + kl_loss)
vrae.add_loss(vrae_loss)
vrae.compile(optimizer='adam')
vrae.summary()
plot_model(vrae,
to_file='vrae.png',
show_shapes=True)
if args.weights:
vrae.load_weights(args.weights)
else:
# train the autoencoder
vrae.fit(x_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, None))
vrae.save_weights('vrae_mlp_mnist.h5')
plot_results(models,
data,
batch_size=batch_size,
model_name="vrae")
| [
"keras.backend.shape",
"keras.backend.sum",
"matplotlib.pyplot.ylabel",
"numpy.array",
"keras.layers.Dense",
"keras.preprocessing.sequence.pad_sequences",
"numpy.arange",
"matplotlib.pyplot.imshow",
"keras.datasets.imdb.load_data",
"argparse.ArgumentParser",
"keras.utils.plot_model",
"matplotl... | [((4022, 4060), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (4036, 4060), False, 'from keras.datasets import imdb\n'), ((4188, 4234), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_train'], {'maxlen': 'maxlen'}), '(x_train, maxlen=maxlen)\n', (4210, 4234), False, 'from keras.preprocessing import sequence\n'), ((4244, 4289), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_test'], {'maxlen': 'maxlen'}), '(x_test, maxlen=maxlen)\n', (4266, 4289), False, 'from keras.preprocessing import sequence\n'), ((4300, 4317), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4308, 4317), True, 'import numpy as np\n'), ((4327, 4343), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4335, 4343), True, 'import numpy as np\n'), ((4539, 4586), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""encoder_inputs"""'}), "(shape=input_shape, name='encoder_inputs')\n", (4544, 4586), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((4605, 4676), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embed_dim'], {'input_length': 'maxlen', 'trainable': '(True)'}), '(max_features, embed_dim, input_length=maxlen, trainable=True)\n', (4614, 4676), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5138, 5197), 'keras.models.Model', 'Model', (['inputs', '[z_mean, z_log_var, z, h, c]'], {'name': '"""encoder"""'}), "(inputs, [z_mean, z_log_var, z, h, c], name='encoder')\n", (5143, 5197), False, 'from keras.models import Model\n'), ((5216, 5281), 'keras.utils.plot_model', 'plot_model', (['encoder'], {'to_file': '"""vrae_encoder.png"""', 'show_shapes': '(True)'}), "(encoder, to_file='vrae_encoder.png', show_shapes=True)\n", (5226, 5281), False, 'from keras.utils import plot_model\n'), ((5321, 5357), 
'keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)', 'name': '"""z"""'}), "(shape=(latent_dim,), name='z')\n", (5326, 5357), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5414, 5470), 'keras.layers.Input', 'Input', ([], {'shape': '(intermediate_dim,)', 'name': '"""encoder_state_h"""'}), "(shape=(intermediate_dim,), name='encoder_state_h')\n", (5419, 5470), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5476, 5532), 'keras.layers.Input', 'Input', ([], {'shape': '(intermediate_dim,)', 'name': '"""encoder_state_c"""'}), "(shape=(intermediate_dim,), name='encoder_state_c')\n", (5481, 5532), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5812, 5865), 'keras.models.Model', 'Model', (['[latent_inputs, h, c]', 'outputs'], {'name': '"""decoder"""'}), "([latent_inputs, h, c], outputs, name='decoder')\n", (5817, 5865), False, 'from keras.models import Model\n'), ((5884, 5949), 'keras.utils.plot_model', 'plot_model', (['decoder'], {'to_file': '"""vrae_decoder.png"""', 'show_shapes': '(True)'}), "(decoder, to_file='vrae_decoder.png', show_shapes=True)\n", (5894, 5949), False, 'from keras.utils import plot_model\n'), ((6021, 6056), 'keras.models.Model', 'Model', (['inputs', 'outputs'], {'name': '"""vrae"""'}), "(inputs, outputs, name='vrae')\n", (6026, 6056), False, 'from keras.models import Model\n'), ((1568, 1603), 'keras.backend.random_normal', 'K.random_normal', ([], {'shape': '(batch, dim)'}), '(shape=(batch, dim))\n', (1583, 1603), True, 'from keras import backend as K\n'), ((2147, 2185), 'os.makedirs', 'os.makedirs', (['model_name'], {'exist_ok': '(True)'}), '(model_name, exist_ok=True)\n', (2158, 2185), False, 'import os\n'), ((2202, 2242), 'os.path.join', 'os.path.join', (['model_name', '"""vae_mean.png"""'], {}), "(model_name, 'vae_mean.png')\n", (2214, 2242), False, 'import os\n'), ((2413, 
2441), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (2423, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2495), 'matplotlib.pyplot.scatter', 'plt.scatter', (['z_mean[:, 0]', 'z_mean[:, 1]'], {'c': 'y_test'}), '(z_mean[:, 0], z_mean[:, 1], c=y_test)\n', (2457, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2514), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2512, 2514), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z[0]"""'], {}), "('z[0]')\n", (2529, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2542, 2560), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z[1]"""'], {}), "('z[1]')\n", (2552, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2565, 2586), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (2576, 2586), True, 'import matplotlib.pyplot as plt\n'), ((2591, 2601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2599, 2601), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2668), 'os.path.join', 'os.path.join', (['model_name', '"""digits_over_latent.png"""'], {}), "(model_name, 'digits_over_latent.png')\n", (2630, 2668), False, 'import os\n'), ((2757, 2799), 'numpy.zeros', 'np.zeros', (['(digit_size * n, digit_size * n)'], {}), '((digit_size * n, digit_size * n))\n', (2765, 2799), True, 'import numpy as np\n'), ((2919, 2940), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', 'n'], {}), '(-4, 4, n)\n', (2930, 2940), True, 'import numpy as np\n'), ((3345, 3373), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3355, 3373), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3526), 'numpy.arange', 'np.arange', (['start_range', 'end_range', 'digit_size'], {}), '(start_range, end_range, digit_size)\n', (3490, 3526), True, 'import numpy as np\n'), ((3548, 3567), 'numpy.round', 'np.round', (['grid_x', '(1)'], 
{}), '(grid_x, 1)\n', (3556, 3567), True, 'import numpy as np\n'), ((3589, 3608), 'numpy.round', 'np.round', (['grid_y', '(1)'], {}), '(grid_y, 1)\n', (3597, 3608), True, 'import numpy as np\n'), ((3613, 3652), 'matplotlib.pyplot.xticks', 'plt.xticks', (['pixel_range', 'sample_range_x'], {}), '(pixel_range, sample_range_x)\n', (3623, 3652), True, 'import matplotlib.pyplot as plt\n'), ((3657, 3696), 'matplotlib.pyplot.yticks', 'plt.yticks', (['pixel_range', 'sample_range_y'], {}), '(pixel_range, sample_range_y)\n', (3667, 3696), True, 'import matplotlib.pyplot as plt\n'), ((3701, 3719), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z[0]"""'], {}), "('z[0]')\n", (3711, 3719), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3742), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z[1]"""'], {}), "('z[1]')\n", (3734, 3742), True, 'import matplotlib.pyplot as plt\n'), ((3747, 3781), 'matplotlib.pyplot.imshow', 'plt.imshow', (['figure'], {'cmap': '"""Greys_r"""'}), "(figure, cmap='Greys_r')\n", (3757, 3781), True, 'import matplotlib.pyplot as plt\n'), ((3786, 3807), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3797, 3807), True, 'import matplotlib.pyplot as plt\n'), ((3812, 3822), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3820, 3822), True, 'import matplotlib.pyplot as plt\n'), ((4728, 4769), 'keras.layers.LSTM', 'LSTM', (['intermediate_dim'], {'return_state': '(True)'}), '(intermediate_dim, return_state=True)\n', (4732, 4769), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((4795, 4827), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'name': '"""z_mean"""'}), "(latent_dim, name='z_mean')\n", (4800, 4827), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((4843, 4878), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'name': '"""z_log_var"""'}), "(latent_dim, name='z_log_var')\n", (4848, 4878), False, 'from 
keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5023, 5077), 'keras.layers.Lambda', 'Lambda', (['sampling'], {'output_shape': '(latent_dim,)', 'name': '"""z"""'}), "(sampling, output_shape=(latent_dim,), name='z')\n", (5029, 5077), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5374, 5394), 'keras.layers.RepeatVector', 'RepeatVector', (['maxlen'], {}), '(maxlen)\n', (5386, 5394), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5544, 5608), 'keras.layers.LSTM', 'LSTM', (['intermediate_dim'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(intermediate_dim, return_sequences=True, return_state=True)\n', (5548, 5608), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((5656, 5713), 'keras.layers.LSTM', 'LSTM', (['embed_dim'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(embed_dim, return_sequences=True, return_state=True)\n', (5660, 5713), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((6098, 6123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6121, 6123), False, 'import argparse\n'), ((6379, 6407), 'keras.losses.mse', 'mse', (['encoder_inputs', 'outputs'], {}), '(encoder_inputs, outputs)\n', (6382, 6407), False, 'from keras.losses import mse, binary_crossentropy\n'), ((6434, 6469), 'keras.backend.sum', 'K.sum', (['reconstruction_loss'], {'axis': '(-1)'}), '(reconstruction_loss, axis=-1)\n', (6439, 6469), True, 'from keras import backend as K\n'), ((6550, 6573), 'keras.backend.sum', 'K.sum', (['kl_loss'], {'axis': '(-1)'}), '(kl_loss, axis=-1)\n', (6555, 6573), True, 'from keras import backend as K\n'), ((6610, 6647), 'keras.backend.mean', 'K.mean', (['(reconstruction_loss + kl_loss)'], {}), '(reconstruction_loss + kl_loss)\n', (6616, 6647), True, 'from keras 
import backend as K\n'), ((6735, 6789), 'keras.utils.plot_model', 'plot_model', (['vrae'], {'to_file': '"""vrae.png"""', 'show_shapes': '(True)'}), "(vrae, to_file='vrae.png', show_shapes=True)\n", (6745, 6789), False, 'from keras.utils import plot_model\n'), ((1443, 1458), 'keras.backend.shape', 'K.shape', (['z_mean'], {}), '(z_mean)\n', (1450, 1458), True, 'from keras import backend as K\n'), ((1472, 1491), 'keras.backend.int_shape', 'K.int_shape', (['z_mean'], {}), '(z_mean)\n', (1483, 1491), True, 'from keras import backend as K\n'), ((2954, 2975), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', 'n'], {}), '(-4, 4, n)\n', (2965, 2975), True, 'import numpy as np\n'), ((5752, 5768), 'keras.layers.Dense', 'Dense', (['embed_dim'], {}), '(embed_dim)\n', (5757, 5768), False, 'from keras.layers import Lambda, Input, Embedding, Dense, LSTM, RepeatVector, wrappers\n'), ((6519, 6535), 'keras.backend.exp', 'K.exp', (['z_log_var'], {}), '(z_log_var)\n', (6524, 6535), True, 'from keras import backend as K\n'), ((1624, 1646), 'keras.backend.exp', 'K.exp', (['(0.5 * z_log_var)'], {}), '(0.5 * z_log_var)\n', (1629, 1646), True, 'from keras import backend as K\n'), ((3082, 3102), 'numpy.array', 'np.array', (['[[xi, yi]]'], {}), '([[xi, yi]])\n', (3090, 3102), True, 'import numpy as np\n'), ((6500, 6516), 'keras.backend.square', 'K.square', (['z_mean'], {}), '(z_mean)\n', (6508, 6516), True, 'from keras import backend as K\n')] |
import numpy as np
import sys,os
import cv2
caffe_root = '/home/yaochuanqi/work/tmp/ssd/'
sys.path.insert(0, caffe_root + 'python')
import caffe
net_file= 'ssdlite/coco/deploy.prototxt'
caffe_model='ssdlite/deploy.caffemodel'
test_dir = "images"
caffe.set_mode_cpu()
net = caffe.Net(net_file,caffe_model,caffe.TEST)
COCO_CLASSES = ("background" , "person" , "bicycle" , "car" , "motorcycle" ,
"airplane" , "bus" , "train" , "truck" , "boat" , "traffic light",
"fire hydrant", "N/A" , "stop sign", "parking meter", "bench" ,
"bird" , "cat" , "dog" , "horse" , "sheep" , "cow" , "elephant" ,
"bear" , "zebra" , "giraffe" , "N/A" , "backpack" , "umbrella" ,
"N/A" , "N/A" , "handbag" , "tie" , "suitcase" , "frisbee" , "skis" ,
"snowboard" , "sports ball", "kite" , "baseball bat", "baseball glove",
"skateboard" , "surfboard" , "tennis racket", "bottle" , "N/A" ,
"wine glass", "cup" , "fork" , "knife" , "spoon" , "bowl" , "banana" ,
"apple" , "sandwich" , "orange" , "broccoli" , "carrot" , "hot dog",
"pizza" , "donut" , "cake" , "chair" , "couch" , "potted plant",
"bed" , "N/A" , "dining table", "N/A" , "N/A" , "toilet" , "N/A" ,
"tv" , "laptop" , "mouse" , "remote" , "keyboard" , "cell phone",
"microwave" , "oven" , "toaster" , "sink" , "refrigerator" , "N/A" ,
"book" , "clock" , "vase" , "scissors" , "teddy bear", "hair drier",
"toothbrush" )
def preprocess(src):
img = cv2.resize(src, (300,300))
img = img - 127.5
img = img / 127.5
return img
def postprocess(img, out):
h = img.shape[0]
w = img.shape[1]
box = out['detection_out'][0,0,:,3:7] * np.array([w, h, w, h])
cls = out['detection_out'][0,0,:,1]
conf = out['detection_out'][0,0,:,2]
return (box.astype(np.int32), conf, cls)
def detect(imgfile):
origimg = cv2.imread(imgfile)
img = preprocess(origimg)
img = img.astype(np.float32)
img = img.transpose((2, 0, 1))
net.blobs['data'].data[...] = img
out = net.forward()
box, conf, cls = postprocess(origimg, out)
for i in range(len(box)):
p1 = (box[i][0], box[i][1])
p2 = (box[i][2], box[i][3])
cv2.rectangle(origimg, p1, p2, (0,255,0))
p3 = (max(p1[0], 15), max(p1[1], 15))
title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
cv2.imshow("SSD", origimg)
k = cv2.waitKey(0) & 0xff
#Exit if ESC pressed
if k == 27 : return False
return True
for f in os.listdir(test_dir):
if detect(test_dir + "/" + f) == False:
break
| [
"cv2.rectangle",
"sys.path.insert",
"os.listdir",
"cv2.imshow",
"cv2.putText",
"numpy.array",
"cv2.waitKey",
"caffe.Net",
"caffe.set_mode_cpu",
"cv2.resize",
"cv2.imread"
] | [((94, 135), 'sys.path.insert', 'sys.path.insert', (['(0)', "(caffe_root + 'python')"], {}), "(0, caffe_root + 'python')\n", (109, 135), False, 'import sys, os\n'), ((259, 279), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (277, 279), False, 'import caffe\n'), ((286, 330), 'caffe.Net', 'caffe.Net', (['net_file', 'caffe_model', 'caffe.TEST'], {}), '(net_file, caffe_model, caffe.TEST)\n', (295, 330), False, 'import caffe\n'), ((2588, 2608), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (2598, 2608), False, 'import sys, os\n'), ((1481, 1508), 'cv2.resize', 'cv2.resize', (['src', '(300, 300)'], {}), '(src, (300, 300))\n', (1491, 1508), False, 'import cv2\n'), ((1870, 1889), 'cv2.imread', 'cv2.imread', (['imgfile'], {}), '(imgfile)\n', (1880, 1889), False, 'import cv2\n'), ((2444, 2470), 'cv2.imshow', 'cv2.imshow', (['"""SSD"""', 'origimg'], {}), "('SSD', origimg)\n", (2454, 2470), False, 'import cv2\n'), ((1684, 1706), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1692, 1706), True, 'import numpy as np\n'), ((2212, 2255), 'cv2.rectangle', 'cv2.rectangle', (['origimg', 'p1', 'p2', '(0, 255, 0)'], {}), '(origimg, p1, p2, (0, 255, 0))\n', (2225, 2255), False, 'import cv2\n'), ((2370, 2439), 'cv2.putText', 'cv2.putText', (['origimg', 'title', 'p3', 'cv2.FONT_ITALIC', '(0.6)', '(0, 255, 0)', '(1)'], {}), '(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)\n', (2381, 2439), False, 'import cv2\n'), ((2481, 2495), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2492, 2495), False, 'import cv2\n')] |
import rospy
import rospkg
import numpy as np
import os
import sys
import tensorflow as tf
from collections import defaultdict
from utils import label_map_util
from utils import visualization_utils as vis_util
import time
from styx_msgs.msg import TrafficLight
SIM_MODEL_PATH = 'light_classification/model_files/frozen_inference_graph_sim.pb'
SITE_MODEL_PATH = 'light_classification/model_files/frozen_inference_graph_real.pb'
LABELS_PATH = 'light_classification/model_files/labels.pbtxt'
NUM_CLASSES = 4
class TLClassifier(object):
def __init__(self, mode='SIMULATOR'):
self.current_light = TrafficLight.UNKNOWN
CWD_PATH = os.getcwd()
model = os.path.join(CWD_PATH, SIM_MODEL_PATH)
if mode is 'SITE':
model = os.path.join(CWD_PATH, SITE_MODEL_PATH)
labels_path = os.path.join(CWD_PATH, LABELS_PATH)
label_map = label_map_util.load_labelmap(labels_path)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
self.category_index = label_map_util.create_category_index(categories)
self.image_np_deep = None
self.detection_graph = tf.Graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
jit_level = tf.OptimizerOptions.ON_1
config.graph_options.optimizer_options.global_jit_level = jit_level
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.sess = tf.Session(graph=self.detection_graph, config=config)
# Definite input and output Tensors for detection_graph
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
print("Loaded frozen model graph for mode = {}".format(mode))
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
self.current_light = TrafficLight.UNKNOWN
image_expanded = np.expand_dims(image, axis=0)
time1 = time.time()
with self.detection_graph.as_default():
(boxes, scores, classes, num) = self.sess.run([self.detection_boxes,
self.detection_scores,
self.detection_classes,
self.num_detections],
feed_dict={self.image_tensor:image_expanded})
time2 = time.time()
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
min_score_threshold = 0.5
for i in range(boxes.shape[0]):
if scores is None or scores[i] > min_score_threshold:
class_name = self.category_index[classes[i]]['name']
rospy.loginfo('Light Color : {}'.format(class_name))
rospy.loginfo('Time for inference : {}ms'.format((time2-time1)*1000))
if class_name == 'Red':
self.current_light = TrafficLight.RED
elif class_name == 'Yellow':
self.current_light = TrafficLight.YELLOW
elif class_name == 'Green':
self.current_light = TrafficLight.GREEN
else:
self.current_light = TrafficLight.UNKNOWN
#self.image_np_deep = image
return self.current_light
| [
"utils.label_map_util.load_labelmap",
"tensorflow.Graph",
"tensorflow.Session",
"os.path.join",
"tensorflow.GraphDef",
"utils.label_map_util.convert_label_map_to_categories",
"os.getcwd",
"numpy.squeeze",
"utils.label_map_util.create_category_index",
"numpy.expand_dims",
"tensorflow.import_graph... | [((633, 644), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (642, 644), False, 'import os\n'), ((656, 694), 'os.path.join', 'os.path.join', (['CWD_PATH', 'SIM_MODEL_PATH'], {}), '(CWD_PATH, SIM_MODEL_PATH)\n', (668, 694), False, 'import os\n'), ((784, 819), 'os.path.join', 'os.path.join', (['CWD_PATH', 'LABELS_PATH'], {}), '(CWD_PATH, LABELS_PATH)\n', (796, 819), False, 'import os\n'), ((834, 875), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['labels_path'], {}), '(labels_path)\n', (862, 875), False, 'from utils import label_map_util\n'), ((891, 1005), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (937, 1005), False, 'from utils import label_map_util\n'), ((1043, 1091), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1079, 1091), False, 'from utils import label_map_util\n'), ((1146, 1156), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1154, 1156), True, 'import tensorflow as tf\n'), ((1169, 1185), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1183, 1185), True, 'import tensorflow as tf\n'), ((2760, 2789), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2774, 2789), True, 'import numpy as np\n'), ((2801, 2812), 'time.time', 'time.time', ([], {}), '()\n', (2810, 2812), False, 'import time\n'), ((3123, 3134), 'time.time', 'time.time', ([], {}), '()\n', (3132, 3134), False, 'import time\n'), ((3149, 3166), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3159, 3166), True, 'import numpy as np\n'), ((3178, 3196), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3188, 3196), True, 'import numpy as np\n'), ((727, 766), 'os.path.join', 
'os.path.join', (['CWD_PATH', 'SITE_MODEL_PATH'], {}), '(CWD_PATH, SITE_MODEL_PATH)\n', (739, 766), False, 'import os\n'), ((1397, 1410), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1408, 1410), True, 'import tensorflow as tf\n'), ((1603, 1656), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph', 'config': 'config'}), '(graph=self.detection_graph, config=config)\n', (1613, 1656), True, 'import tensorflow as tf\n'), ((1420, 1447), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model', '"""rb"""'], {}), "(model, 'rb')\n", (1434, 1447), True, 'import tensorflow as tf\n'), ((1545, 1587), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1564, 1587), True, 'import tensorflow as tf\n'), ((3209, 3228), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3219, 3228), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from scipy.linalg import toeplitz
import sys
import os
from config import config
import timecorr as tc
from matplotlib import pyplot as plt
import seaborn as sns
sim_function = sys.argv[1]
r = sys.argv[2] #reps
F = int(sys.argv[3]) #number of features
T = int(sys.argv[4]) #number of timepoints
K = 2 #order
W = int(sys.argv[5])
wp = sys.argv[6]
fname = sim_function + '_' + str(F) + '_' + str(T) + '_' + str(W) + '_' + wp
results_dir = os.path.join(config['resultsdir'], 'higher_order_sims_search',
sim_function + '_' + str(T)+ '_' + str(F)+ '_' + str(W))
try:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
except OSError as err:
print(err)
def expanded_vec2mat(v):
m = tc.vec2mat(v)
x = np.zeros([v.shape[0], m.shape[0] ** 2])
for t in range(m.shape[2]):
x[t, :] = m[:, :, t].ravel()
return x
laplace = {'name': 'Laplace', 'weights': tc.laplace_weights, 'params': {'scale': W}}
gaussian = {'name': 'Gaussian', 'weights': tc.gaussian_weights, 'params': {'var': W}}
mexican_hat = {'name': 'Mexican hat', 'weights': tc.mexican_hat_weights, 'params': {'sigma': W}}
weights_paramter = eval(wp)
eye_params = {}
def eye_weights(T, params=eye_params):
return np.eye(T)
def generate_templates(order=1, **kwargs):
kwargs['return_corrs'] = True
_, next_template = tc.simulate_data(**kwargs)
T = kwargs['T']
templates = []
for n in range(order - 1):
print(n)
templates.append(next_template)
expanded_corrmats = tc.vec2mat(next_template)
K2 = expanded_corrmats.shape[0] ** 2
next_template = np.zeros([K2, K2, T])
for t in range(T):
x = expanded_corrmats[:, :, t]
next_template[:, :, t] = np.kron(x, x)
next_template = tc.mat2vec(next_template)
templates.append(next_template)
return templates
def generate_data(templates):
order = len(templates) + 1
adjusted_templates = [templates[-1]] # generate adjusted templates in reverse order
next_corrmats = adjusted_templates[-1]
for n in range(order - 1, 1, -1):
print(n)
corrmats = tc.vec2mat(next_corrmats)
K = corrmats.shape[0]
sK = int(np.sqrt(K))
T = corrmats.shape[2]
draws = np.zeros([sK, sK, T])
means = tc.vec2mat(templates[n - 2])
for t in range(T):
draws[:, :, t] = np.reshape(np.random.multivariate_normal(means[:, :, t].ravel(), corrmats[:, :, t]),
[sK, sK])
next_corrmats = tc.mat2vec(draws)
adjusted_templates.append(next_corrmats)
corrmats = tc.vec2mat(next_corrmats)
K = int(corrmats.shape[0])
T = corrmats.shape[2]
data = np.zeros([T, K])
for t in range(T):
data[t, :] = np.random.multivariate_normal(np.zeros([K]), corrmats[:, :, t])
adjusted_templates.reverse()
return data, adjusted_templates
save_file = os.path.join(results_dir, str(r))
if not os.path.exists(save_file):
recovery_performance_all = pd.DataFrame()
templates = generate_templates(order=K, S=1, T=T, K=F, datagen=sim_function)
data, adjusted_templates = generate_data(templates)
get_f = lambda y: int((1/2) * (np.sqrt(8*y + 1) - 1)) #solve for x in y = ((x^2 - x)/2) + x
recovery_performance = pd.DataFrame(index=np.arange(T), columns=np.arange(1, K+1))
recovery_performance.index.name = 'time'
recovery_performance.columns.name = 'order'
next_data = data
recovered_corrs_raw = []
recovered_corrs_smooth = []
for k in np.arange(1, K+1):
next_recovered_smooth = tc.timecorr(next_data, weights_function=weights_paramter['weights'],
weights_params=weights_paramter['params'])
next_recovered_raw = tc.timecorr(next_data, weights_function=eye_weights, weights_params=eye_params)
recovered_corrs_smooth.append(next_recovered_smooth)
F_new = get_f(next_recovered_smooth.shape[1])
for t in np.arange(T):
recovery_performance.loc[t, k] = np.corrcoef(templates[k-1][t, F_new:], next_recovered_smooth[t, F_new:])[0, 1]
next_data = expanded_vec2mat(next_recovered_raw)
recovery_performance.columns = [str(x + 1) for x in np.arange(K)]
recovery_performance['iteration'] = int(r)
recovery_performance_all = recovery_performance_all.append(recovery_performance)
if not os.path.isfile(save_file + '.csv'):
recovery_performance.to_csv(save_file + '.csv')
else:
append_iter = pd.read_csv(save_file + '.csv', index_col=0)
append_iter = append_iter.append(recovery_performance)
append_iter.to_csv(save_file + '.csv')
| [
"os.path.exists",
"numpy.eye",
"numpy.sqrt",
"os.makedirs",
"timecorr.vec2mat",
"timecorr.mat2vec",
"pandas.read_csv",
"timecorr.timecorr",
"numpy.corrcoef",
"os.path.isfile",
"numpy.kron",
"numpy.zeros",
"timecorr.simulate_data",
"pandas.DataFrame",
"numpy.arange"
] | [((777, 790), 'timecorr.vec2mat', 'tc.vec2mat', (['v'], {}), '(v)\n', (787, 790), True, 'import timecorr as tc\n'), ((797, 836), 'numpy.zeros', 'np.zeros', (['[v.shape[0], m.shape[0] ** 2]'], {}), '([v.shape[0], m.shape[0] ** 2])\n', (805, 836), True, 'import numpy as np\n'), ((1278, 1287), 'numpy.eye', 'np.eye', (['T'], {}), '(T)\n', (1284, 1287), True, 'import numpy as np\n'), ((1385, 1411), 'timecorr.simulate_data', 'tc.simulate_data', ([], {}), '(**kwargs)\n', (1401, 1411), True, 'import timecorr as tc\n'), ((2635, 2660), 'timecorr.vec2mat', 'tc.vec2mat', (['next_corrmats'], {}), '(next_corrmats)\n', (2645, 2660), True, 'import timecorr as tc\n'), ((2729, 2745), 'numpy.zeros', 'np.zeros', (['[T, K]'], {}), '([T, K])\n', (2737, 2745), True, 'import numpy as np\n'), ((2981, 3006), 'os.path.exists', 'os.path.exists', (['save_file'], {}), '(save_file)\n', (2995, 3006), False, 'import os\n'), ((3040, 3054), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3052, 3054), True, 'import pandas as pd\n'), ((3570, 3589), 'numpy.arange', 'np.arange', (['(1)', '(K + 1)'], {}), '(1, K + 1)\n', (3579, 3589), True, 'import numpy as np\n'), ((645, 672), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (659, 672), False, 'import os\n'), ((682, 706), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (693, 706), False, 'import os\n'), ((1551, 1576), 'timecorr.vec2mat', 'tc.vec2mat', (['next_template'], {}), '(next_template)\n', (1561, 1576), True, 'import timecorr as tc\n'), ((1638, 1659), 'numpy.zeros', 'np.zeros', (['[K2, K2, T]'], {}), '([K2, K2, T])\n', (1646, 1659), True, 'import numpy as np\n'), ((1785, 1810), 'timecorr.mat2vec', 'tc.mat2vec', (['next_template'], {}), '(next_template)\n', (1795, 1810), True, 'import timecorr as tc\n'), ((2135, 2160), 'timecorr.vec2mat', 'tc.vec2mat', (['next_corrmats'], {}), '(next_corrmats)\n', (2145, 2160), True, 'import timecorr as tc\n'), ((2267, 2288), 'numpy.zeros', 
'np.zeros', (['[sK, sK, T]'], {}), '([sK, sK, T])\n', (2275, 2288), True, 'import numpy as np\n'), ((2305, 2333), 'timecorr.vec2mat', 'tc.vec2mat', (['templates[n - 2]'], {}), '(templates[n - 2])\n', (2315, 2333), True, 'import timecorr as tc\n'), ((2552, 2569), 'timecorr.mat2vec', 'tc.mat2vec', (['draws'], {}), '(draws)\n', (2562, 2569), True, 'import timecorr as tc\n'), ((3619, 3734), 'timecorr.timecorr', 'tc.timecorr', (['next_data'], {'weights_function': "weights_paramter['weights']", 'weights_params': "weights_paramter['params']"}), "(next_data, weights_function=weights_paramter['weights'],\n weights_params=weights_paramter['params'])\n", (3630, 3734), True, 'import timecorr as tc\n'), ((3795, 3874), 'timecorr.timecorr', 'tc.timecorr', (['next_data'], {'weights_function': 'eye_weights', 'weights_params': 'eye_params'}), '(next_data, weights_function=eye_weights, weights_params=eye_params)\n', (3806, 3874), True, 'import timecorr as tc\n'), ((4001, 4013), 'numpy.arange', 'np.arange', (['T'], {}), '(T)\n', (4010, 4013), True, 'import numpy as np\n'), ((4409, 4443), 'os.path.isfile', 'os.path.isfile', (["(save_file + '.csv')"], {}), "(save_file + '.csv')\n", (4423, 4443), False, 'import os\n'), ((4533, 4577), 'pandas.read_csv', 'pd.read_csv', (["(save_file + '.csv')"], {'index_col': '(0)'}), "(save_file + '.csv', index_col=0)\n", (4544, 4577), True, 'import pandas as pd\n'), ((1751, 1764), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (1758, 1764), True, 'import numpy as np\n'), ((2208, 2218), 'numpy.sqrt', 'np.sqrt', (['K'], {}), '(K)\n', (2215, 2218), True, 'import numpy as np\n'), ((2821, 2834), 'numpy.zeros', 'np.zeros', (['[K]'], {}), '([K])\n', (2829, 2834), True, 'import numpy as np\n'), ((3339, 3351), 'numpy.arange', 'np.arange', (['T'], {}), '(T)\n', (3348, 3351), True, 'import numpy as np\n'), ((3361, 3380), 'numpy.arange', 'np.arange', (['(1)', '(K + 1)'], {}), '(1, K + 1)\n', (3370, 3380), True, 'import numpy as np\n'), ((4250, 4262), 
'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (4259, 4262), True, 'import numpy as np\n'), ((4056, 4130), 'numpy.corrcoef', 'np.corrcoef', (['templates[k - 1][t, F_new:]', 'next_recovered_smooth[t, F_new:]'], {}), '(templates[k - 1][t, F_new:], next_recovered_smooth[t, F_new:])\n', (4067, 4130), True, 'import numpy as np\n'), ((3230, 3248), 'numpy.sqrt', 'np.sqrt', (['(8 * y + 1)'], {}), '(8 * y + 1)\n', (3237, 3248), True, 'import numpy as np\n')] |
'''
#TODO refactor this module
'''
import numpy as np
from pathlib import Path
import pandas as pd
import sys
from file_py_helper.ExtraInfo import EC_Properties
# No-op entry-point guard: this module is import-only; running it directly does nothing.
if __name__ == "__main__":
    pass
import logging

# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def RHE_potential_assignment(ovv_row):
    """
    Determine a valid RHE reference potential (in V) for one index row.

    Two candidates are considered: ``RHE_fn`` (parsed from the file name)
    and ``RHE_mean`` (measured). Candidates with magnitude above 3 are
    assumed to be in mV and are scaled to V. Resolution rules:

    * both zero: guess from other files, falling back to a lookup based
      on the electrolyte composition;
    * exactly one non-zero: use it;
    * both non-zero: use ``RHE_mean`` when they agree within 4 mV,
      otherwise re-parse the file name to break the tie.

    Parameters
    ----------
    ovv_row : pd.DataFrame row, namedtuple
        Row of the overall index dataframe; uses the attributes
        RHE_fn, RHE_mean, PAR_date, PAR_file (and Electrolyte as fallback).

    Returns
    -------
    RHE_potential : float
        RHE value /1000 for unit V
    """
    CVrow = ovv_row
    RHE_potential = 0
    # Normalize both candidates to volts; magnitudes above 3 are assumed mV.
    if np.abs(CVrow.RHE_fn) > 3:
        RHE_fn = np.abs(CVrow.RHE_fn) / 1000
    else:
        RHE_fn = np.abs(CVrow.RHE_fn)
    if np.abs(CVrow.RHE_mean) > 3:
        RHE_mean = np.abs(CVrow.RHE_mean) / 1000
    else:
        RHE_mean = np.abs(CVrow.RHE_mean)
    if RHE_fn == 0 and RHE_mean == 0:
        # Neither source gives a value: try nearby files, else guess from
        # the electrolyte composition.
        try:
            # NOTE(review): (CVrow.PAR_date - CVrow.PAR_date) is always a
            # zero Timedelta, so this selection is always empty; presumably
            # a comparison against the other overview rows was intended.
            # Kept as-is pending confirmation (failure is handled below).
            OVV_prox = CVrow.loc[
                ((CVrow.PAR_date - CVrow.PAR_date) != pd.Timedelta(seconds=0))
            ]
            OVVproxRHE_fn = [i for i in OVV_prox.RHE_fn.unique() if i != 0]
        except Exception:
            OVVproxRHE_fn = []
        if OVVproxRHE_fn:
            RHE_potential = OVVproxRHE_fn[0]
            logger.warning(
                "CRITICAL Create CV, RHE problem both are 0, guessed from other Files {0} mV".format(
                    RHE_potential, Path(CVrow.PAR_file).name
                )
            )
        else:
            RHE_potential = EC_Properties.guess_RHE_from_Electrolyte(CVrow.Electrolyte)
            logger.warning(
                "CRITICAL Create CV, RHE problem both are 0, guessed from Electrolyte {0} mV".format(
                    RHE_potential, Path(CVrow.PAR_file).name
                )
            )
    elif RHE_fn != 0 and RHE_mean == 0:
        RHE_potential = RHE_fn
    elif RHE_fn == 0 and RHE_mean != 0:
        RHE_potential = RHE_mean
    elif RHE_fn != 0 and RHE_mean != 0:
        # Both sources are set: accept the measured value when they agree
        # within 4 mV.
        if np.isclose(RHE_fn, RHE_mean, atol=0.004):
            RHE_potential = RHE_mean
        else:
            # Disagreement: re-parse the file name for a plausible RHE value
            # (150-1100 mV, excluding the common non-RHE tokens 300 and 10).
            try:
                RHE_fromfn_opts = [
                    b
                    for b in [
                        float(i)
                        for i in Path(CVrow.PAR_file).stem.split("_")
                        if i.isdigit()
                    ]
                    if b < 1100 and b > 150 and not b == 300 and not b == 10
                ]
                if RHE_fromfn_opts:
                    RHE_fromfn = RHE_fromfn_opts[0] / 1000
                    if np.isclose(RHE_fn, RHE_fromfn, atol=0.001):
                        RHE_potential = RHE_fn
                    elif np.isclose(RHE_mean, RHE_fromfn, atol=0.001):
                        RHE_potential = RHE_mean
                    else:
                        logger.warning(
                            "Create CV, RHE conflicting both are (%.3f, %.3f) took [%.3f] for %s"
                            % (RHE_fn, RHE_mean, RHE_fromfn, Path(CVrow.PAR_file).name)
                        )
                        RHE_potential = RHE_fromfn
                else:
                    logger.warning(
                        "CIRITAL RHE ERROR, Create CV, RHE (%.3f, %.3f) empty for %s"
                        % (RHE_fn, RHE_mean, Path(CVrow.PAR_file).name)
                    )
            except Exception as e:
                # Lazy logger args fix a bug in the original, which applied
                # "%" to only the first of two placeholders; that raised a
                # TypeError and masked the real exception.
                logger.error(
                    "CRITICAL ERROR Create CV, RHE problem both are non-zero and no.%s \n %s",
                    Path(CVrow.PAR_file).name,
                    e,
                )
    else:
        # Defensive: unreachable — the branches above are exhaustive.
        logger.error(
            "Create CV, RHE critical error are %s and %s for %s"
            % (RHE_fn, RHE_mean, Path(CVrow.PAR_file).name)
        )
    if RHE_potential > 2:
        # Still in mV (e.g. a guessed value): convert to V.
        RHE_potential = RHE_potential / 1000
    elif RHE_potential == 0:
        # No branch produced a value; fall back to the larger candidate.
        RHE_potential = np.array([RHE_fn, RHE_mean]).max()
        logger.error(
            "Create CV, RHE = 0 crit. error are %s and %s for %s.\nTook :%s"
            % (RHE_fn, RHE_mean, Path(CVrow.PAR_file).name, RHE_potential)
        )
        if RHE_potential > 2:
            RHE_potential = RHE_potential / 1000
    return RHE_potential
| [
"logging.getLogger",
"numpy.abs",
"numpy.isclose",
"pathlib.Path",
"file_py_helper.ExtraInfo.EC_Properties.guess_RHE_from_Electrolyte",
"pandas.Timedelta",
"numpy.array"
] | [((226, 253), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (243, 253), False, 'import logging\n'), ((823, 843), 'numpy.abs', 'np.abs', (['CVrow.RHE_fn'], {}), '(CVrow.RHE_fn)\n', (829, 843), True, 'import numpy as np\n'), ((921, 941), 'numpy.abs', 'np.abs', (['CVrow.RHE_fn'], {}), '(CVrow.RHE_fn)\n', (927, 941), True, 'import numpy as np\n'), ((949, 971), 'numpy.abs', 'np.abs', (['CVrow.RHE_mean'], {}), '(CVrow.RHE_mean)\n', (955, 971), True, 'import numpy as np\n'), ((1055, 1077), 'numpy.abs', 'np.abs', (['CVrow.RHE_mean'], {}), '(CVrow.RHE_mean)\n', (1061, 1077), True, 'import numpy as np\n'), ((866, 886), 'numpy.abs', 'np.abs', (['CVrow.RHE_fn'], {}), '(CVrow.RHE_fn)\n', (872, 886), True, 'import numpy as np\n'), ((996, 1018), 'numpy.abs', 'np.abs', (['CVrow.RHE_mean'], {}), '(CVrow.RHE_mean)\n', (1002, 1018), True, 'import numpy as np\n'), ((1759, 1818), 'file_py_helper.ExtraInfo.EC_Properties.guess_RHE_from_Electrolyte', 'EC_Properties.guess_RHE_from_Electrolyte', (['CVrow.Electrolyte'], {}), '(CVrow.Electrolyte)\n', (1799, 1818), False, 'from file_py_helper.ExtraInfo import EC_Properties\n'), ((1260, 1283), 'pandas.Timedelta', 'pd.Timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (1272, 1283), True, 'import pandas as pd\n'), ((4482, 4510), 'numpy.array', 'np.array', (['[RHE_fn, RHE_mean]'], {}), '([RHE_fn, RHE_mean])\n', (4490, 4510), True, 'import numpy as np\n'), ((1659, 1679), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (1663, 1679), False, 'from pathlib import Path\n'), ((1984, 2004), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (1988, 2004), False, 'from pathlib import Path\n'), ((2283, 2323), 'numpy.isclose', 'np.isclose', (['RHE_fn', 'RHE_mean'], {'atol': '(0.004)'}), '(RHE_fn, RHE_mean, atol=0.004)\n', (2293, 2323), True, 'import numpy as np\n'), ((4648, 4668), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (4652, 4668), False, 
'from pathlib import Path\n'), ((4321, 4341), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (4325, 4341), False, 'from pathlib import Path\n'), ((2866, 2908), 'numpy.isclose', 'np.isclose', (['RHE_fn', 'RHE_fromfn'], {'atol': '(0.001)'}), '(RHE_fn, RHE_fromfn, atol=0.001)\n', (2876, 2908), True, 'import numpy as np\n'), ((2989, 3033), 'numpy.isclose', 'np.isclose', (['RHE_mean', 'RHE_fromfn'], {'atol': '(0.001)'}), '(RHE_mean, RHE_fromfn, atol=0.001)\n', (2999, 3033), True, 'import numpy as np\n'), ((3604, 3624), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (3608, 3624), False, 'from pathlib import Path\n'), ((3868, 3888), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (3872, 3888), False, 'from pathlib import Path\n'), ((4143, 4163), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (4147, 4163), False, 'from pathlib import Path\n'), ((2550, 2570), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (2554, 2570), False, 'from pathlib import Path\n'), ((3311, 3331), 'pathlib.Path', 'Path', (['CVrow.PAR_file'], {}), '(CVrow.PAR_file)\n', (3315, 3331), False, 'from pathlib import Path\n')] |
import threading, queue, time
from queue import Queue
from threading import Thread, currentThread
import os
from CalibrateTransfer.img_operation import ScreenSHot_batch
from CalibrateTransfer.data_preprocess import write_data_to_json_file, read_data_from_json_file_v2
import numpy as np
import torch.utils.data as data
import torch
import json
import shutil
from ReID_model.modeling import ReID_Model
from utils_BINGO.K_Means import k_means
from ReID_model.utils.dataset_loader import ReID_imgs_load_by_home_and_away
import logging
from utils.log import Log
from utils.timer import Timer
from utils.dir_related_operation import makedir_v1
import cv2
from SVHN.svhn import load_in_Svhn_model
from torchvision import transforms
from PIL import Image
from utils_BINGO.Number_Rectifier import Number_Rectifier
class SVHN_Predict():
    """
    Pipeline stage that predicts jersey numbers with an SVHN model.

    Consumes pose-detection results from ``Pose_output_queue``, preprocesses
    the player back-region crops, batches them through the SVHN predictor,
    aggregates per-action number votes, rectifies them against the four
    officials' report, and finally clusters the per-action main images into
    home/away player groups using ReID features.
    """
    def __init__(self, opt, ReIDCfg, Num_Pred_opt, Pose_output_queue, S_Number_Predict,
                 vis=False, save_results=False, queueSize=1024):
        """
        Set up paths, queues, the SVHN model and image transforms.

        opt : global options (data_root, dir_name, file_name, ...)
        ReIDCfg : configuration for the ReID model used in clustering
        Num_Pred_opt : options for the number-recognition (SVHN) model
        Pose_output_queue : queue fed by the pose-detection stage
        S_Number_Predict : index of the first action still to be processed
        vis : if True, write visualization images under <root>/vis
        save_results : if True, save per-action intermediate results
        queueSize : capacity of the internal queues
        """
        self.opt = opt
        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
        # logger.info('The target directory is {}'.format(self.root_path))
        self.file_name = opt.file_name
        self.file_save_name_before_Number_Rectify = 'Step1_'
        self.file_save_name_after_Number_Rectify = 'Step2_'
        # The video has to be loaded twice anyway; reading it separately also works.
        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file_v2(self.root_path, self.file_name, self.opt)
        # Number rectifier: adjusts results according to the four officials' report.
        self.Number_Rectifier = Number_Rectifier
        self.datalen = len(self.action_datas)
        self.batch_size = 60
        self.Num_Pred_opt = Num_Pred_opt  # parameters for the number-recognition model
        self.SVHN_predictor = load_in_Svhn_model(self.Num_Pred_opt)
        self.input_Q = Pose_output_queue  # results from pose keypoint detection
        self.PreProcess_Q = Queue(maxsize=queueSize)  # images preprocessed before number recognition
        self.SVHN_Q = Queue(maxsize=queueSize)
        self.transform = transforms.Compose([
            transforms.Resize([54, 54]),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        self.vis = vis
        if self.vis == True:
            self.vis_path = os.path.join(self.root_path, 'vis')
            os.makedirs(self.vis_path, exist_ok=True)
        self.S_Number_Predict = S_Number_Predict
        self.S_Final = max( S_Number_Predict - 1, 0)
        # Minimum crop size (pixels) for a back region to be worth predicting.
        self.height_threshold = 21
        self.width_threshold = 12
        self.save_results = save_results
        if self.save_results == True:
            self.intermediate_results_dir = os.path.join(self.root_path, 'intermediate_results', 'SVHN_Predict')
            os.makedirs(self.intermediate_results_dir, exist_ok=True)
        self.main_imgs_dir = os.path.join(self.root_path, 'intermediate_results', 'main_imgs')
        self.FMLoader_dir = os.path.join(self.root_path, 'intermediate_results', 'FMLoader')
        # ReID model configuration (model itself is built in cluster_main_imgs).
        self.ReIDCfg = ReIDCfg
        self.num_cls = 4  # number of person categories on the pitch
        self.logger = Log(__name__, 'SVHN_Predict').getlog()

    def Read_From_Cache(self):
        '''
        Restore previously computed results from the cache files on disk.
        '''
        self.logger.debug( 'The pid of SVHN_Predict.Read_From_Cache() : {}'.format(os.getpid()))
        self.logger.debug( 'The thread of SVHN_Predict.Read_From_Cache() : {}'.format(currentThread()))
        self.load_intermediate_resutls(self.S_Final)
        self.logger.log(24, ' SVHN_Predict loads action {} from Cache file '.format(self.S_Final))

    def PreProcess_(self):
        # Launch PreProcess() on a daemon worker thread.
        self.t_PreProcess = Thread(target=self.PreProcess, args=())
        self.t_PreProcess.daemon = True
        self.t_PreProcess.start()

    def PreProcess(self):
        '''
        Preprocess the images that need number recognition.
        '''
        self.logger.debug('The pid of SVHN_Predict.PreProcess() : {}'.format(os.getpid()))
        self.logger.debug('The thread of SVHN_Predict.PreProcess() : {}'.format(currentThread()))
        PreProcess_timer = Timer()
        for action_index in range(self.S_Number_Predict, self.datalen):
            PreProcess_timer.tic()  # start timing
            self.logger.debug('PreProcess() ======================================== action {}'.format(action_index))
            Flag, input_results = self.input_Q.get()
            if Flag == False:
                # Flag == False: upstream produced nothing useful; drop the action.
                self.PreProcess_Q.put((False, (action_index, [])))
                continue
            # Input is meaningful; continue processing.
            [input_index, sub_imgs_out, target_regions] = input_results
            if input_index != action_index:
                self.logger.log(31, '---——————————————————————————————————index does match')
                raise Exception(
                    'SVHN_Predict.PreProcess action_index_update {} != input_index {} '.format(action_index,
                                                                                              input_index))
            # Preprocess the data.
            rectangle_imgs,original_imgs = self.img_pre_for_SVHN(sub_imgs_out,target_regions)
            if type(rectangle_imgs) != torch.Tensor:
                self.PreProcess_Q.put((False, (action_index, [])))
            else:
                self.PreProcess_Q.put((True, (action_index, rectangle_imgs, original_imgs)))
            # self.logger.info('Calibrate_transfer.sub_img_generate() action {} consums {}s'.format(action_index,sub_img_generate_timer.toc()))
            self.logger.log(24, 'SVHN_Predict.PreProcess() action {} consums {}s'.format(action_index, PreProcess_timer.toc()))

    def img_pre_for_SVHN(self,sub_imgs_out,target_regions):
        '''
        Concrete preprocessing steps for the images fed to the SVHN model:
        crop the target (back) region, filter out crops that are too small,
        and apply the resize/normalize transform.
        Returns (stacked tensor of transformed crops, list of source images),
        or (None, None) when no crop passes the size filter.
        '''
        rectangle_imgs = []
        original_imgs = []
        for target_index in range(len(target_regions)) :
            sub_img = sub_imgs_out[target_index]
            [xmin, xmax, ymin, ymax] = target_regions[target_index]
            i_height, i_weight, i_channel = sub_img.shape
            # Clamp the crop box to the image bounds.
            crop_img = sub_img[max(ymin, 0):min(i_height, ymax), max(xmin, 0):min(xmax, i_weight)]
            h_i, w_i, _ = crop_img.shape
            if h_i < self.height_threshold or w_i < self.width_threshold:
                # Discard back regions that are too small to read a number from.
                continue
            crop_image = Image.fromarray(crop_img)
            crop_image = self.transform(crop_image)
            rectangle_imgs.append(crop_image)
            original_imgs.append(sub_img)
        # No region met the size criteria.
        if len(rectangle_imgs) == 0:
            return None, None
        rectangle_imgs = torch.stack(rectangle_imgs, dim=0)
        return rectangle_imgs,original_imgs

    def Predict_(self):
        # Launch Predict() on a daemon worker thread.
        self.t_Predict = Thread(target=self.Predict, args=())
        self.t_Predict.daemon = True
        self.t_Predict.start()

    def Predict(self):
        '''
        Run the SVHN model on the preprocessed images to predict jersey numbers.
        '''
        Predict_timer = Timer()
        self.logger.debug( 'The pid of SVHN_Predict.Predict() : {}'.format(os.getpid()))
        self.logger.debug( 'The thread of SVHN_Predict.Predict() : {}'.format(currentThread()))
        for action_index in range(self.S_Number_Predict, self.datalen):
            Predict_timer.tic()  # start timing
            PreProcess_Flag, PreResults = self.PreProcess_Q.get()
            self.logger.debug('Predict() ======================================== action {}'.format(action_index))
            if PreProcess_Flag == False:
                # Input carries no usable data.
                preNum = -1
                self.action_datas[action_index]['predicted_nums'] = []
            else:
                # Input is meaningful; unpack the preprocessed batch.
                _, rectangle_imgs,original_imgs = PreResults
                imgs_length = rectangle_imgs.size(0)
                leftover = 0
                if (imgs_length) % self.batch_size:
                    leftover = 1
                num_batches = imgs_length // self.batch_size + leftover
                if self.vis == True:
                    vis_dir = os.path.join(self.vis_path,'{}'.format(action_index),'SVHN_Predict')
                    makedir_v1(vis_dir)
                    vis_dir_0 = os.path.join(self.vis_path, '{}'.format(action_index), 'SVHN_Predict_Minus_one')
                    makedir_v1(vis_dir_0)
                NumsArray = []
                for j in range(num_batches):
                    input_imgs_j = rectangle_imgs[j*self.batch_size:min((j+1)*self.batch_size , imgs_length)]
                    length_logits_j, digits_logits_j = self.SVHN_predictor(input_imgs_j.cuda())
                    '''This max function return two column, the first row is value, and the second row is index '''
                    length_predictions_j = length_logits_j.max(1)[1].cpu().tolist()
                    digits_predictions_j = [digits_logits_j.max(1)[1].cpu().tolist() for digits_logits_j in digits_logits_j]
                    NumsArray_j = []
                    for Num_i in range(len(length_predictions_j)):
                        # Assemble the number from its predicted digit count.
                        Number_len = length_predictions_j[Num_i]
                        if Number_len == 1:
                            Num = digits_predictions_j[0][Num_i]
                            NumsArray_j.append(Num)
                        elif Number_len == 2:
                            Num = digits_predictions_j[0][Num_i] * 10 + digits_predictions_j[1][Num_i]
                            NumsArray_j.append(Num)
                        elif Number_len == 0:
                            # No digits detected on this crop: skip it.
                            Num = -1
                            if self.vis == True:
                                cv2.imwrite(os.path.join(vis_dir_0, '{}_P{}.jpg'.format(num_batches*j + Num_i, Num)), original_imgs[Num_i])
                                continue
                            else:
                                continue
                        if self.vis == True:
                            cv2.imwrite(os.path.join(vis_dir, '{}_P{}.jpg'.format(num_batches*j + Num_i, Num)), original_imgs[Num_i])
                    NumsArray.extend(NumsArray_j)
                # Store the raw per-crop predictions for this action.
                self.action_datas[action_index]['predicted_nums'] = NumsArray
                if len(NumsArray) > 1:
                    # NumberArray range from 0 to 99.
                    # We need to count how many times does each number appear!
                    NumsArray = np.histogram(NumsArray, bins=100, range=(0, 100))[0]
                    preNum = np.argmax(NumsArray)
                    # if preNum == 10:
                    #     print('wrong value')
                    preNum_count = NumsArray[preNum]
                    if np.where(NumsArray == preNum_count)[0].size > 1:
                        # if there are more than one number have the maximun counts, then return -1
                        # can sort by number classification scores.
                        preNum = -1
                else:
                    preNum = -1
            # Persist the prediction for this action.
            True_num = self.action_datas[action_index]['num']
            self.action_datas[action_index]['num'] = '{}'.format(preNum)
            self.logger.log(24, 'SVHN_Predict.Predict action {} consums {}s'.format(action_index, Predict_timer.toc()))
            self.logger.log(24,'action {} ====================== True num = {}, Predict num = {} ============='.format(
                action_index,True_num,preNum))
            if self.save_results == True:
                self.save_intermediate_resutls(action_index)
        self.logger.log(24, '-----------------------------Finished SVHN_Predict.Predict() datalen = {}-----------------------------'.format(self.datalen))
        # Finished: all actions computed; save the final results before number rectification.
        write_data_to_json_file(self.root_path, self.file_name, self.action_datas, self.parameter, file_save_name=self.file_save_name_before_Number_Rectify)
        # Rectify the final results according to the four officials' report.
        self.action_datas = self.Number_Rectifier(os.path.join(self.root_path,self.file_save_name_before_Number_Rectify + self.file_name)).rectify()
        self.logger.log(24, 'Successfully Rectify numbers according to four officials report')
        write_data_to_json_file(self.root_path, self.file_name, self.action_datas, self.parameter, file_save_name=self.file_save_name_after_Number_Rectify)
        # And split the main images into teams using ReID features.
        self.cluster_main_imgs()

    def save_intermediate_resutls(self, action_index):
        '''Save the results computed so far (one snapshot per action) to disk.'''
        intermediate_resutls_path = os.path.join(self.intermediate_results_dir,'{}'.format(action_index))
        os.makedirs(intermediate_resutls_path,exist_ok=True)
        json_file = os.path.join(intermediate_resutls_path, '{}_action_data.json'.format(action_index))
        with open(json_file,'w') as f:
            json.dump(self.action_datas,f)

    def load_intermediate_resutls(self, action_index):
        '''Load the intermediate results saved for the given action index.'''
        intermediate_resutls_path = os.path.join(self.intermediate_results_dir, '{}'.format(action_index))
        os.makedirs(intermediate_resutls_path, exist_ok=True)
        json_file = os.path.join(intermediate_resutls_path, '{}_action_data.json'.format(action_index))
        with open(json_file, 'r') as f:
            self.action_datas = json.load(f)

    def mk_cluster_dirs(self, save_dir, num_cls):
        '''
        Create one empty sub-directory per class under save_dir.

        save_dir : root directory where clustering results are saved
        num_cls : number of classes to cluster into
        '''
        for i in range(num_cls):
            sub_dir = os.path.join(save_dir, str(i))
            if os.path.exists(sub_dir):
                shutil.rmtree(sub_dir)
            os.makedirs(sub_dir, exist_ok=True)

    def generate_main_imgs(self):
        '''Generate each action's main image on top of the tracking results.'''
        if os.path.exists(self.main_imgs_dir):
            shutil.rmtree(self.main_imgs_dir)
        os.makedirs(self.main_imgs_dir)
        FMLoader = self.FMLoader_dir
        if os.path.exists(FMLoader):
            print('{} exists'.format(FMLoader))
            action_indexes = os.listdir(FMLoader)
            action_indexes = sorted(action_indexes, key=lambda x: int(x))
            for action_index in action_indexes:
                action_dir = os.path.join(FMLoader, '{}'.format(action_index))
                if os.path.exists(action_dir):
                    # The first tracked frame serves as the action's main image.
                    target_read_path = os.path.join(action_dir, '0.jpg')
                    target_save_path = os.path.join(self.main_imgs_dir, '{}.jpg'.format(action_index))
                    shutil.copy(target_read_path, target_save_path)
        self.logger.log(24, 'SVHN_Predict.generate_main_imgs() Finished')

    def cluster_main_imgs(self):
        '''
        :param ReID: ReID model
        :param ReIDCfg: ReID configure
        :param main_img_dir: The dir save the imgs which the programme what to cluster.
        :param action_datas:
        :param save_dir:
        :param num_cls: how many classes that the programme want !
        :return:
        '''
        # timer
        cluster_main_imgs_timer = Timer()
        cluster_main_imgs_timer.tic()
        # Generate each action's main image from the tracking results.
        '''在追踪结果的基础之上,生成各个动作的主图片。'''
        self.generate_main_imgs()
        # Build the ReID model.
        self.ReID = ReID_Model(self.ReIDCfg)
        self.ReID.cuda()
        # make directories to save the clustered imgs.
        action_datas = self.action_datas
        # Four target-person categories on the pitch: create four sub-directories.
        save_dir = self.main_imgs_dir
        self.mk_cluster_dirs(save_dir, self.num_cls)
        '''Preprocess the imgs before ReID'''
        if not os.path.exists(self.main_imgs_dir):
            raise ValueError("The main_img_dir is not exits")
        # Preprocess the images fed to the ReID network.
        '''对要输入ReID网络的图片进行预处理'''
        imgs_arrays_all, img_names_all = ReID_imgs_load_by_home_and_away(self.ReIDCfg, self.main_imgs_dir, self.action_datas)
        # Split into home and away teams.
        cls_res_all = {'Home': 0, 'Away': 2}  # home team -> folders 0 and 1, away team -> folders 2 and 3
        for TeanIndex, TeamType in enumerate(['Home', 'Away']):
            imgs_arrays = imgs_arrays_all[TeamType]
            img_names = img_names_all[TeamType]
            cls_res = cls_res_all[TeamType]
            all_feats = []  # ReID features of each action's main image
            with torch.no_grad():
                for imgs_array in imgs_arrays:
                    imgs_array = imgs_array.to('cuda')
                    feats = self.ReID(imgs_array).cpu().numpy().tolist()
                    all_feats.extend(feats)
            length = len(all_feats)
            self.logger.log(24, ' ReID models ,there are {} actions of TeamType {} want to be delt with.'.format(length,TeamType))
            # Cluster by ReID features into two classes (e.g. goalkeeper vs. field players).
            '''根据ReID特征,进行分类,分成num_cls类, 门将和球员'''
            assignments, dataset = k_means(all_feats, 2)
            # Sort the images into folders according to the clustering result.
            '''根据分类结果,将图片按文件夹分类'''
            for index, cls in enumerate(assignments):
                cls += cls_res  # index of the destination folder
                # Whether recognition succeeded is decided by the number detection.
                if int(action_datas[int(img_names[index])]['num']) == -1 or \
                        action_datas[int(img_names[index])]['num'] == None:
                    shutil.copyfile(os.path.join(self.main_imgs_dir, img_names[index] + '.jpg'),
                                    os.path.join(save_dir,'{}'.format(cls),'{}_.jpg'.format(img_names[index])))
                else:
                    shutil.copyfile(os.path.join(self.main_imgs_dir, img_names[index] + '.jpg'),
                                    os.path.join(save_dir,'{}'.format(cls),'{}_{}.jpg'.format(img_names[index], action_datas[int(img_names[index])]['num'])))
                action_datas[int(img_names[index])]['team'] = str(cls)
        self.action_datas = action_datas
        self.logger.log(24, 'SVHN_Predict.cluster_main_imgs() Finished, consums {}s'.format(cluster_main_imgs_timer.toc()))
| [
"ReID_model.utils.dataset_loader.ReID_imgs_load_by_home_and_away",
"utils.log.Log",
"os.path.exists",
"numpy.histogram",
"os.listdir",
"numpy.where",
"utils.timer.Timer",
"utils_BINGO.K_Means.k_means",
"os.getpid",
"torchvision.transforms.ToTensor",
"SVHN.svhn.load_in_Svhn_model",
"threading.c... | [((1524, 1593), 'CalibrateTransfer.data_preprocess.read_data_from_json_file_v2', 'read_data_from_json_file_v2', (['self.root_path', 'self.file_name', 'self.opt'], {}), '(self.root_path, self.file_name, self.opt)\n', (1551, 1593), False, 'from CalibrateTransfer.data_preprocess import write_data_to_json_file, read_data_from_json_file_v2\n'), ((1839, 1876), 'SVHN.svhn.load_in_Svhn_model', 'load_in_Svhn_model', (['self.Num_Pred_opt'], {}), '(self.Num_Pred_opt)\n', (1857, 1876), False, 'from SVHN.svhn import load_in_Svhn_model\n'), ((1965, 1989), 'queue.Queue', 'Queue', ([], {'maxsize': 'queueSize'}), '(maxsize=queueSize)\n', (1970, 1989), False, 'from queue import Queue\n'), ((2034, 2058), 'queue.Queue', 'Queue', ([], {'maxsize': 'queueSize'}), '(maxsize=queueSize)\n', (2039, 2058), False, 'from queue import Queue\n'), ((2895, 2960), 'os.path.join', 'os.path.join', (['self.root_path', '"""intermediate_results"""', '"""main_imgs"""'], {}), "(self.root_path, 'intermediate_results', 'main_imgs')\n", (2907, 2960), False, 'import os\n'), ((2989, 3053), 'os.path.join', 'os.path.join', (['self.root_path', '"""intermediate_results"""', '"""FMLoader"""'], {}), "(self.root_path, 'intermediate_results', 'FMLoader')\n", (3001, 3053), False, 'import os\n'), ((3696, 3735), 'threading.Thread', 'Thread', ([], {'target': 'self.PreProcess', 'args': '()'}), '(target=self.PreProcess, args=())\n', (3702, 3735), False, 'from threading import Thread, currentThread\n'), ((4103, 4110), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (4108, 4110), False, 'from utils.timer import Timer\n'), ((6663, 6697), 'torch.stack', 'torch.stack', (['rectangle_imgs'], {'dim': '(0)'}), '(rectangle_imgs, dim=0)\n', (6674, 6697), False, 'import torch\n'), ((6792, 6828), 'threading.Thread', 'Thread', ([], {'target': 'self.Predict', 'args': '()'}), '(target=self.Predict, args=())\n', (6798, 6828), False, 'from threading import Thread, currentThread\n'), ((7001, 7008), 'utils.timer.Timer', 
'Timer', ([], {}), '()\n', (7006, 7008), False, 'from utils.timer import Timer\n'), ((11723, 11875), 'CalibrateTransfer.data_preprocess.write_data_to_json_file', 'write_data_to_json_file', (['self.root_path', 'self.file_name', 'self.action_datas', 'self.parameter'], {'file_save_name': 'self.file_save_name_before_Number_Rectify'}), '(self.root_path, self.file_name, self.action_datas,\n self.parameter, file_save_name=self.file_save_name_before_Number_Rectify)\n', (11746, 11875), False, 'from CalibrateTransfer.data_preprocess import write_data_to_json_file, read_data_from_json_file_v2\n'), ((12148, 12299), 'CalibrateTransfer.data_preprocess.write_data_to_json_file', 'write_data_to_json_file', (['self.root_path', 'self.file_name', 'self.action_datas', 'self.parameter'], {'file_save_name': 'self.file_save_name_after_Number_Rectify'}), '(self.root_path, self.file_name, self.action_datas,\n self.parameter, file_save_name=self.file_save_name_after_Number_Rectify)\n', (12171, 12299), False, 'from CalibrateTransfer.data_preprocess import write_data_to_json_file, read_data_from_json_file_v2\n'), ((12555, 12608), 'os.makedirs', 'os.makedirs', (['intermediate_resutls_path'], {'exist_ok': '(True)'}), '(intermediate_resutls_path, exist_ok=True)\n', (12566, 12608), False, 'import os\n'), ((12989, 13042), 'os.makedirs', 'os.makedirs', (['intermediate_resutls_path'], {'exist_ok': '(True)'}), '(intermediate_resutls_path, exist_ok=True)\n', (13000, 13042), False, 'import os\n'), ((13663, 13697), 'os.path.exists', 'os.path.exists', (['self.main_imgs_dir'], {}), '(self.main_imgs_dir)\n', (13677, 13697), False, 'import os\n'), ((13753, 13784), 'os.makedirs', 'os.makedirs', (['self.main_imgs_dir'], {}), '(self.main_imgs_dir)\n', (13764, 13784), False, 'import os\n'), ((13834, 13858), 'os.path.exists', 'os.path.exists', (['FMLoader'], {}), '(FMLoader)\n', (13848, 13858), False, 'import os\n'), ((14935, 14942), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (14940, 14942), False, 'from 
utils.timer import Timer\n'), ((15093, 15117), 'ReID_model.modeling.ReID_Model', 'ReID_Model', (['self.ReIDCfg'], {}), '(self.ReIDCfg)\n', (15103, 15117), False, 'from ReID_model.modeling import ReID_Model\n'), ((15596, 15685), 'ReID_model.utils.dataset_loader.ReID_imgs_load_by_home_and_away', 'ReID_imgs_load_by_home_and_away', (['self.ReIDCfg', 'self.main_imgs_dir', 'self.action_datas'], {}), '(self.ReIDCfg, self.main_imgs_dir, self.\n action_datas)\n', (15627, 15685), False, 'from ReID_model.utils.dataset_loader import ReID_imgs_load_by_home_and_away\n'), ((2341, 2376), 'os.path.join', 'os.path.join', (['self.root_path', '"""vis"""'], {}), "(self.root_path, 'vis')\n", (2353, 2376), False, 'import os\n'), ((2389, 2430), 'os.makedirs', 'os.makedirs', (['self.vis_path'], {'exist_ok': '(True)'}), '(self.vis_path, exist_ok=True)\n', (2400, 2430), False, 'import os\n'), ((2726, 2794), 'os.path.join', 'os.path.join', (['self.root_path', '"""intermediate_results"""', '"""SVHN_Predict"""'], {}), "(self.root_path, 'intermediate_results', 'SVHN_Predict')\n", (2738, 2794), False, 'import os\n'), ((2807, 2864), 'os.makedirs', 'os.makedirs', (['self.intermediate_results_dir'], {'exist_ok': '(True)'}), '(self.intermediate_results_dir, exist_ok=True)\n', (2818, 2864), False, 'import os\n'), ((6382, 6407), 'PIL.Image.fromarray', 'Image.fromarray', (['crop_img'], {}), '(crop_img)\n', (6397, 6407), False, 'from PIL import Image\n'), ((12763, 12794), 'json.dump', 'json.dump', (['self.action_datas', 'f'], {}), '(self.action_datas, f)\n', (12772, 12794), False, 'import json\n'), ((13219, 13231), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13228, 13231), False, 'import json\n'), ((13468, 13491), 'os.path.exists', 'os.path.exists', (['sub_dir'], {}), '(sub_dir)\n', (13482, 13491), False, 'import os\n'), ((13544, 13579), 'os.makedirs', 'os.makedirs', (['sub_dir'], {'exist_ok': '(True)'}), '(sub_dir, exist_ok=True)\n', (13555, 13579), False, 'import os\n'), ((13711, 13744), 
'shutil.rmtree', 'shutil.rmtree', (['self.main_imgs_dir'], {}), '(self.main_imgs_dir)\n', (13724, 13744), False, 'import shutil\n'), ((13937, 13957), 'os.listdir', 'os.listdir', (['FMLoader'], {}), '(FMLoader)\n', (13947, 13957), False, 'import os\n'), ((15423, 15457), 'os.path.exists', 'os.path.exists', (['self.main_imgs_dir'], {}), '(self.main_imgs_dir)\n', (15437, 15457), False, 'import os\n'), ((16548, 16569), 'utils_BINGO.K_Means.k_means', 'k_means', (['all_feats', '(2)'], {}), '(all_feats, 2)\n', (16555, 16569), False, 'from utils_BINGO.K_Means import k_means\n'), ((2118, 2145), 'torchvision.transforms.Resize', 'transforms.Resize', (['[54, 54]'], {}), '([54, 54])\n', (2135, 2145), False, 'from torchvision import transforms\n'), ((2159, 2180), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2178, 2180), False, 'from torchvision import transforms\n'), ((2194, 2248), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.5, 0.5, 0.5]', '[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n', (2214, 2248), False, 'from torchvision import transforms\n'), ((3167, 3196), 'utils.log.Log', 'Log', (['__name__', '"""SVHN_Predict"""'], {}), "(__name__, 'SVHN_Predict')\n", (3170, 3196), False, 'from utils.log import Log\n'), ((3370, 3381), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3379, 3381), False, 'import os\n'), ((3470, 3485), 'threading.currentThread', 'currentThread', ([], {}), '()\n', (3483, 3485), False, 'from threading import Thread, currentThread\n'), ((3964, 3975), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3973, 3975), False, 'import os\n'), ((4058, 4073), 'threading.currentThread', 'currentThread', ([], {}), '()\n', (4071, 4073), False, 'from threading import Thread, currentThread\n'), ((7084, 7095), 'os.getpid', 'os.getpid', ([], {}), '()\n', (7093, 7095), False, 'import os\n'), ((7176, 7191), 'threading.currentThread', 'currentThread', ([], {}), '()\n', (7189, 7191), False, 'from threading import Thread, 
currentThread\n'), ((13509, 13531), 'shutil.rmtree', 'shutil.rmtree', (['sub_dir'], {}), '(sub_dir)\n', (13522, 13531), False, 'import shutil\n'), ((14178, 14204), 'os.path.exists', 'os.path.exists', (['action_dir'], {}), '(action_dir)\n', (14192, 14204), False, 'import os\n'), ((16057, 16072), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16070, 16072), False, 'import torch\n'), ((8163, 8182), 'utils.dir_related_operation.makedir_v1', 'makedir_v1', (['vis_dir'], {}), '(vis_dir)\n', (8173, 8182), False, 'from utils.dir_related_operation import makedir_v1\n'), ((8316, 8337), 'utils.dir_related_operation.makedir_v1', 'makedir_v1', (['vis_dir_0'], {}), '(vis_dir_0)\n', (8326, 8337), False, 'from utils.dir_related_operation import makedir_v1\n'), ((10479, 10499), 'numpy.argmax', 'np.argmax', (['NumsArray'], {}), '(NumsArray)\n', (10488, 10499), True, 'import numpy as np\n'), ((11946, 12038), 'os.path.join', 'os.path.join', (['self.root_path', '(self.file_save_name_before_Number_Rectify + self.file_name)'], {}), '(self.root_path, self.file_save_name_before_Number_Rectify +\n self.file_name)\n', (11958, 12038), False, 'import os\n'), ((14245, 14278), 'os.path.join', 'os.path.join', (['action_dir', '"""0.jpg"""'], {}), "(action_dir, '0.jpg')\n", (14257, 14278), False, 'import os\n'), ((14402, 14449), 'shutil.copy', 'shutil.copy', (['target_read_path', 'target_save_path'], {}), '(target_read_path, target_save_path)\n', (14413, 14449), False, 'import shutil\n'), ((10397, 10446), 'numpy.histogram', 'np.histogram', (['NumsArray'], {'bins': '(100)', 'range': '(0, 100)'}), '(NumsArray, bins=100, range=(0, 100))\n', (10409, 10446), True, 'import numpy as np\n'), ((16930, 16989), 'os.path.join', 'os.path.join', (['self.main_imgs_dir', "(img_names[index] + '.jpg')"], {}), "(self.main_imgs_dir, img_names[index] + '.jpg')\n", (16942, 16989), False, 'import os\n'), ((17161, 17220), 'os.path.join', 'os.path.join', (['self.main_imgs_dir', "(img_names[index] + '.jpg')"], {}), 
"(self.main_imgs_dir, img_names[index] + '.jpg')\n", (17173, 17220), False, 'import os\n'), ((10662, 10697), 'numpy.where', 'np.where', (['(NumsArray == preNum_count)'], {}), '(NumsArray == preNum_count)\n', (10670, 10697), True, 'import numpy as np\n')] |
# command time python /gale/ddn/snm3C/humanPFC/code/impute_cell.py --indir /gale/raidix/rdx-5/zhoujt/projects/methylHiC/PFC_batch_merged/smoothed_matrix/1cell/${res0}b_resolution/chr${c}/ --outdir /gale/ddn/snm3C/humanPFC/smoothed_matrix/${res0}b_resolution/chr${c}/ --cell ${sample} --chrom ${c} --res ${res} --chrom_file /gale/netapp/home/zhoujt/genome/hg19/hg19.autosomal.chrom.sizes --mode pad2_std1_rp0.5_sqrtvc
# command time python /gale/ddn/snm3C/humanPFC/code/impute_cell.py --indir /gale/ddn/snm3C/humanPFC/cell_matrix/${res0}b_resolution/chr${c}/ --outdir /gale/ddn/snm3C/humanPFC/smoothed_matrix/${res0}b_resolution/chr${c}/ --cell ${sample} --chrom ${c} --res ${res} --chrom_file /gale/netapp/home/zhoujt/genome/hg19/hg19.autosomal.chrom.sizes --pad 2 --output_dist 10050000 --window_size 40000000 --step_size 10000000 --mode pad2_std1_rp0.5_ws40
import os
import time
import h5py
import cv2
cv2.useOptimized()
import argparse
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, save_npz, diags, eye
from scipy.sparse.linalg import norm
from scipy import ndimage as nd
def impute_cell(indir, outdir, cell, chrom, res, chrom_file,
                logscale=False, pad=1, std=1, rp=0.5, tol=0.01, window_size=500000000, step_size=10000000,
                output_dist=500000000, output_format='hdf5', mode=None):
    """Impute a single cell's sparse Hi-C contact matrix for one chromosome.

    Pipeline: load the sparse contact list from ``{indir}{cell}_{chrom}.txt``,
    smooth with a Gaussian convolution, run a random walk with restart (RWR,
    either over the whole matrix or in overlapping sliding windows), apply
    square-root vanilla-coverage (sqrtvc) normalization, drop contacts beyond
    ``output_dist``, and write the result to ``outdir``.

    Parameters
    ----------
    indir : str
        Directory containing the ``{cell}_{chrom}.txt`` sparse contact files
        (rows of ``bin_i bin_j count``; assumed format — TODO confirm with caller).
    outdir : str
        Output directory; must already exist (the function returns early otherwise).
    cell : str
        Cell identifier used in the input/output file names.
    chrom : str
        Chromosome name, with or without the ``chr`` prefix.
    res : int
        Bin size in base pairs.
    chrom_file : str
        Tab-separated chromosome-size file (column 0: name, column 1: length).
    logscale : bool
        If True, log2-transform the raw counts.
    pad : int
        Gaussian kernel radius; the kernel is ``(2*pad+1) x (2*pad+1)``.
        ``pad == 0`` skips the convolution.
    std : float
        Gaussian kernel standard deviation.
    rp : float
        RWR restart probability; ``rp == 1`` disables the walk.
    tol : float
        RWR convergence tolerance on the Frobenius norm of the update.
    window_size, step_size : int
        Sliding-window RWR extent and stride, in base pairs. Full-matrix RWR
        is used when the window covers the whole chromosome.
    output_dist : int
        Maximum genomic distance (bp) of contacts kept in the output.
    output_format : str
        ``'npz'`` writes a scipy sparse matrix; anything else writes HDF5.
    mode : str or None
        Suffix of the output file name; derived from the parameters if None.
    """
    def random_walk_cpu(P, rp, tol):
        # Random walk with restart: iterate Q <- P @ (Q*(1-rp) + rp*I)
        # until the change between iterations drops below tol
        # (hard cap of 30 iterations).
        if rp==1:
            return P
        ngene = P.shape[0]
        I = eye(ngene)
        Q = P.copy()
        start_time = time.time()
        for i in range(30):
            Q_new = P.dot(Q * (1 - rp) + rp * I)
            delta = norm(Q - Q_new)
            Q = Q_new.copy()
            sparsity = Q.nnz / ngene / ngene
            end_time = time.time()
            print('Iter', i+1, 'takes', end_time-start_time, 'seconds, loss', delta, 'sparsity', sparsity)
            if delta < tol:
                break
        return Q
    def output():
        # Write the imputed matrix E (closure variable) in the requested format.
        if output_format=='npz':
            save_npz(f'{outdir}{cell}_{c}_{mode}.npz', E.astype(np.float32))
        else:
            # Store the CSR components (data/indices/indptr) plus the shape
            # so the matrix can be reconstructed without densifying.
            f = h5py.File(f'{outdir}{cell}_{c}_{mode}.hdf5', 'w')
            g = f.create_group('Matrix')
            g.create_dataset('data', data=E.data, dtype='float32', compression='gzip')
            g.create_dataset('indices', data=E.indices, dtype=int, compression='gzip')
            g.create_dataset('indptr', data=E.indptr, dtype=int, compression='gzip')
            g.attrs['shape'] = E.shape
            f.close()
        return
    if not os.path.exists(outdir):
        print('Output directory does not exist')
        return
    # Normalize the chromosome name to carry the 'chr' prefix.
    if chrom[:3]=='chr':
        c = chrom
    else:
        c = 'chr' + chrom
    if not mode:
        mode = f'pad{str(pad)}_std{str(std)}_rp{str(rp)}_sqrtvc'
    # Convert window/step from base pairs to bins.
    ws = window_size // res
    ss = step_size // res
    # to_dict()[1] selects column 1 (chromosome length) keyed by name.
    chromsize = pd.read_csv(chrom_file, sep='\t', header=None, index_col=0).to_dict()[1]
    start_time = time.time()
    ngene = int(chromsize[c] // res) + 1
    D = np.loadtxt(f'{indir}{cell}_{c}.txt')
    # to avoid bugs on chromosomes with 0/1 read
    if len(D)==0:
        # No contacts at all: output the identity matrix.
        E = eye(ngene).tocsr()
        output()
        return
    elif len(D.shape)==1:
        # A single contact loads as a 1-D array; promote to 2-D.
        D = D.reshape(1,-1)
    A = csr_matrix((D[:, 2], (D[:, 0], D[:, 1])), shape = (ngene, ngene))
    if logscale:
        A.data = np.log2(A.data + 1)
    end_time = time.time()
    print('Loading takes', end_time-start_time, 'seconds')
    start_time = time.time()
    if pad > 0:
        # Symmetrize, densify, and smooth with a Gaussian kernel.
        A = cv2.GaussianBlur((A + A.T).astype(np.float32).toarray(), (pad*2+1, pad*2+1), std)
    else:
        A = (A + A.T).astype(np.float32)
    end_time = time.time()
    print('Convolution takes', end_time-start_time, 'seconds')
    start_time = time.time()
    # remove diagonal before rwr
    A = csr_matrix(A)
    A = A - diags(A.diagonal())
    if ws>=ngene or rp==1:
        # Whole-matrix RWR: add self-loops to empty bins so column sums
        # are nonzero, then column-normalize to a transition matrix.
        B = A + diags((A.sum(axis=0).A.ravel()==0).astype(int))
        d = diags(1 / B.sum(axis=0).A.ravel())
        P = d.dot(B)
        E = random_walk_cpu(P, rp, tol)
    else:
        # Sliding-window RWR. Each window overlaps its neighbours, so three
        # anti-diagonal masks select which part of each window's result is
        # kept: mask1 for the first window, mask2 for the last, mask0 for
        # interior windows. Entries beyond output_dist are dropped up front.
        idx = (np.repeat(np.arange(ws), ws), np.tile(np.arange(ws), ws))
        idxfilter = (np.abs(idx[1] - idx[0]) < (output_dist // res + 1))
        idx = (idx[0][idxfilter], idx[1][idxfilter])
        # first filter
        idxfilter = ((idx[0] + idx[1]) < (ws + ss))
        idx1 = (idx[0][idxfilter], idx[1][idxfilter])
        mask1 = csr_matrix((np.ones(len(idx1[0])), (idx1[0], idx1[1])), (ws, ws))
        # last filter
        idxfilter = ((idx[0] + idx[1]) >= ((ngene-ws) // ss * 2 + 1) * ss + 3 * ws - 2 * ngene)
        idx2 = (idx[0][idxfilter], idx[1][idxfilter])
        mask2 = csr_matrix((np.ones(len(idx2[0])), (idx2[0], idx2[1])), (ws, ws))
        # center filter
        idxfilter = np.logical_and((idx[0] + idx[1]) < (ws + ss), (idx[0] + idx[1]) >= (ws - ss))
        idx0 = (idx[0][idxfilter], idx[1][idxfilter])
        mask0 = csr_matrix((np.ones(len(idx0[0])), (idx0[0], idx0[1])), (ws, ws))
        start_time = time.time()
        E = csr_matrix(A.shape)
        # Slide the window by ss bins; the final window is pinned to the
        # chromosome end so the tail is always covered.
        for ll in [x for x in range(0, ngene - ws, ss)] + [ngene - ws]:
            B = A[ll:(ll+ws), ll:(ll+ws)]
            B = B + diags((B.sum(axis=0).A.ravel()==0).astype(int))
            d = diags(1 / B.sum(axis=0).A.ravel())
            P = d.dot(B)
            Etmp = random_walk_cpu(P, rp, tol)
            if ll==0:
                E[ll:(ll+ws), ll:(ll+ws)] += Etmp.multiply(mask1)
            elif ll==(ngene-ws):
                E[ll:(ll+ws), ll:(ll+ws)] += Etmp.multiply(mask2)
            else:
                E[ll:(ll+ws), ll:(ll+ws)] += Etmp.multiply(mask0)
            print('Window', ll)
    print('RWR takes', time.time() - start_time, 'seconds')
    start_time = time.time()
    # sqrtvc normalization: symmetrize, then scale by 1/sqrt of the
    # coverage (column sum) on each side; zero-coverage bins are left as-is.
    E = E + E.T
    d = E.sum(axis=0).A.ravel()
    d[d==0] = 1
    b = diags(1 / np.sqrt(d))
    E = b.dot(E).dot(b)
    print('SQRTVC takes', time.time() - start_time, 'seconds')
    # longest distance filter mask
    start_time = time.time()
    if (output_dist // res + 1) < ngene:
        # Keep only the upper triangle within output_dist of the diagonal.
        idx = np.triu_indices(E.shape[0], 0)
        idxfilter = ((idx[1] - idx[0]) < (output_dist // res + 1))
        idx = (idx[0][idxfilter], idx[1][idxfilter])
        mask = csr_matrix((np.ones(len(idx[0])), (idx[0], idx[1])), E.shape)
        E = E.tocsr().multiply(mask)
    print('Filter takes', time.time() - start_time, 'seconds')
    output()
    return
'''
parser = argparse.ArgumentParser()
parser.add_argument('--indir', type=str, default=None, help='Directory of the contact matrix')
parser.add_argument('--outdir', type=str, default=None, help='Output directory end with /')
parser.add_argument('--cell', type=str, default=None, help='Specific identifier of a cell')
parser.add_argument('--chrom', type=str, default=None, help='Chromosome to impute')
parser.add_argument('--res', type=int, default=None, help='Bin size as integer to generate contact matrix')
parser.add_argument('--chrom_file', type=str, default=None, help='Path to the chromosome size files containing all chromosomes to be analyzed')
parser.add_argument('--logscale', dest='logscale', action='store_true', help='To log transform raw count')
parser.set_defaults(logscale=False)
parser.add_argument('--pad', type=int, default=1, help='Gaussian kernal size')
parser.add_argument('--std', type=float, default=1, help='Gaussian kernal standard deviation')
parser.add_argument('--rp', type=float, default=0.5, help='Restart probability of RWR')
parser.add_argument('--tol', type=float, default=0.01, help='Convergence tolerance of RWR')
parser.add_argument('--window_size', type=int, default=500000000, help='Size of RWR sliding window')
parser.add_argument('--step_size', type=int, default=10000000, help='Step length of RWR sliding window')
parser.add_argument('--output_dist', type=int, default=500000000, help='Maximum distance threshold of contacts when writing output file')
parser.add_argument('--output_format', type=str, default='hdf5', help='Output file format (hdf5 or npz)')
parser.add_argument('--mode', type=str, default=None, help='Suffix of output file name')
opt = parser.parse_args()
impute_cell(opt.indir, opt.outdir, opt.cell, opt.chrom, opt.res, opt.chrom_file,
opt.logscale, opt.pad, opt.std, opt.rp, opt.tol, opt.window_size, opt.step_size,
opt.output_dist, opt.output_format, opt.mode)
'''
| [
"os.path.exists",
"cv2.useOptimized",
"numpy.abs",
"numpy.sqrt",
"numpy.triu_indices",
"scipy.sparse.eye",
"numpy.logical_and",
"pandas.read_csv",
"numpy.log2",
"h5py.File",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.norm",
"numpy.loadtxt",
"time.time",
"numpy.arange"
] | [((906, 924), 'cv2.useOptimized', 'cv2.useOptimized', ([], {}), '()\n', (922, 924), False, 'import cv2\n'), ((2938, 2949), 'time.time', 'time.time', ([], {}), '()\n', (2947, 2949), False, 'import time\n'), ((2999, 3035), 'numpy.loadtxt', 'np.loadtxt', (['f"""{indir}{cell}_{c}.txt"""'], {}), "(f'{indir}{cell}_{c}.txt')\n", (3009, 3035), True, 'import numpy as np\n'), ((3230, 3293), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(D[:, 2], (D[:, 0], D[:, 1]))'], {'shape': '(ngene, ngene)'}), '((D[:, 2], (D[:, 0], D[:, 1])), shape=(ngene, ngene))\n', (3240, 3293), False, 'from scipy.sparse import csr_matrix, save_npz, diags, eye\n'), ((3366, 3377), 'time.time', 'time.time', ([], {}), '()\n', (3375, 3377), False, 'import time\n'), ((3455, 3466), 'time.time', 'time.time', ([], {}), '()\n', (3464, 3466), False, 'import time\n'), ((3644, 3655), 'time.time', 'time.time', ([], {}), '()\n', (3653, 3655), False, 'import time\n'), ((3737, 3748), 'time.time', 'time.time', ([], {}), '()\n', (3746, 3748), False, 'import time\n'), ((3790, 3803), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A'], {}), '(A)\n', (3800, 3803), False, 'from scipy.sparse import csr_matrix, save_npz, diags, eye\n'), ((5718, 5729), 'time.time', 'time.time', ([], {}), '()\n', (5727, 5729), False, 'import time\n'), ((5964, 5975), 'time.time', 'time.time', ([], {}), '()\n', (5973, 5975), False, 'import time\n'), ((1466, 1476), 'scipy.sparse.eye', 'eye', (['ngene'], {}), '(ngene)\n', (1469, 1476), False, 'from scipy.sparse import csr_matrix, save_npz, diags, eye\n'), ((1519, 1530), 'time.time', 'time.time', ([], {}), '()\n', (1528, 1530), False, 'import time\n'), ((2525, 2547), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (2539, 2547), False, 'import os\n'), ((3330, 3349), 'numpy.log2', 'np.log2', (['(A.data + 1)'], {}), '(A.data + 1)\n', (3337, 3349), True, 'import numpy as np\n'), ((4753, 4822), 'numpy.logical_and', 'np.logical_and', (['(idx[0] + idx[1] < ws + ss)', '(idx[0] + idx[1] >= 
ws - ss)'], {}), '(idx[0] + idx[1] < ws + ss, idx[0] + idx[1] >= ws - ss)\n', (4767, 4822), True, 'import numpy as np\n'), ((4988, 4999), 'time.time', 'time.time', ([], {}), '()\n', (4997, 4999), False, 'import time\n'), ((5012, 5031), 'scipy.sparse.csr_matrix', 'csr_matrix', (['A.shape'], {}), '(A.shape)\n', (5022, 5031), False, 'from scipy.sparse import csr_matrix, save_npz, diags, eye\n'), ((6031, 6061), 'numpy.triu_indices', 'np.triu_indices', (['E.shape[0]', '(0)'], {}), '(E.shape[0], 0)\n', (6046, 6061), True, 'import numpy as np\n'), ((1628, 1643), 'scipy.sparse.linalg.norm', 'norm', (['(Q - Q_new)'], {}), '(Q - Q_new)\n', (1632, 1643), False, 'from scipy.sparse.linalg import norm\n'), ((1741, 1752), 'time.time', 'time.time', ([], {}), '()\n', (1750, 1752), False, 'import time\n'), ((2086, 2135), 'h5py.File', 'h5py.File', (['f"""{outdir}{cell}_{c}_{mode}.hdf5"""', '"""w"""'], {}), "(f'{outdir}{cell}_{c}_{mode}.hdf5', 'w')\n", (2095, 2135), False, 'import h5py\n'), ((4139, 4162), 'numpy.abs', 'np.abs', (['(idx[1] - idx[0])'], {}), '(idx[1] - idx[0])\n', (4145, 4162), True, 'import numpy as np\n'), ((5663, 5674), 'time.time', 'time.time', ([], {}), '()\n', (5672, 5674), False, 'import time\n'), ((5812, 5822), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (5819, 5822), True, 'import numpy as np\n'), ((5874, 5885), 'time.time', 'time.time', ([], {}), '()\n', (5883, 5885), False, 'import time\n'), ((6322, 6333), 'time.time', 'time.time', ([], {}), '()\n', (6331, 6333), False, 'import time\n'), ((2847, 2906), 'pandas.read_csv', 'pd.read_csv', (['chrom_file'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(chrom_file, sep='\\t', header=None, index_col=0)\n", (2858, 2906), True, 'import pandas as pd\n'), ((3115, 3125), 'scipy.sparse.eye', 'eye', (['ngene'], {}), '(ngene)\n', (3118, 3125), False, 'from scipy.sparse import csr_matrix, save_npz, diags, eye\n'), ((4070, 4083), 'numpy.arange', 'np.arange', (['ws'], {}), '(ws)\n', (4079, 4083), True, 
'import numpy as np\n'), ((4098, 4111), 'numpy.arange', 'np.arange', (['ws'], {}), '(ws)\n', (4107, 4111), True, 'import numpy as np\n')] |
# _*_ coding: utf-8 _*_
__author__ = 'LelandYan'
__date__ = '2019/5/19 7:42'
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
import skimage as sm
from skimage import morphology
from skimage.feature import peak_local_max
from skimage.io import imshow
from skimage.color import rgb2gray
from skimage.filters.rank import median
from skimage.measure import find_contours
# Cell/blob segmentation script: Otsu threshold -> morphological cleanup ->
# flood fill -> contour drawing -> distance-transform watershed.
# (Commented-out first attempt: denoise + adaptive-threshold pipeline.)
# image = cv2.imread("./raw_data/1.jpg")
# dst = cv2.fastNlMeansDenoisingColored(image,None,10,10,7,21)
# img = cv2.pyrDown(dst, cv2.IMREAD_UNCHANGED)
# # ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY) , 127, 255, cv2.THRESH_BINARY)
# thresh = cv2.adaptiveThreshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
# thresh = median(thresh, sm.morphology.disk(5))
# cv2.namedWindow("thresh", cv2.WINDOW_NORMAL)
# cv2.imshow("thresh", thresh)
# cv2.waitKey()
# cv2.destroyAllWindows()
###################################################################
##################################################################
# kernel = np.ones((3,3),np.uint8)
# opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 1)
# threshold, imgOtsu = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
# cv2.imshow("hull", imgOtsu)
# cv2.waitKey()
# cv2.destroyAllWindows()
# cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
# cv2.imshow("hull", thresh)
# cv2.waitKey()
# cv2.destroyAllWindows()
# findContours locates the contours of shapes in an image.
# The first argument (thresh) is the image object.
# Retrieval mode: cv2.RETR_EXTERNAL fetches only the outermost contours,
# cv2.RETR_TREE fetches the full contour hierarchy.
# The third argument is the contour-approximation method.
# Return values: image is the source image, contours are the detected
# contours, hier is the hierarchy information.
#########################################################################################
image = cv2.imread("./raw_data/4.jpg")
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# Otsu automatically picks the threshold; INV makes foreground white.
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 1)
opening = cv2.bilateralFilter(opening,9,80,80)
# Median filter with a disk structuring element to knock out speckle noise.
opening = median(opening, sm.morphology.disk(3))
# opening = cv2.morphologyEx(opening,cv2.MORPH_GRADIENT,kernel, iterations = 1)
######################################################################################
th, im_th = cv2.threshold(opening, 220, 255, cv2.THRESH_BINARY_INV)
# sure_bg = cv2.dilate(opening,kernel,iterations=2)
# Copy the thresholded image.
im_floodfill = im_th.copy()
# Mask used for flood filling.
# Note the mask needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0, 0), 255)
opening = im_floodfill
# Erode to shrink the filled regions and separate touching objects.
opening = cv2.erode(opening,kernel,iterations=7)
#########################################################################################
# NOTE(review): the 3-value unpacking matches the OpenCV 3.x API; OpenCV 4
# returns only (contours, hierarchy) — confirm the installed version.
image, contours, hier = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Create a new blank (black) image to draw on.
black = cv2.cvtColor(np.zeros((image.shape[1], image.shape[0]), dtype=np.uint8), cv2.COLOR_GRAY2BGR)
counter = 0
for p,cnt in enumerate(contours):
    area = cv2.contourArea(contours[p])
    if area < 30:
        # Skip tiny contours (likely noise).
        print("$$$$")
        continue
    # Contour perimeter is also called arc length and is computed with
    # cv2.arcLength(); the second argument states whether the shape is
    # closed (True) or an open curve.
    epsilon = 0.01 * cv2.arcLength(cnt, True)
    # approxPolyDP approximates the contour cnt with a simpler polygon.
    # epsilon is the precision: the maximum allowed distance between the
    # original curve and its approximation (smaller = more precise).
    # The third argument: True means the approximation is closed.
    approx = cv2.approxPolyDP(cnt, epsilon, True)
    # convexHull checks the curve for convexity defects and corrects
    # them; cnt is the image contour.
    hull = cv2.convexHull(cnt)
    # Draw the original contour (green).
    cv2.drawContours(black, [cnt], -1, (0, 255, 0), 2)
    # Draw the approximated polygon (cyan).
    cv2.drawContours(black, [approx], -1, (255, 255, 0), 2)
    # Draw the convex hull (red).
    cv2.drawContours(black, [hull], -1, (0, 0, 255), 2)
    counter+=1
# Display the result.
print(counter)
plt.imshow(black)
cv2.namedWindow("hull", cv2.WINDOW_NORMAL)
cv2.imshow("hull", black)
cv2.waitKey()
cv2.destroyAllWindows()
from scipy import ndimage as ndi
# labels = dst
distance = ndi.distance_transform_edt(opening)  # distance transform
# min_distance: peaks are separated by at least min_distance pixels
# (a 2*min_distance + 1 region); to find the maximum number of peaks
# use min_distance = 1.
# exclude_border: 0 means peaks at the image border are not excluded.
# indices: False returns a boolean array the same size as the image;
# True would return the peak coordinates instead.
local_maxi =peak_local_max(distance, exclude_border = 0,min_distance = 12,indices=False,
               footprint=np.ones((10, 10)),labels=opening)  # find peaks
markers = ndi.label(local_maxi)[0]  # initial marker points
# NOTE(review): skimage.morphology.watershed is deprecated in newer
# scikit-image releases in favour of skimage.segmentation.watershed.
label_ =morphology.watershed(-distance, markers, mask=opening)  # watershed based on the distance transform
# 2x2 panel: original mask, distance map, markers, segmentation.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
axes = axes.ravel()
ax0, ax1, ax2, ax3 = axes
ax0.imshow(opening, cmap=plt.cm.gray)#, interpolation='nearest')
ax0.set_title("Original")
ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
ax1.set_title("Distance")
ax2.imshow(sm.morphology.dilation(markers,sm.morphology.square(10)), cmap=plt.cm.Spectral, interpolation='nearest')
ax2.set_title("Markers")
ax3.imshow(label_, cmap=plt.cm.Spectral, interpolation='nearest')
ax3.set_title("Segmented")
for ax in axes:
    ax.axis('off')
fig.tight_layout()
plt.show()
| [
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.approxPolyDP",
"matplotlib.pyplot.imshow",
"cv2.threshold",
"cv2.erode",
"cv2.arcLength",
"scipy.ndimage.label",
"cv2.contourArea",
"cv2.waitKey",
"skimage.morphology.watershed",
"scipy.ndimage.distance_transform_edt",
"cv2.drawContours",
"numpy.... | [((1813, 1843), 'cv2.imread', 'cv2.imread', (['"""./raw_data/4.jpg"""'], {}), "('./raw_data/4.jpg')\n", (1823, 1843), False, 'import cv2\n'), ((1851, 1890), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1863, 1890), False, 'import cv2\n'), ((1905, 1973), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (1918, 1973), False, 'import cv2\n'), ((2099, 2124), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (2106, 2124), True, 'import numpy as np\n'), ((2133, 2195), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(1)'}), '(thresh, cv2.MORPH_OPEN, kernel, iterations=1)\n', (2149, 2195), False, 'import cv2\n'), ((2206, 2245), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['opening', '(9)', '(80)', '(80)'], {}), '(opening, 9, 80, 80)\n', (2225, 2245), False, 'import cv2\n'), ((2471, 2526), 'cv2.threshold', 'cv2.threshold', (['opening', '(220)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(opening, 220, 255, cv2.THRESH_BINARY_INV)\n', (2484, 2526), False, 'import cv2\n'), ((2753, 2787), 'numpy.zeros', 'np.zeros', (['(h + 2, w + 2)', 'np.uint8'], {}), '((h + 2, w + 2), np.uint8)\n', (2761, 2787), True, 'import numpy as np\n'), ((2819, 2865), 'cv2.floodFill', 'cv2.floodFill', (['im_floodfill', 'mask', '(0, 0)', '(255)'], {}), '(im_floodfill, mask, (0, 0), 255)\n', (2832, 2865), False, 'import cv2\n'), ((2900, 2940), 'cv2.erode', 'cv2.erode', (['opening', 'kernel'], {'iterations': '(7)'}), '(opening, kernel, iterations=7)\n', (2909, 2940), False, 'import cv2\n'), ((3053, 3122), 'cv2.findContours', 'cv2.findContours', (['opening', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (3069, 3122), False, 'import cv2\n'), ((4049, 4066), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['black'], {}), '(black)\n', (4059, 4066), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4110), 'cv2.namedWindow', 'cv2.namedWindow', (['"""hull"""', 'cv2.WINDOW_NORMAL'], {}), "('hull', cv2.WINDOW_NORMAL)\n", (4083, 4110), False, 'import cv2\n'), ((4111, 4136), 'cv2.imshow', 'cv2.imshow', (['"""hull"""', 'black'], {}), "('hull', black)\n", (4121, 4136), False, 'import cv2\n'), ((4137, 4150), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (4148, 4150), False, 'import cv2\n'), ((4151, 4174), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4172, 4174), False, 'import cv2\n'), ((4234, 4269), 'scipy.ndimage.distance_transform_edt', 'ndi.distance_transform_edt', (['opening'], {}), '(opening)\n', (4260, 4269), True, 'from scipy import ndimage as ndi\n'), ((4671, 4725), 'skimage.morphology.watershed', 'morphology.watershed', (['(-distance)', 'markers'], {'mask': 'opening'}), '(-distance, markers, mask=opening)\n', (4691, 4725), False, 'from skimage import morphology\n'), ((4753, 4801), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(12, 12)'}), '(nrows=2, ncols=2, figsize=(12, 12))\n', (4765, 4801), True, 'import matplotlib.pyplot as plt\n'), ((5319, 5329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5327, 5329), True, 'import matplotlib.pyplot as plt\n'), ((2269, 2290), 'skimage.morphology.disk', 'sm.morphology.disk', (['(3)'], {}), '(3)\n', (2287, 2290), True, 'import skimage as sm\n'), ((3158, 3216), 'numpy.zeros', 'np.zeros', (['(image.shape[1], image.shape[0])'], {'dtype': 'np.uint8'}), '((image.shape[1], image.shape[0]), dtype=np.uint8)\n', (3166, 3216), True, 'import numpy as np\n'), ((3297, 3325), 'cv2.contourArea', 'cv2.contourArea', (['contours[p]'], {}), '(contours[p])\n', (3312, 3325), False, 'import cv2\n'), ((3677, 3713), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', 'epsilon', '(True)'], {}), '(cnt, epsilon, True)\n', 
(3693, 3713), False, 'import cv2\n'), ((3770, 3789), 'cv2.convexHull', 'cv2.convexHull', (['cnt'], {}), '(cnt)\n', (3784, 3789), False, 'import cv2\n'), ((3810, 3860), 'cv2.drawContours', 'cv2.drawContours', (['black', '[cnt]', '(-1)', '(0, 255, 0)', '(2)'], {}), '(black, [cnt], -1, (0, 255, 0), 2)\n', (3826, 3860), False, 'import cv2\n'), ((3882, 3937), 'cv2.drawContours', 'cv2.drawContours', (['black', '[approx]', '(-1)', '(255, 255, 0)', '(2)'], {}), '(black, [approx], -1, (255, 255, 0), 2)\n', (3898, 3937), False, 'import cv2\n'), ((3960, 4011), 'cv2.drawContours', 'cv2.drawContours', (['black', '[hull]', '(-1)', '(0, 0, 255)', '(2)'], {}), '(black, [hull], -1, (0, 0, 255), 2)\n', (3976, 4011), False, 'import cv2\n'), ((4631, 4652), 'scipy.ndimage.label', 'ndi.label', (['local_maxi'], {}), '(local_maxi)\n', (4640, 4652), True, 'from scipy import ndimage as ndi\n'), ((3493, 3517), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (3506, 3517), False, 'import cv2\n'), ((4581, 4598), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4588, 4598), True, 'import numpy as np\n'), ((5072, 5096), 'skimage.morphology.square', 'sm.morphology.square', (['(10)'], {}), '(10)\n', (5092, 5096), True, 'import skimage as sm\n')] |
"""Sub-classes for vtk.vtkRectilinearGrid and vtk.vtkImageData."""
import pathlib
import logging
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import abstract_class
from .dataset import DataSet
from .filters import _get_output, UniformGridFilters
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
@abstract_class
class Grid(DataSet):
    """Abstract base collecting behavior shared by non-pointset grids."""

    def __init__(self, *args, **kwargs):
        """Initialize the grid."""
        super().__init__()

    @property
    def dimensions(self):
        """Return the grid's dimensions as a length-3 list.

        These are effectively the number of nodes along each of the three
        dataset axes.
        """
        return list(self.GetDimensions())

    @dimensions.setter
    def dimensions(self, dims):
        """Set the dataset dimensions from a length-3 sequence of integers."""
        self.SetDimensions(dims[0], dims[1], dims[2])
        self.Modified()

    def _get_attrs(self):
        """Return the representation methods (internal helper)."""
        representation = DataSet._get_attrs(self)
        representation.append(("Dimensions", self.dimensions, "{:d}, {:d}, {:d}"))
        return representation
class RectilinearGrid(_vtk.vtkRectilinearGrid, Grid):
    """Extend the functionality of a vtk.vtkRectilinearGrid object.

    Can be initialized in several ways:

    - Create empty grid
    - Initialize from a vtk.vtkRectilinearGrid object
    - Initialize directly from the point arrays

    See _from_arrays in the documentation for more details on initializing
    from point arrays

    Examples
    --------
    >>> import pyvista
    >>> import vtk
    >>> import numpy as np

    >>> # Create empty grid
    >>> grid = pyvista.RectilinearGrid()

    >>> # Initialize from a vtk.vtkRectilinearGrid object
    >>> vtkgrid = vtk.vtkRectilinearGrid()
    >>> grid = pyvista.RectilinearGrid(vtkgrid)

    >>> # Create from NumPy arrays
    >>> xrng = np.arange(-10, 10, 2)
    >>> yrng = np.arange(-10, 10, 5)
    >>> zrng = np.arange(-10, 10, 1)
    >>> grid = pyvista.RectilinearGrid(xrng, yrng, zrng)

    """

    # File extensions mapped to the VTK reader/writer classes used when
    # loading from / saving to disk.
    _READERS = {'.vtk': _vtk.vtkRectilinearGridReader,
                '.vtr': _vtk.vtkXMLRectilinearGridReader}
    _WRITERS = {'.vtk': _vtk.vtkRectilinearGridWriter,
                '.vtr': _vtk.vtkXMLRectilinearGridWriter}

    def __init__(self, *args, **kwargs):
        """Initialize the rectilinear grid."""
        super().__init__()

        if len(args) == 1:
            # Single argument: copy a VTK grid, load a file, or take a
            # single x-coordinate array.
            if isinstance(args[0], _vtk.vtkRectilinearGrid):
                self.deep_copy(args[0])
            elif isinstance(args[0], (str, pathlib.Path)):
                self._from_file(args[0])
            elif isinstance(args[0], np.ndarray):
                self._from_arrays(args[0], None, None)
            else:
                raise TypeError(f'Type ({type(args[0])}) not understood by `RectilinearGrid`')

        elif len(args) == 3 or len(args) == 2:
            # Two or three arguments: per-axis coordinate arrays (x, y[, z]).
            arg0_is_arr = isinstance(args[0], np.ndarray)
            arg1_is_arr = isinstance(args[1], np.ndarray)
            if len(args) == 3:
                arg2_is_arr = isinstance(args[2], np.ndarray)
            else:
                arg2_is_arr = False

            if all([arg0_is_arr, arg1_is_arr, arg2_is_arr]):
                self._from_arrays(args[0], args[1], args[2])
            elif all([arg0_is_arr, arg1_is_arr]):
                self._from_arrays(args[0], args[1], None)
            else:
                raise TypeError("Arguments not understood by `RectilinearGrid`.")

    def __repr__(self):
        """Return the default representation."""
        return DataSet.__repr__(self)

    def __str__(self):
        """Return the str representation."""
        return DataSet.__str__(self)

    def _update_dimensions(self):
        """Update the VTK dimensions to match the coordinate array lengths."""
        return self.SetDimensions(len(self.x), len(self.y), len(self.z))

    def _from_arrays(self, x, y, z):
        """Create VTK rectilinear grid directly from numpy arrays.

        Each array gives the uniques coordinates of the mesh along each axial
        direction. To help ensure you are using this correctly, we take the unique
        values of each argument.

        Parameters
        ----------
        x : np.ndarray
            Coordinates of the nodes in x direction.

        y : np.ndarray
            Coordinates of the nodes in y direction.

        z : np.ndarray
            Coordinates of the nodes in z direction.

        """
        # Set the coordinates along each axial direction
        # Must at least be an x array
        x = np.unique(x.ravel())
        self.SetXCoordinates(_vtk.numpy_to_vtk(x))
        if y is not None:
            y = np.unique(y.ravel())
            self.SetYCoordinates(_vtk.numpy_to_vtk(y))
        if z is not None:
            z = np.unique(z.ravel())
            self.SetZCoordinates(_vtk.numpy_to_vtk(z))
        # Ensure dimensions are properly set
        self._update_dimensions()

    @property
    def meshgrid(self):
        """Return a meshgrid of numpy arrays for this mesh.

        This simply returns a ``numpy.meshgrid`` of the coordinates for this
        mesh in ``ij`` indexing. These are a copy of the points of this mesh.

        """
        return np.meshgrid(self.x, self.y, self.z, indexing='ij')

    @property
    def points(self):
        """Return a copy of the points as an n by 3 numpy array."""
        # Flatten in Fortran (column-major) order to match VTK point ordering.
        xx, yy, zz = self.meshgrid
        return np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')]

    @points.setter
    def points(self, points):
        """Points must be set along each axial direction.

        Please set the point coordinates with the ``x``, ``y``, and ``z``
        setters.

        This setter overrides the base class's setter to ensure a user does not
        attempt to set them.
        """
        raise AttributeError("The points cannot be set. The points of "
            "`RectilinearGrid` are defined in each axial direction. Please "
            "use the `x`, `y`, and `z` setters individually."
        )

    @property
    def x(self):
        """Get the coordinates along the X-direction."""
        return _vtk.vtk_to_numpy(self.GetXCoordinates())

    @x.setter
    def x(self, coords):
        """Set the coordinates along the X-direction."""
        self.SetXCoordinates(_vtk.numpy_to_vtk(coords))
        self._update_dimensions()
        self.Modified()

    @property
    def y(self):
        """Get the coordinates along the Y-direction."""
        return _vtk.vtk_to_numpy(self.GetYCoordinates())

    @y.setter
    def y(self, coords):
        """Set the coordinates along the Y-direction."""
        self.SetYCoordinates(_vtk.numpy_to_vtk(coords))
        self._update_dimensions()
        self.Modified()

    @property
    def z(self):
        """Get the coordinates along the Z-direction."""
        return _vtk.vtk_to_numpy(self.GetZCoordinates())

    @z.setter
    def z(self, coords):
        """Set the coordinates along the Z-direction."""
        self.SetZCoordinates(_vtk.numpy_to_vtk(coords))
        self._update_dimensions()
        self.Modified()

    @Grid.dimensions.setter  # type: ignore
    def dimensions(self, dims):
        """Do not let the dimensions of the RectilinearGrid be set."""
        # Dimensions follow from the coordinate arrays, so direct assignment
        # is forbidden here.
        raise AttributeError("The dimensions of a `RectilinearGrid` are implicitly defined and thus cannot be set.")

    def cast_to_structured_grid(self):
        """Cast this rectilinear grid to a :class:`pyvista.StructuredGrid`."""
        alg = _vtk.vtkRectilinearGridToPointSet()
        alg.SetInputData(self)
        alg.Update()
        return _get_output(alg)
class UniformGrid(_vtk.vtkImageData, Grid, UniformGridFilters):
    """Extend the functionality of a vtk.vtkImageData object.

    Can be initialized in several ways:

    - Create empty grid
    - Initialize from a vtk.vtkImageData object
    - Initialize directly from the point arrays

    See ``_from_specs`` in the documentation for more details on initializing
    from point arrays

    Examples
    --------
    >>> import pyvista
    >>> import vtk
    >>> import numpy as np

    >>> # Create empty grid
    >>> grid = pyvista.UniformGrid()

    >>> # Initialize from a vtk.vtkImageData object
    >>> vtkgrid = vtk.vtkImageData()
    >>> grid = pyvista.UniformGrid(vtkgrid)

    >>> # Using just the grid dimensions
    >>> dims = (10, 10, 10)
    >>> grid = pyvista.UniformGrid(dims)

    >>> # Using dimensions and spacing
    >>> spacing = (2, 1, 5)
    >>> grid = pyvista.UniformGrid(dims, spacing)

    >>> # Using dimensions, spacing, and an origin
    >>> origin = (10, 35, 50)
    >>> grid = pyvista.UniformGrid(dims, spacing, origin)
    """

    # File extension -> VTK reader/writer classes used by the base I/O machinery.
    _READERS = {'.vtk': _vtk.vtkDataSetReader, '.vti': _vtk.vtkXMLImageDataReader}
    _WRITERS = {'.vtk': _vtk.vtkDataSetWriter, '.vti': _vtk.vtkXMLImageDataWriter}

    def __init__(self, *args, **kwargs):
        """Initialize the uniform grid.

        Accepts no arguments (empty grid), an existing ``vtkImageData`` to
        deep-copy, a filename to read, or one to three length-3 sequences
        interpreted as ``(dims[, spacing[, origin]])``.
        """
        super().__init__()
        if len(args) == 1:
            if isinstance(args[0], _vtk.vtkImageData):
                self.deep_copy(args[0])
            elif isinstance(args[0], (str, pathlib.Path)):
                self._from_file(args[0])
            else:
                # NOTE(review): ``arg0_is_valid`` is computed but never used
                # on this path -- invalid dims fall straight into
                # ``_from_specs`` unchecked.
                arg0_is_valid = len(args[0]) == 3
                self._from_specs(args[0])
        elif len(args) > 1 and len(args) < 4:
            arg0_is_valid = len(args[0]) == 3
            arg1_is_valid = False
            if len(args) > 1:
                arg1_is_valid = len(args[1]) == 3
            arg2_is_valid = False
            if len(args) > 2:
                arg2_is_valid = len(args[2]) == 3

            if all([arg0_is_valid, arg1_is_valid, arg2_is_valid]):
                self._from_specs(args[0], args[1], args[2])
            elif all([arg0_is_valid, arg1_is_valid]):
                self._from_specs(args[0], args[1])
            # NOTE(review): if the argument lengths do not match either
            # pattern the grid is silently left empty -- confirm whether
            # this should raise instead.

    def __repr__(self):
        """Return the default representation."""
        return DataSet.__repr__(self)

    def __str__(self):
        """Return the default str representation."""
        return DataSet.__str__(self)

    def _from_specs(self, dims, spacing=(1.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0)):
        """Create VTK image data directly from numpy arrays.

        A uniform grid is defined by the node spacings for each axis
        (uniform along each individual axis) and the number of nodes on each axis.
        These are relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).

        Parameters
        ----------
        dims : tuple(int)
            Length 3 tuple of ints specifying how many nodes along each axis

        spacing : tuple(float)
            Length 3 tuple of floats/ints specifying the node spacings for each axis

        origin : tuple(float)
            Length 3 tuple of floats/ints specifying minimum value for each axis
        """
        xn, yn, zn = dims[0], dims[1], dims[2]
        xs, ys, zs = spacing[0], spacing[1], spacing[2]
        xo, yo, zo = origin[0], origin[1], origin[2]
        self.SetDimensions(xn, yn, zn)
        self.SetOrigin(xo, yo, zo)
        self.SetSpacing(xs, ys, zs)

    @property
    def points(self):
        """Build a copy of the implicitly defined points as a numpy array.

        Returns
        -------
        numpy.ndarray
            An ``(n_points, 3)`` array of XYZ coordinates in Fortran
            (column-major) point ordering.
        """
        # Get grid dimensions
        nx, ny, nz = self.dimensions
        nx -= 1
        ny -= 1
        nz -= 1
        # get the points and convert to spacings
        dx, dy, dz = self.spacing
        # Now make the cell arrays; extent[::2] holds the lower extent
        # indices, which shift the effective origin.
        ox, oy, oz = np.array(self.origin) + np.array(self.extent[::2])
        # Cumulative sums of the spacings, with 0.0 prepended, give the
        # per-axis node coordinates relative to the origin.
        x = np.insert(np.cumsum(np.full(nx, dx)), 0, 0.0) + ox
        y = np.insert(np.cumsum(np.full(ny, dy)), 0, 0.0) + oy
        z = np.insert(np.cumsum(np.full(nz, dz)), 0, 0.0) + oz
        xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
        return np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')]

    @points.setter
    def points(self, points):
        """Points cannot be set.

        This setter overrides the base class's setter to ensure a user does not
        attempt to set them. See https://github.com/pyvista/pyvista/issues/713.
        """
        raise AttributeError("The points cannot be set. The points of "
                             "`UniformGrid`/`vtkImageData` are implicitly defined by the "
                             "`origin`, `spacing`, and `dimensions` of the grid."
                             )

    @property
    def x(self):
        """Return all the X points."""
        return self.points[:, 0]

    @property
    def y(self):
        """Return all the Y points."""
        return self.points[:, 1]

    @property
    def z(self):
        """Return all the Z points."""
        return self.points[:, 2]

    @property
    def origin(self):
        """Return the origin of the grid (bottom southwest corner)."""
        return list(self.GetOrigin())

    @origin.setter
    def origin(self, origin):
        """Set the origin. Pass a length three tuple of floats."""
        ox, oy, oz = origin[0], origin[1], origin[2]
        self.SetOrigin(ox, oy, oz)
        # Flag the VTK object as modified so downstream filters re-execute.
        self.Modified()

    @property
    def spacing(self):
        """Get the spacing for each axial direction."""
        return list(self.GetSpacing())

    @spacing.setter
    def spacing(self, spacing):
        """Set the spacing in each axial direction.

        Pass a length three tuple of floats.
        """
        dx, dy, dz = spacing[0], spacing[1], spacing[2]
        self.SetSpacing(dx, dy, dz)
        self.Modified()

    def _get_attrs(self):
        """Return the representation methods (internal helper)."""
        attrs = Grid._get_attrs(self)
        fmt = "{}, {}, {}".format(*[pyvista.FLOAT_FORMAT]*3)
        attrs.append(("Spacing", self.spacing, fmt))
        return attrs

    def cast_to_structured_grid(self):
        """Cast this uniform grid to a :class:`pyvista.StructuredGrid`."""
        alg = _vtk.vtkImageToStructuredGrid()
        alg.SetInputData(self)
        alg.Update()
        return _get_output(alg)

    def cast_to_rectilinear_grid(self):
        """Cast this uniform grid to a :class:`pyvista.RectilinearGrid`.

        The explicit per-axis coordinate arrays are reconstructed from the
        implicit ``dimensions``/``spacing``/``origin`` description, and all
        point/cell/field arrays plus metadata are carried over.
        """
        def gen_coords(i):
            # Coordinates for axis ``i``: cumulative spacing steps (with a
            # leading 0) offset by the origin.
            coords = np.cumsum(np.insert(np.full(self.dimensions[i] - 1,
                                                self.spacing[i]), 0, 0)
                               ) + self.origin[i]
            return coords
        xcoords = gen_coords(0)
        ycoords = gen_coords(1)
        zcoords = gen_coords(2)
        grid = pyvista.RectilinearGrid(xcoords, ycoords, zcoords)
        grid.point_arrays.update(self.point_arrays)
        grid.cell_arrays.update(self.cell_arrays)
        grid.field_arrays.update(self.field_arrays)
        grid.copy_meta_from(self)
        return grid
| [
"logging.getLogger",
"pyvista._vtk.vtkRectilinearGridToPointSet",
"pyvista._vtk.numpy_to_vtk",
"numpy.full",
"pyvista.RectilinearGrid",
"numpy.array",
"pyvista._vtk.vtkImageToStructuredGrid",
"numpy.meshgrid"
] | [((293, 320), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (310, 320), False, 'import logging\n'), ((5379, 5429), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y', 'self.z'], {'indexing': '"""ij"""'}), "(self.x, self.y, self.z, indexing='ij')\n", (5390, 5429), True, 'import numpy as np\n'), ((7671, 7706), 'pyvista._vtk.vtkRectilinearGridToPointSet', '_vtk.vtkRectilinearGridToPointSet', ([], {}), '()\n', (7704, 7706), False, 'from pyvista import _vtk\n'), ((11906, 11941), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {'indexing': '"""ij"""'}), "(x, y, z, indexing='ij')\n", (11917, 11941), True, 'import numpy as np\n'), ((14002, 14033), 'pyvista._vtk.vtkImageToStructuredGrid', '_vtk.vtkImageToStructuredGrid', ([], {}), '()\n', (14031, 14033), False, 'from pyvista import _vtk\n'), ((14595, 14645), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['xcoords', 'ycoords', 'zcoords'], {}), '(xcoords, ycoords, zcoords)\n', (14618, 14645), False, 'import pyvista\n'), ((4759, 4779), 'pyvista._vtk.numpy_to_vtk', '_vtk.numpy_to_vtk', (['x'], {}), '(x)\n', (4776, 4779), False, 'from pyvista import _vtk\n'), ((6474, 6499), 'pyvista._vtk.numpy_to_vtk', '_vtk.numpy_to_vtk', (['coords'], {}), '(coords)\n', (6491, 6499), False, 'from pyvista import _vtk\n'), ((6831, 6856), 'pyvista._vtk.numpy_to_vtk', '_vtk.numpy_to_vtk', (['coords'], {}), '(coords)\n', (6848, 6856), False, 'from pyvista import _vtk\n'), ((7188, 7213), 'pyvista._vtk.numpy_to_vtk', '_vtk.numpy_to_vtk', (['coords'], {}), '(coords)\n', (7205, 7213), False, 'from pyvista import _vtk\n'), ((11645, 11666), 'numpy.array', 'np.array', (['self.origin'], {}), '(self.origin)\n', (11653, 11666), True, 'import numpy as np\n'), ((11669, 11695), 'numpy.array', 'np.array', (['self.extent[::2]'], {}), '(self.extent[::2])\n', (11677, 11695), True, 'import numpy as np\n'), ((4877, 4897), 'pyvista._vtk.numpy_to_vtk', '_vtk.numpy_to_vtk', (['y'], {}), '(y)\n', (4894, 4897), False, 
'from pyvista import _vtk\n'), ((4995, 5015), 'pyvista._vtk.numpy_to_vtk', '_vtk.numpy_to_vtk', (['z'], {}), '(z)\n', (5012, 5015), False, 'from pyvista import _vtk\n'), ((11728, 11743), 'numpy.full', 'np.full', (['nx', 'dx'], {}), '(nx, dx)\n', (11735, 11743), True, 'import numpy as np\n'), ((11791, 11806), 'numpy.full', 'np.full', (['ny', 'dy'], {}), '(ny, dy)\n', (11798, 11806), True, 'import numpy as np\n'), ((11854, 11869), 'numpy.full', 'np.full', (['nz', 'dz'], {}), '(nz, dz)\n', (11861, 11869), True, 'import numpy as np\n'), ((14303, 14351), 'numpy.full', 'np.full', (['(self.dimensions[i] - 1)', 'self.spacing[i]'], {}), '(self.dimensions[i] - 1, self.spacing[i])\n', (14310, 14351), True, 'import numpy as np\n')] |
from matrix_builder import UserItemMatrix
from repr_learner import RepresentationLearner
from user_matcher import UserMatcher
from user_pool_manager import UserPoolManager
from threading import Lock
import scipy
import pandas as pd
import numpy as np
import thread
import time
import sets
# Users whose match has been formed and who are currently "playing";
# mutated from multiple threads, so access is guarded by PlayingUserLock.
PlayingUserPool = []
PlayingUserLock = Lock()
# Shared pool manager holding users waiting to be matched.
manager = UserPoolManager()
def release_user(manager):
    """Periodically flush the finished ("playing") users back to the manager.

    Runs forever: every 10 seconds it drains the global ``PlayingUserPool``
    into *manager* and empties the pool.

    Parameters
    ----------
    manager : UserPoolManager
        Pool manager that receives the released users via ``push_users``.
    """
    print('realease user start')
    global PlayingUserPool
    while True:
        # Use the lock as a context manager so it is released even if
        # push_users() raises (the manual acquire/release pair previously
        # leaked the lock on error, deadlocking every other thread).
        with PlayingUserLock:
            if len(PlayingUserPool) > 0:
                print('release: {}'.format(len(PlayingUserPool)))
                manager.push_users(PlayingUserPool)
                PlayingUserPool = []
        time.sleep(10)
class Recommender:
    """Match waiting users into groups of six based on learned embeddings.

    NOTE(review): the source's indentation was lost; the nesting of the
    trailing ``if`` blocks in ``start`` below is a reconstruction and
    should be confirmed against the original repository.
    """

    def __init__(self, n_components=30):
        # n_components: dimensionality of the user embedding space used by
        # the matcher.
        self.n_components = n_components
        # Pre-trained representation model loaded from disk.
        self.repr_learner = RepresentationLearner.load('./model')

    def start(self):
        """Run the matching loop forever.

        Pops waiting users from the shared manager and either sleeps (no
        users), passes a ready-made group of six straight through, or runs
        embedding-based nearest-neighbour matching to form groups.
        """
        global PlayingUserPool
        while True:
            # centroid_users is currently unused -- presumably centroids of
            # waiting clusters; verify against UserPoolManager.
            cur_users, centroid_users = manager.pop_waiting_users()
            if len(cur_users) == 0:
                print('no users')
                time.sleep(5)
            elif len(cur_users) == 6:
                # Exactly one full group: no matching needed.
                print('match randomly: {}'.format(cur_users))
                time.sleep(5)
                PlayingUserLock.acquire()
                PlayingUserPool += cur_users
                PlayingUserLock.release()
            else:
                # Build a nearest-neighbour index over the current users'
                # embeddings.
                user_matcher = UserMatcher(10, self.n_components)
                user_repr = self.repr_learner.user_representations()
                cur_users_repr = user_repr[cur_users]
                user_matcher.add_embedding(cur_users_repr, user_ids=np.array(cur_users))
                user_matcher.finish()
                matched_users = set()
                for user in cur_users:
                    if user in matched_users:
                        continue
                    # Query up to 12 candidates and keep the first six that
                    # are not already assigned to a group.
                    matched = user_matcher.pick(user, 12)
                    ret = []
                    for i in matched:
                        if i in matched_users:
                            continue
                        ret.append(i)
                        if len(ret) == 6:
                            break
                    if len(ret) == 6:
                        for i in ret:
                            matched_users.add(i)
                    print('match: {}'.format(ret))
                    if len(ret) > 0:
                        PlayingUserLock.acquire()
                        PlayingUserPool += ret
                        PlayingUserLock.release()
if __name__ == "__main__":
    # Spawn the fake-user generator and the release loop in background
    # threads (``thread`` is the Python 2 low-level threading module).
    thread.start_new_thread(manager.fake, ())
    # BUG FIX: release_user(manager) requires the pool manager as a
    # positional argument; it was previously started with an empty args
    # tuple, so the spawned thread died immediately with a TypeError.
    thread.start_new_thread(release_user, (manager,))
    rec = Recommender()
    rec.start()
"threading.Lock",
"time.sleep",
"numpy.array",
"user_matcher.UserMatcher",
"user_pool_manager.UserPoolManager",
"thread.start_new_thread",
"repr_learner.RepresentationLearner.load"
] | [((330, 336), 'threading.Lock', 'Lock', ([], {}), '()\n', (334, 336), False, 'from threading import Lock\n'), ((348, 365), 'user_pool_manager.UserPoolManager', 'UserPoolManager', ([], {}), '()\n', (363, 365), False, 'from user_pool_manager import UserPoolManager\n'), ((2655, 2696), 'thread.start_new_thread', 'thread.start_new_thread', (['manager.fake', '()'], {}), '(manager.fake, ())\n', (2678, 2696), False, 'import thread\n'), ((2701, 2742), 'thread.start_new_thread', 'thread.start_new_thread', (['release_user', '()'], {}), '(release_user, ())\n', (2724, 2742), False, 'import thread\n'), ((731, 745), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (741, 745), False, 'import time\n'), ((877, 914), 'repr_learner.RepresentationLearner.load', 'RepresentationLearner.load', (['"""./model"""'], {}), "('./model')\n", (903, 914), False, 'from repr_learner import RepresentationLearner\n'), ((1153, 1166), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1163, 1166), False, 'import time\n'), ((1283, 1296), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1293, 1296), False, 'import time\n'), ((1476, 1510), 'user_matcher.UserMatcher', 'UserMatcher', (['(10)', 'self.n_components'], {}), '(10, self.n_components)\n', (1487, 1510), False, 'from user_matcher import UserMatcher\n'), ((1712, 1731), 'numpy.array', 'np.array', (['cur_users'], {}), '(cur_users)\n', (1720, 1731), True, 'import numpy as np\n')] |
import numpy
import pint.compat
from openff.evaluator import unit
class ParameterGradientKey:
    """Uniquely identifies a force field parameter by its handler tag,
    SMIRKS pattern and attribute name.

    Instances compare (and hash) equal when all three fields match, so
    they may be used as dictionary keys.
    """

    def __init__(self, tag=None, smirks=None, attribute=None):
        """Create a new key.

        Parameters
        ----------
        tag: str, optional
            The tag of the parameter handler.
        smirks: str, optional
            The SMIRKS pattern which the parameter matches.
        attribute: str, optional
            The name of the parameter attribute.
        """
        self._tag = tag
        self._smirks = smirks
        self._attribute = attribute

    @property
    def tag(self):
        """str: The tag of the parameter handler."""
        return self._tag

    @property
    def smirks(self):
        """str: The SMIRKS pattern which the parameter matches."""
        return self._smirks

    @property
    def attribute(self):
        """str: The name of the parameter attribute."""
        return self._attribute

    def _fields(self):
        # Single source of truth for the three identifying values.
        return self._tag, self._smirks, self._attribute

    def __getstate__(self):
        tag, smirks, attribute = self._fields()
        return {"tag": tag, "smirks": smirks, "attribute": attribute}

    def __setstate__(self, state):
        self._tag = state["tag"]
        self._smirks = state["smirks"]
        self._attribute = state["attribute"]

    def __str__(self):
        tag, smirks, attribute = self._fields()
        return f"tag={tag} smirks={smirks} attribute={attribute}"

    def __repr__(self):
        return f"<ParameterGradientKey {self}>"

    def __hash__(self):
        return hash(self._fields())

    def __eq__(self, other):
        if not isinstance(other, ParameterGradientKey):
            return False
        return self._fields() == other._fields()

    def __ne__(self, other):
        return not self.__eq__(other)
class ParameterGradient:
    """The gradient of an observable with respect to a single force field
    parameter, identified by a :class:`ParameterGradientKey`.

    Supports addition / subtraction with gradients of the same parameter
    and multiplication / division by int, float or ``unit.Quantity``
    scalars.
    """

    def __init__(self, key=None, value=None):
        """Create a new gradient.

        Parameters
        ----------
        key: ParameterGradientKey, optional
            The parameter the gradient was taken with respect to.
        value: optional
            The value of the gradient.
        """
        self._key = key
        self._value = value

    @property
    def key(self):
        """ParameterGradientKey: The parameter the gradient was taken with
        respect to."""
        return self._key

    @property
    def value(self):
        """The value of the gradient."""
        return self._value

    def __getstate__(self):
        return {"key": self._key, "value": self._value}

    def __setstate__(self, state):
        self._key = state["key"]
        self._value = state["value"]

    def __str__(self):
        return f"key=({self._key}) value={self._value}"

    def __repr__(self):
        return f"<ParameterGradient key={self._key} value={self._value}>"

    def _validate_same_key(self, other, verb):
        # Shared validation for the additive operators; ``verb`` is spliced
        # into the messages so each operator keeps its own wording.
        if not isinstance(other, ParameterGradient):
            raise ValueError(f"Only ParameterGradient objects can be {verb}.")
        if other.key != self.key:
            raise ValueError(
                f"Only ParameterGradient objects with the same key can be {verb}."
            )

    @staticmethod
    def _validate_scalar(other, verb):
        # ``or`` short-circuits, so ``unit`` is only touched for operands
        # that are neither float nor int (same ordering as the original
        # ``and``-chained condition).
        is_scalar = (
            isinstance(other, float)
            or isinstance(other, int)
            or isinstance(other, unit.Quantity)
        )
        if not is_scalar:
            raise ValueError(
                f"ParameterGradient objects can only be {verb} by int's, "
                "float's or Quantity objects."
            )

    def __add__(self, other):
        """Add two gradients of the same parameter.

        Parameters
        ----------
        other: ParameterGradient
        """
        self._validate_same_key(other, "added together")
        return ParameterGradient(self.key, self.value + other.value)

    def __sub__(self, other):
        """Subtract a gradient of the same parameter.

        Parameters
        ----------
        other: ParameterGradient
        """
        self._validate_same_key(other, "subtracted")
        return ParameterGradient(self.key, self.value - other.value)

    def __mul__(self, other):
        """Scale the gradient value by a scalar.

        Parameters
        ----------
        other: float, int, openff.evaluator.unit.Quantity
        """
        self._validate_scalar(other, "multiplied")
        return ParameterGradient(self.key, self.value * other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        """Divide the gradient value by a scalar.

        Parameters
        ----------
        other: float, int, openff.evaluator.unit.Quantity
        """
        self._validate_scalar(other, "divided")
        return ParameterGradient(self.key, self.value / other)

    def __eq__(self, other):
        if not isinstance(other, ParameterGradient):
            return False
        return self.key == other.key and numpy.allclose(self.value, other.value)
# Register ParameterGradient with pint so that arithmetic between a pint
# Quantity and a ParameterGradient defers to ParameterGradient's operators
# (presumably so ``Quantity * gradient`` hits ``__rmul__`` -- see pint's
# upcast_types documentation to confirm).
pint.compat.upcast_types.append(ParameterGradient)
| [
"numpy.allclose"
] | [((4281, 4320), 'numpy.allclose', 'numpy.allclose', (['self.value', 'other.value'], {}), '(self.value, other.value)\n', (4295, 4320), False, 'import numpy\n')] |
"""Define the ExternalCodeComp and ExternalCodeImplicitComp classes."""
from __future__ import print_function
import os
import sys
import numpy.distutils
from numpy.distutils.exec_command import find_executable
from openmdao.core.analysis_error import AnalysisError
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.core.implicitcomponent import ImplicitComponent
from openmdao.utils.shell_proc import STDOUT, DEV_NULL, ShellProc
from openmdao.utils.general_utils import warn_deprecation
class ExternalCodeDelegate(object):
    """
    Handles all the methods related to running a code externally.

    Attributes
    ----------
    _comp : ExternalCodeComp or ExternalCodeImplicitComp object
        The external code object this delegate is associated with.
    """

    def __init__(self, comp):
        """
        Initialize.

        Parameters
        ----------
        comp : ExternalCodeComp or ExternalCodeImplicitComp object
            The external code object this delegate is associated with.
        """
        self._comp = comp

    def declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        Options are declared here because this class is intended to be subclassed by
        the end user. The `initialize` method is left available for user-defined options.
        """
        comp = self._comp

        comp.options.declare('command', [], desc='command to be executed')
        comp.options.declare('env_vars', {}, desc='Environment variables required by the command')
        comp.options.declare('poll_delay', 0.0, lower=0.0,
                             desc='Delay between polling for command completion. A value of zero '
                                  'will use an internally computed default')
        comp.options.declare('timeout', 0.0, lower=0.0,
                             desc='Maximum time to wait for command completion. A value of zero '
                                  'implies an infinite wait')
        comp.options.declare('external_input_files', [],
                             desc='(optional) list of input file names to check the existence '
                                  'of before solve_nonlinear')
        comp.options.declare('external_output_files', [],
                             desc='(optional) list of input file names to check the existence of '
                                  'after solve_nonlinear')
        comp.options.declare('fail_hard', True,
                             desc="If True, external code errors raise a 'hard' exception "
                                  "(RuntimeError). Otherwise raise a 'soft' exception "
                                  "(AnalysisError).")
        comp.options.declare('allowed_return_codes', [0],
                             desc="Set of return codes that are considered successful.")

    def check_config(self, logger):
        """
        Perform optional error checks.

        Parameters
        ----------
        logger : object
            The object that manages logging output.
        """
        # check for the command
        comp = self._comp
        cmd = [c for c in comp.options['command'] if c.strip()]
        if not cmd:
            logger.error("The command cannot be empty")
        else:
            program_to_execute = comp.options['command'][0]
            command_full_path = find_executable(program_to_execute)

            if not command_full_path:
                logger.error("The command to be executed, '%s', "
                             "cannot be found" % program_to_execute)

        # Check for missing input files. This just generates a warning during
        # setup, since these files may be generated later during execution.
        missing = self._check_for_files(comp.options['external_input_files'])
        if missing:
            logger.warning("The following input files are missing at setup "
                           "time: %s" % missing)

    def _check_for_files(self, files):
        """
        Check that specified files exist.

        Parameters
        ----------
        files : iterable
            Contains files to check.

        Returns
        -------
        list
            List of files that do not exist.
        """
        return [path for path in files if not os.path.exists(path)]

    def run_component(self, command=None):
        """
        Run this component.

        User should call this method from their overriden compute method.

        Parameters
        ----------
        command : List
            Optional command. Otherwise use the command in self.options['command'].
        """
        comp = self._comp
        if not command:
            command = comp.options['command']

        # Sentinel value: distinguishable from any real process return code
        # until the run completes (or fails before launching).
        comp.return_code = -12345678

        if not command:
            raise ValueError('Empty command list')

        if comp.options['fail_hard']:
            err_class = RuntimeError
        else:
            err_class = AnalysisError

        return_code = None

        try:
            missing = self._check_for_files(comp.options['external_input_files'])
            if missing:
                raise err_class("The following input files are missing: %s"
                                % sorted(missing))
            return_code, error_msg = self._execute_local(command)

            if return_code is None:
                # _execute_local returns None on timeout; always a 'soft'
                # error regardless of the fail_hard option.
                raise AnalysisError('Timed out after %s sec.' %
                                    comp.options['timeout'])

            elif return_code not in comp.options['allowed_return_codes']:
                # Try to include the captured stderr contents in the error.
                if isinstance(comp.stderr, str):
                    if os.path.exists(comp.stderr):
                        stderrfile = open(comp.stderr, 'r')
                        error_desc = stderrfile.read()
                        stderrfile.close()
                        err_fragment = "\nError Output:\n%s" % error_desc
                    else:
                        err_fragment = "\n[stderr %r missing]" % comp.stderr
                else:
                    err_fragment = error_msg

                raise err_class('return_code = %d%s' % (return_code,
                                                        err_fragment))

            missing = self._check_for_files(comp.options['external_output_files'])
            if missing:
                raise err_class("The following output files are missing: %s"
                                % sorted(missing))

        finally:
            # -999999 marks "no return code obtained" (launch failure or
            # timeout) so callers never see the initial sentinel.
            comp.return_code = -999999 if return_code is None else return_code

    def _execute_local(self, command):
        """
        Run the command.

        Parameters
        ----------
        command : List
            List containing OS command string.

        Returns
        -------
        int
            Return Code
        str
            Error Message
        """
        # Check to make sure command exists
        comp = self._comp
        if isinstance(command, str):
            program_to_execute = command
        else:
            program_to_execute = command[0]

        # Suppress message from find_executable function, we'll handle it
        numpy.distutils.log.set_verbosity(-1)

        command_full_path = find_executable(program_to_execute)

        if not command_full_path:
            msg = "The command to be executed, '%s', cannot be found" % program_to_execute
            raise ValueError(msg)

        command_for_shell_proc = command
        if sys.platform == 'win32':
            # Wrap in cmd.exe so shell builtins and .bat files also work.
            command_for_shell_proc = ['cmd.exe', '/c'] + command_for_shell_proc

        comp._process = \
            ShellProc(command_for_shell_proc, comp.stdin,
                      comp.stdout, comp.stderr, comp.options['env_vars'])

        try:
            return_code, error_msg = \
                comp._process.wait(comp.options['poll_delay'], comp.options['timeout'])
        finally:
            # Always close redirected streams and drop the process handle,
            # even when wait() raises.
            comp._process.close_files()
            comp._process = None

        return (return_code, error_msg)
class ExternalCodeComp(ExplicitComponent):
    """
    Run an external code as a component.

    Default stdin is the 'null' device, default stdout is the console, and
    default stderr is ``external_code_comp_error.out``.

    Attributes
    ----------
    stdin : str or file object
        Input stream external code reads from.
    stdout : str or file object
        Output stream external code writes to.
    stderr : str or file object
        Error stream external code writes to.
    DEV_NULL : File object
        NULL device.
    STDOUT : File object
        Special value that can be used as the stderr argument to Popen and indicates
        that standard error should go into the same handle as standard output.
    _external_code_runner: ExternalCodeDelegate object
        The delegate object that handles all the running of the external code for this object.
    return_code : int
        Exit status of the child process.
    """

    def __init__(self, **kwargs):
        """
        Intialize the ExternalCodeComp component.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Component options.
        """
        # The delegate must exist before the base __init__ runs so that
        # _declare_options can forward to it.
        self._external_code_runner = ExternalCodeDelegate(self)
        super(ExternalCodeComp, self).__init__(**kwargs)

        self.stdin = DEV_NULL
        self.stdout = None
        self.stderr = "external_code_comp_error.out"

        self.return_code = 0

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        Options are declared here because this class is intended to be subclassed by
        the end user. The `initialize` method is left available for user-defined options.
        """
        self._external_code_runner.declare_options()

    def check_config(self, logger):
        """
        Perform optional error checks.

        Parameters
        ----------
        logger : object
            The object that manages logging output.
        """
        # check for the command
        self._external_code_runner.check_config(logger)

    def compute(self, inputs, outputs):
        """
        Run this component.

        User should call this method from their overriden compute method.

        Parameters
        ----------
        inputs : Vector
            Unscaled, dimensional input variables read via inputs[key].
        outputs : Vector
            Unscaled, dimensional output variables read via outputs[key].
        """
        self._external_code_runner.run_component()
class ExternalCode(ExternalCodeComp):
    """
    Deprecated backwards-compatibility alias for :class:`ExternalCodeComp`.
    """

    def __init__(self, *args, **kwargs):
        """
        Capture Initialize to throw warning.

        Parameters
        ----------
        *args : list
            Deprecated arguments.
        **kwargs : dict
            Deprecated arguments.
        """
        warn_deprecation("'ExternalCode' has been deprecated. Use "
                         "'ExternalCodeComp' instead.")
        super(ExternalCode, self).__init__(*args, **kwargs)
class ExternalCodeImplicitComp(ImplicitComponent):
    """
    Run an external code as an implicit component.

    Default stdin is the 'null' device, default stdout is the console, and
    default stderr is ``external_code_comp_error.out``.

    Attributes
    ----------
    stdin : str or file object
        Input stream external code reads from.
    stdout : str or file object
        Output stream external code writes to.
    stderr : str or file object
        Error stream external code writes to.
    DEV_NULL : File object
        NULL device.
    STDOUT : File object
        Special value that can be used as the stderr argument to Popen and indicates
        that standard error should go into the same handle as standard output.
    _external_code_runner: ExternalCodeDelegate object
        The delegate object that handles all the running of the external code for this object.
    return_code : int
        Exit status of the child process.
    """

    def __init__(self, **kwargs):
        """
        Intialize the ExternalCodeComp component.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Component options.
        """
        # The delegate must exist before the base __init__ runs so that
        # _declare_options can forward to it.
        self._external_code_runner = ExternalCodeDelegate(self)
        super(ExternalCodeImplicitComp, self).__init__(**kwargs)

        self.stdin = DEV_NULL
        self.stdout = None
        self.stderr = "external_code_comp_error.out"

        self.return_code = 0

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        Options are declared here because this class is intended to be subclassed by
        the end user. The `initialize` method is left available for user-defined options.
        """
        self._external_code_runner.declare_options()

        # ImplicitComponent has two separate commands to run.
        self.options.declare('command_apply', [],
                             desc='command to be executed for apply_nonlinear')
        self.options.declare('command_solve', [],
                             desc='command to be executed for solve_nonlinear')
        # The generic 'command' option from the delegate does not apply here.
        self.options.undeclare('command')

    def check_config(self, logger):
        """
        Perform optional error checks.

        Parameters
        ----------
        logger : object
            The object that manages logging output.
        """
        self._external_code_runner.check_config(logger)

    def apply_nonlinear(self, inputs, outputs, residuals):
        """
        Compute residuals given inputs and outputs.

        The model is assumed to be in an unscaled state.

        Parameters
        ----------
        inputs : Vector
            unscaled, dimensional input variables read via inputs[key]
        outputs : Vector
            unscaled, dimensional output variables read via outputs[key]
        residuals : Vector
            unscaled, dimensional residuals written to via residuals[key]
        """
        command = self.options['command_apply']
        # A missing command is a silent no-op by design.
        if command:
            self._external_code_runner.run_component(command=command)

    def solve_nonlinear(self, inputs, outputs):
        """
        Compute outputs given inputs. The model is assumed to be in an unscaled state.

        Parameters
        ----------
        inputs : Vector
            unscaled, dimensional input variables read via inputs[key]
        outputs : Vector
            unscaled, dimensional output variables read via outputs[key]
        """
        command = self.options['command_solve']
        # A missing command is a silent no-op by design.
        if command:
            self._external_code_runner.run_component(command=command)
| [
"os.path.exists",
"openmdao.core.analysis_error.AnalysisError",
"openmdao.utils.shell_proc.ShellProc",
"numpy.distutils.exec_command.find_executable",
"openmdao.utils.general_utils.warn_deprecation"
] | [((7245, 7280), 'numpy.distutils.exec_command.find_executable', 'find_executable', (['program_to_execute'], {}), '(program_to_execute)\n', (7260, 7280), False, 'from numpy.distutils.exec_command import find_executable\n'), ((7637, 7738), 'openmdao.utils.shell_proc.ShellProc', 'ShellProc', (['command_for_shell_proc', 'comp.stdin', 'comp.stdout', 'comp.stderr', "comp.options['env_vars']"], {}), "(command_for_shell_proc, comp.stdin, comp.stdout, comp.stderr,\n comp.options['env_vars'])\n", (7646, 7738), False, 'from openmdao.utils.shell_proc import STDOUT, DEV_NULL, ShellProc\n'), ((10951, 11043), 'openmdao.utils.general_utils.warn_deprecation', 'warn_deprecation', (['"""\'ExternalCode\' has been deprecated. Use \'ExternalCodeComp\' instead."""'], {}), '(\n "\'ExternalCode\' has been deprecated. Use \'ExternalCodeComp\' instead.")\n', (10967, 11043), False, 'from openmdao.utils.general_utils import warn_deprecation\n'), ((3417, 3452), 'numpy.distutils.exec_command.find_executable', 'find_executable', (['program_to_execute'], {}), '(program_to_execute)\n', (3432, 3452), False, 'from numpy.distutils.exec_command import find_executable\n'), ((5428, 5494), 'openmdao.core.analysis_error.AnalysisError', 'AnalysisError', (["('Timed out after %s sec.' % comp.options['timeout'])"], {}), "('Timed out after %s sec.' % comp.options['timeout'])\n", (5441, 5494), False, 'from openmdao.core.analysis_error import AnalysisError\n'), ((4350, 4370), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4364, 4370), False, 'import os\n'), ((5678, 5705), 'os.path.exists', 'os.path.exists', (['comp.stderr'], {}), '(comp.stderr)\n', (5692, 5705), False, 'import os\n')] |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for metrics layers."""
from absl.testing import absltest
import numpy as np
from trax import shapes
import trax.layers as tl
from trax.layers import metrics
class MetricsTest(absltest.TestCase):
    """Shape and semantics checks for layers in ``trax.layers.metrics``."""

    def test_cross_entropy(self):
        layer = metrics._CrossEntropy()
        # Inputs: model activations with a trailing class axis, plus targets.
        xs = [np.ones((9, 4, 4, 20)),
              np.ones((9, 4, 4))]
        y = layer(xs)
        # Per-position losses: the class axis is reduced away.
        self.assertEqual(y.shape, (9, 4, 4))

    def test_accuracy(self):
        layer = metrics._Accuracy()
        xs = [np.ones((9, 4, 4, 20)),
              np.ones((9, 4, 4))]
        y = layer(xs)
        self.assertEqual(y.shape, (9, 4, 4))

    def test_weighted_mean_shape(self):
        layer = metrics._WeightedMean()
        xs = [np.ones((9, 4, 4, 20)),
              np.ones((9, 4, 4, 20))]
        y = layer(xs)
        # A weighted mean reduces everything to a scalar.
        self.assertEqual(y.shape, ())

    def test_weighted_mean_semantics(self):
        layer = metrics._WeightedMean()
        sample_input = np.ones((3,))
        sample_weights = np.ones((3,))
        layer.init(shapes.signature([sample_input, sample_weights]))
        x = np.array([1., 2., 3.])
        # Uniform weights -> plain arithmetic mean.
        weights = np.array([1., 1., 1.])
        mean = layer((x, weights))
        np.testing.assert_allclose(mean, 2.)
        # A one-hot weight vector selects a single element.
        weights = np.array([0., 0., 1.])
        mean = layer((x, weights))
        np.testing.assert_allclose(mean, 3.)
        weights = np.array([1., 0., 0.])
        mean = layer((x, weights))
        np.testing.assert_allclose(mean, 1.)

    def test_weighted_sequence_mean_semantics(self):
        layer = metrics._WeightedSequenceMean()
        sample_input = np.ones((2, 3))
        sample_weights = np.ones((3,))
        full_signature = shapes.signature([sample_input, sample_weights])
        layer.init(full_signature)
        x = np.array([[1., 1., 1.], [1., 1., 0.]])
        weights = np.array([1., 1., 1.])
        mean = layer((x, weights))
        np.testing.assert_allclose(mean, 0.5)
        # Masking out the last position makes both sequences count.
        weights = np.array([1., 1., 0.])
        mean = layer((x, weights))
        np.testing.assert_allclose(mean, 1.)

    def test_cross_entropy_loss(self):
        layer = tl.CrossEntropyLoss()
        xs = [np.ones((9, 4, 4, 20)),
              np.ones((9, 4, 4)),
              np.ones((9, 4, 4))]
        y = layer(xs)
        # The loss layer additionally weight-averages down to a scalar.
        self.assertEqual(y.shape, ())

    def test_accuracy_scalar(self):
        layer = tl.AccuracyScalar()
        xs = [np.ones((9, 4, 4, 20)),
              np.ones((9, 4, 4)),
              np.ones((9, 4, 4))]
        y = layer(xs)
        self.assertEqual(y.shape, ())

    def test_l2_loss(self):
        layer = tl.L2Loss()
        sample_input = np.ones((2, 2))
        sample_target = np.ones((2, 2))
        sample_weights = np.ones((2, 2))
        full_signature = shapes.signature([sample_input,
                                          sample_target,
                                          sample_weights])
        layer.init(full_signature)
        x = np.array([[1., 1.], [1., 1.]])
        target = np.array([[1., 1.], [1., 0.]])
        weights = np.array([[1., 1.], [1., 0.]])
        loss = layer((x, target, weights))
        # The only mismatching cell has zero weight, so the loss vanishes.
        np.testing.assert_allclose(loss, 0.0)
        weights = np.array([[1., 0.], [0., 1.]])
        loss = layer((x, target, weights))
        np.testing.assert_allclose(loss, 0.5)
if __name__ == '__main__':
    # Run all test cases via absl's test runner.
    absltest.main()
| [
"trax.layers.metrics._Accuracy",
"numpy.ones",
"trax.shapes.signature",
"numpy.testing.assert_allclose",
"trax.layers.metrics._WeightedMean",
"absl.testing.absltest.main",
"numpy.array",
"trax.layers.AccuracyScalar",
"trax.layers.metrics._WeightedSequenceMean",
"trax.layers.CrossEntropyLoss",
"t... | [((3654, 3669), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3667, 3669), False, 'from absl.testing import absltest\n'), ((869, 892), 'trax.layers.metrics._CrossEntropy', 'metrics._CrossEntropy', ([], {}), '()\n', (890, 892), False, 'from trax.layers import metrics\n'), ((1056, 1075), 'trax.layers.metrics._Accuracy', 'metrics._Accuracy', ([], {}), '()\n', (1073, 1075), False, 'from trax.layers import metrics\n'), ((1250, 1273), 'trax.layers.metrics._WeightedMean', 'metrics._WeightedMean', ([], {}), '()\n', (1271, 1273), False, 'from trax.layers import metrics\n'), ((1449, 1472), 'trax.layers.metrics._WeightedMean', 'metrics._WeightedMean', ([], {}), '()\n', (1470, 1472), False, 'from trax.layers import metrics\n'), ((1492, 1505), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (1499, 1505), True, 'import numpy as np\n'), ((1527, 1540), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (1534, 1540), True, 'import numpy as np\n'), ((1615, 1640), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (1623, 1640), True, 'import numpy as np\n'), ((1652, 1677), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (1660, 1677), True, 'import numpy as np\n'), ((1710, 1747), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean', '(2.0)'], {}), '(mean, 2.0)\n', (1736, 1747), True, 'import numpy as np\n'), ((1762, 1787), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (1770, 1787), True, 'import numpy as np\n'), ((1820, 1857), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean', '(3.0)'], {}), '(mean, 3.0)\n', (1846, 1857), True, 'import numpy as np\n'), ((1872, 1897), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (1880, 1897), True, 'import numpy as np\n'), ((1930, 1967), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean', '(1.0)'], {}), '(mean, 1.0)\n', (1956, 1967), True, 
'import numpy as np\n'), ((2031, 2062), 'trax.layers.metrics._WeightedSequenceMean', 'metrics._WeightedSequenceMean', ([], {}), '()\n', (2060, 2062), False, 'from trax.layers import metrics\n'), ((2082, 2097), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (2089, 2097), True, 'import numpy as np\n'), ((2119, 2132), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (2126, 2132), True, 'import numpy as np\n'), ((2154, 2202), 'trax.shapes.signature', 'shapes.signature', (['[sample_input, sample_weights]'], {}), '([sample_input, sample_weights])\n', (2170, 2202), False, 'from trax import shapes\n'), ((2243, 2287), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]]'], {}), '([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])\n', (2251, 2287), True, 'import numpy as np\n'), ((2296, 2321), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2304, 2321), True, 'import numpy as np\n'), ((2354, 2391), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean', '(0.5)'], {}), '(mean, 0.5)\n', (2380, 2391), True, 'import numpy as np\n'), ((2407, 2432), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 0.0])\n', (2415, 2432), True, 'import numpy as np\n'), ((2465, 2502), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean', '(1.0)'], {}), '(mean, 1.0)\n', (2491, 2502), True, 'import numpy as np\n'), ((2552, 2573), 'trax.layers.CrossEntropyLoss', 'tl.CrossEntropyLoss', ([], {}), '()\n', (2571, 2573), True, 'import trax.layers as tl\n'), ((2767, 2786), 'trax.layers.AccuracyScalar', 'tl.AccuracyScalar', ([], {}), '()\n', (2784, 2786), True, 'import trax.layers as tl\n'), ((2972, 2983), 'trax.layers.L2Loss', 'tl.L2Loss', ([], {}), '()\n', (2981, 2983), True, 'import trax.layers as tl\n'), ((3003, 3018), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3010, 3018), True, 'import numpy as np\n'), ((3039, 3054), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3046, 3054), True, 
'import numpy as np\n'), ((3076, 3091), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3083, 3091), True, 'import numpy as np\n'), ((3113, 3176), 'trax.shapes.signature', 'shapes.signature', (['[sample_input, sample_target, sample_weights]'], {}), '([sample_input, sample_target, sample_weights])\n', (3129, 3176), False, 'from trax import shapes\n'), ((3295, 3329), 'numpy.array', 'np.array', (['[[1.0, 1.0], [1.0, 1.0]]'], {}), '([[1.0, 1.0], [1.0, 1.0]])\n', (3303, 3329), True, 'import numpy as np\n'), ((3339, 3373), 'numpy.array', 'np.array', (['[[1.0, 1.0], [1.0, 0.0]]'], {}), '([[1.0, 1.0], [1.0, 0.0]])\n', (3347, 3373), True, 'import numpy as np\n'), ((3384, 3418), 'numpy.array', 'np.array', (['[[1.0, 1.0], [1.0, 0.0]]'], {}), '([[1.0, 1.0], [1.0, 0.0]])\n', (3392, 3418), True, 'import numpy as np\n'), ((3458, 3495), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['loss', '(0.0)'], {}), '(loss, 0.0)\n', (3484, 3495), True, 'import numpy as np\n'), ((3511, 3545), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (3519, 3545), True, 'import numpy as np\n'), ((3585, 3622), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['loss', '(0.5)'], {}), '(loss, 0.5)\n', (3611, 3622), True, 'import numpy as np\n'), ((903, 925), 'numpy.ones', 'np.ones', (['(9, 4, 4, 20)'], {}), '((9, 4, 4, 20))\n', (910, 925), True, 'import numpy as np\n'), ((937, 955), 'numpy.ones', 'np.ones', (['(9, 4, 4)'], {}), '((9, 4, 4))\n', (944, 955), True, 'import numpy as np\n'), ((1086, 1108), 'numpy.ones', 'np.ones', (['(9, 4, 4, 20)'], {}), '((9, 4, 4, 20))\n', (1093, 1108), True, 'import numpy as np\n'), ((1120, 1138), 'numpy.ones', 'np.ones', (['(9, 4, 4)'], {}), '((9, 4, 4))\n', (1127, 1138), True, 'import numpy as np\n'), ((1284, 1306), 'numpy.ones', 'np.ones', (['(9, 4, 4, 20)'], {}), '((9, 4, 4, 20))\n', (1291, 1306), True, 'import numpy as np\n'), ((1318, 1340), 'numpy.ones', 'np.ones', (['(9, 4, 4, 
20)'], {}), '((9, 4, 4, 20))\n', (1325, 1340), True, 'import numpy as np\n'), ((1556, 1604), 'trax.shapes.signature', 'shapes.signature', (['[sample_input, sample_weights]'], {}), '([sample_input, sample_weights])\n', (1572, 1604), False, 'from trax import shapes\n'), ((2584, 2606), 'numpy.ones', 'np.ones', (['(9, 4, 4, 20)'], {}), '((9, 4, 4, 20))\n', (2591, 2606), True, 'import numpy as np\n'), ((2618, 2636), 'numpy.ones', 'np.ones', (['(9, 4, 4)'], {}), '((9, 4, 4))\n', (2625, 2636), True, 'import numpy as np\n'), ((2648, 2666), 'numpy.ones', 'np.ones', (['(9, 4, 4)'], {}), '((9, 4, 4))\n', (2655, 2666), True, 'import numpy as np\n'), ((2797, 2819), 'numpy.ones', 'np.ones', (['(9, 4, 4, 20)'], {}), '((9, 4, 4, 20))\n', (2804, 2819), True, 'import numpy as np\n'), ((2831, 2849), 'numpy.ones', 'np.ones', (['(9, 4, 4)'], {}), '((9, 4, 4))\n', (2838, 2849), True, 'import numpy as np\n'), ((2861, 2879), 'numpy.ones', 'np.ones', (['(9, 4, 4)'], {}), '((9, 4, 4))\n', (2868, 2879), True, 'import numpy as np\n')] |
import copy
import numpy as np
import sys
import vnmrjpy as vj
import time
class Admm():
    """Alternating Direction Method of Multipliers solver for Aloha.

    Tuned for the ALOHA MRI reconstruction framework, not for general use yet.
    Lmafit estimates the rank, then Admm is used to enforce the Hankel
    structure on the low-rank factorization U @ V^H.

    refs.: Aloha papers ?
            Admm paper ?
    """
    def __init__(self,U,V,fiber_stage_known,stage,rp,\
                mu=1000,\
                realtimeplot=False,\
                noiseless=True,\
                device='CPU'):
        """Initialize solver.

        Args:
            U, V (np.ndarray) : U @ V.conj().T is the estimated Hankel
                matrix coming from the Lmafit rank estimation
            fiber_stage_known : fiber with the known (sampled) elements;
                everything else is zero
            stage (int) : pyramidal decomposition stage
            rp (dict) : Aloha recon parameters; must contain 'fiber_shape'
            mu (float) : ADMM penalty parameter
            realtimeplot (bool) : plot the running estimate each iteration
            noiseless (bool) : noiseless data-consistency assumption
            device (str) : unused placeholder ('CPU')
        """
        #TODO change the old hankel compose-decompose to the new one
        fiber_shape = rp['fiber_shape']
        #removed np matrix
        # a.H syntax is changed to a.conj().T
        #U = np.matrix(U)
        #V = np.matrix(V)
        # Build a binary Hankel mask from the known elements: 1 where the
        # Hankel matrix of the sampled fiber is non-zero, 0 elsewhere.
        hankel_mask = np.absolute(vj.aloha.construct_hankel(fiber_stage_known,rp))
        hankel_mask[hankel_mask != 0] = 1
        hankel_mask = np.array(hankel_mask,dtype='complex64')
        # Complement mask selects the unknown (to be inferred) entries.
        hankel_mask_inv = np.ones(hankel_mask.shape) - hankel_mask
        self.hankel_mask = hankel_mask
        self.hankel_mask_inv = hankel_mask_inv
        # real time plotting for debugging purposes
        self.realtimeplot = realtimeplot
        if realtimeplot == True:
            self.rtplot = vj.util.RealTimeImshow(np.absolute(U.dot(V.conj().T)))
        # putting initpars into tuple
        self.initpars = (U,V,fiber_stage_known,stage,rp,mu,noiseless)

    def solve(self,max_iter=100):
        """The actual ADMM iteration.

        Alternates between (1) enforcing the Hankel structure with the
        known samples re-inserted, and (2) least-squares updates of the
        factors U, V plus a Lagrangian (dual) update.

        Args:
            max_iter (int) : number of ADMM iterations
        Returns:
            np.ndarray : the final Hankel estimate U.dot(V.conj().T)
        """
        (U,V,fiber_stage,s,rp,mu,noiseless) = self.initpars
        hankel = U.dot(V.conj().T)
        fiber_orig_part = copy.deepcopy(fiber_stage)
        # init lagrangian update
        lagr = np.zeros(hankel.shape,dtype='complex64')
        #lagr = copy.deepcopy(hankel)
        # Identity sizes matching U^H U and V^H V for the regularized
        # least-squares factor updates below.
        us = (U.conj().T.dot(U)).shape
        vs = (V.conj().T.dot(V)).shape
        Iu = np.eye(us[0],us[1],dtype='complex64')
        Iv = np.eye(vs[0],vs[0],dtype='complex64')
        for _ in range(max_iter):
            #start = time.time()
            # Project onto the Hankel structure: average the anti-diagonal
            # entries of the unknown part, then re-insert the known samples.
            hankel_inferred_part = np.multiply(\
                U.dot(V.conj().T)-lagr,self.hankel_mask_inv)
            #dtime = time.time()
            fiber_inferred_part = vj.aloha.deconstruct_hankel(\
                hankel_inferred_part,s,rp)
            #print('deconstruct time {}'.format(time.time()-dtime))
            fiber = fiber_orig_part + fiber_inferred_part
            hankel = vj.aloha.construct_hankel(fiber,rp)
            # updating U,V and the lagrangian
            #TODO consider multidot....
            U = mu*(hankel+lagr).dot(V).dot(\
                np.linalg.inv(Iv+mu*V.conj().T.dot(V)))
            V = mu*((hankel+lagr).conj().T).dot(U).dot(\
                np.linalg.inv(Iu+mu*U.conj().T.dot(U)))
            lagr = hankel - U.dot(V.conj().T) + lagr
            if self.realtimeplot == True:
                self.rtplot.update_data(np.absolute(U.dot(V.conj().T)))
            #print('admm iter time {}'.format(time.time()-start))
        return U.dot(V.conj().T)
| [
"vnmrjpy.aloha.construct_hankel",
"numpy.eye",
"vnmrjpy.aloha.deconstruct_hankel",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"copy.deepcopy"
] | [((1379, 1419), 'numpy.array', 'np.array', (['hankel_mask'], {'dtype': '"""complex64"""'}), "(hankel_mask, dtype='complex64')\n", (1387, 1419), True, 'import numpy as np\n'), ((2167, 2193), 'copy.deepcopy', 'copy.deepcopy', (['fiber_stage'], {}), '(fiber_stage)\n', (2180, 2193), False, 'import copy\n'), ((2242, 2283), 'numpy.zeros', 'np.zeros', (['hankel.shape'], {'dtype': '"""complex64"""'}), "(hankel.shape, dtype='complex64')\n", (2250, 2283), True, 'import numpy as np\n'), ((2412, 2451), 'numpy.eye', 'np.eye', (['us[0]', 'us[1]'], {'dtype': '"""complex64"""'}), "(us[0], us[1], dtype='complex64')\n", (2418, 2451), True, 'import numpy as np\n'), ((2463, 2502), 'numpy.eye', 'np.eye', (['vs[0]', 'vs[0]'], {'dtype': '"""complex64"""'}), "(vs[0], vs[0], dtype='complex64')\n", (2469, 2502), True, 'import numpy as np\n'), ((1266, 1314), 'vnmrjpy.aloha.construct_hankel', 'vj.aloha.construct_hankel', (['fiber_stage_known', 'rp'], {}), '(fiber_stage_known, rp)\n', (1291, 1314), True, 'import vnmrjpy as vj\n'), ((1445, 1471), 'numpy.ones', 'np.ones', (['hankel_mask.shape'], {}), '(hankel_mask.shape)\n', (1452, 1471), True, 'import numpy as np\n'), ((2846, 2902), 'vnmrjpy.aloha.deconstruct_hankel', 'vj.aloha.deconstruct_hankel', (['hankel_inferred_part', 's', 'rp'], {}), '(hankel_inferred_part, s, rp)\n', (2873, 2902), True, 'import vnmrjpy as vj\n'), ((3082, 3118), 'vnmrjpy.aloha.construct_hankel', 'vj.aloha.construct_hankel', (['fiber', 'rp'], {}), '(fiber, rp)\n', (3107, 3118), True, 'import vnmrjpy as vj\n')] |
import nltk
# Fetch the tokenizer / lemmatizer corpora needed at runtime (no-op if cached).
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
# Trained intent-classification model (input: bag-of-words vector).
model = load_model('weights/chatbot_model.h5')
import json
import random
# intents: tag -> canned responses; words/classes: vocabulary and label
# lists pickled at training time — their order must match the model.
intents = json.loads(open('weights/job_intents.json', encoding='utf-8').read())
words = pickle.load(open('weights/words.pkl','rb'))
classes = pickle.load(open('weights/classes.pkl','rb'))
def clean_up_sentence(sentence):
    """Tokenize *sentence* and return the lower-cased lemma of each token."""
    tokens = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(tok.lower()) for tok in tokens]
# Bag-of-words encoding: 1 for each vocabulary word present in the sentence.
def bow(sentence, words, show_details=True):
    """Encode *sentence* as a 0/1 vector over the vocabulary *words*.

    Args:
        sentence: raw user input string.
        words: ordered vocabulary list (order must match the model input).
        show_details: when True, print each vocabulary hit.

    Returns:
        np.ndarray of 0/1 flags, one per vocabulary word.
    """
    tokens = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for token in tokens:
        for idx, vocab_word in enumerate(words):
            if vocab_word != token:
                continue
            bag[idx] = 1
            if show_details:
                print ("found in bag: %s" % vocab_word)
    return np.array(bag)
def predict_class(sentence, model):
    """Rank intent classes for *sentence*, dropping low-probability ones.

    Returns a list of ``{"intent": tag, "probability": str}`` dicts sorted
    by descending probability; only classes above the 0.25 threshold are kept.
    """
    features = bow(sentence, words, show_details=False)
    probabilities = model.predict(np.array([features]))[0]
    threshold = 0.25
    candidates = [(idx, prob) for idx, prob in enumerate(probabilities)
                  if prob > threshold]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return [{"intent": classes[idx], "probability": str(prob)}
            for idx, prob in candidates]
def getResponse(ints, intents_json):
    """Return a response string for the top predicted intent.

    Args:
        ints: ranked predictions as returned by ``predict_class`` — a list
            of ``{"intent": tag, "probability": str}`` dicts (may be empty
            when nothing clears the probability threshold).
        intents_json: parsed intents file with an ``"intents"`` list of
            ``{"tag": ..., "responses": [...]}`` entries.

    Returns:
        A randomly chosen response for the matching tag, or the fallback
        message when there is no prediction or no matching tag.
    """
    fallback = "You must ask the right questions"
    # Guard: the original indexed ints[0] unconditionally, raising
    # IndexError when no class cleared the threshold.
    if not ints:
        return fallback
    tag = ints[0]['intent']
    # Pre-assign the fallback: the original left ``result`` unbound
    # (NameError) when the intents list was empty.
    result = fallback
    for intent in intents_json['intents']:
        if intent['tag'] == tag:
            result = random.choice(intent['responses'])
            break
    return result
def chatbot_response(msg):
    """Classify *msg* with the global model and return a canned response."""
    return getResponse(predict_class(msg, model), intents)
| [
"random.choice",
"keras.models.load_model",
"nltk.word_tokenize",
"nltk.download",
"nltk.stem.WordNetLemmatizer",
"numpy.array"
] | [((12, 34), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (25, 34), False, 'import nltk\n'), ((35, 59), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (48, 59), False, 'import nltk\n'), ((60, 84), 'nltk.download', 'nltk.download', (['"""omw-1.4"""'], {}), "('omw-1.4')\n", (73, 84), False, 'import nltk\n'), ((138, 157), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (155, 157), False, 'from nltk.stem import WordNetLemmatizer\n'), ((236, 274), 'keras.models.load_model', 'load_model', (['"""weights/chatbot_model.h5"""'], {}), "('weights/chatbot_model.h5')\n", (246, 274), False, 'from keras.models import load_model\n'), ((545, 573), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (563, 573), False, 'import nltk\n'), ((1264, 1277), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (1272, 1277), True, 'import numpy as np\n'), ((1436, 1449), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (1444, 1449), True, 'import numpy as np\n'), ((1976, 2005), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (1989, 2005), False, 'import random\n')] |
import uuid
class Pedestrian:
    """A single pedestrian detection with a unique id and box geometry."""

    def __init__(self, bbox, label, confidence):
        """
        Args:
            bbox: box corners as [x_min, y_min, x_max, y_max]
            label: class label of the detection
            confidence: detector confidence score
        """
        self.id = str(uuid.uuid4())  # unique id for this tracked pedestrian
        self.bbox = bbox
        self.label = label
        # BUG FIX: the original called the bare name ``find_centroid``,
        # which does not exist at module level — the method must be
        # invoked on ``self`` (raised NameError at construction).
        self.centroid = self.find_centroid(bbox)
        self.confidence = confidence

    def find_centroid(self, bbox):
        '''Compute the [x, y] center of a [x_min, y_min, x_max, y_max] box.'''
        x_centroid = int((bbox[0] + bbox[2]) / 2)  # x coordinate
        y_centroid = int((bbox[1] + bbox[3]) / 2)  # y coordinate
        return [x_centroid, y_centroid]

    def update_ped(self, matched_bbox):
        """Replace the box with a matched detection and refresh the centroid."""
        self.bbox = matched_bbox
        # BUG FIX: same unqualified ``find_centroid`` call as in __init__.
        self.centroid = self.find_centroid(matched_bbox)
self.centroid = find_centroid(matched_bbox)
class Scene:
    """Per-frame bookkeeping of visible pedestrians and lost trackers."""

    def __init__(self, ped_list):
        """Start a scene with *ped_list* detections and no unmatched trackers."""
        self.ped_list = ped_list
        self.unmatched_tracker = list()

    def count(self):
        """Return (number of pedestrians, number of unmatched trackers)."""
        return (len(self.ped_list), len(self.unmatched_tracker))

    def update(self, new_ped_list, new_unmatched_tracker):
        """Replace both bookkeeping lists with the latest frame's results."""
        self.ped_list = new_ped_list
        self.unmatched_tracker = new_unmatched_tracker
'''
Implement and test tracker
'''
import numpy as np
from numpy import dot
from scipy.linalg import inv, block_diag
class KalmanTracker():  # class for Kalman Filter-based tracker
    """Constant-velocity Kalman filter tracking one detection box.

    The 8-dim state interleaves the four box coordinates with their
    velocities: [up, up_dot, left, left_dot, down, down_dot,
    right, right_dot] (equivalently height/width variants).
    """

    def __init__(self):
        # Track bookkeeping (history)
        self.id = 0          # tracker's id
        self.box = []        # coordinates of the tracked bounding box
        self.hits = 0        # number of detection matches
        self.no_losses = 0   # number of unmatched tracks (track loss)

        # Kalman filtering state
        self.x_state = []
        self.dt = 1.  # time interval between frames

        # Process matrix F (constant-velocity model): identity plus a
        # dt coupling from each coordinate row to its velocity column.
        self.F = np.eye(8)
        for row in (0, 2, 4, 6):
            self.F[row, row + 1] = self.dt

        # Measurement matrix H: only the four coordinates are observed.
        self.H = np.zeros((4, 8))
        for row, col in enumerate((0, 2, 4, 6)):
            self.H[row, col] = 1.

        # State covariance P, initialised large (uncertain start).
        self.L = 10.0
        self.P = self.L * np.eye(8)

        # Process covariance Q: discretised white-noise block per axis.
        self.Q_comp_mat = np.array([[self.dt**4/4., self.dt**3/2.],
                                    [self.dt**3/2., self.dt**2]])
        self.Q = block_diag(self.Q_comp_mat, self.Q_comp_mat,
                            self.Q_comp_mat, self.Q_comp_mat)

        # Measurement covariance R (diagonal), scaled by R_scaler.
        self.R_scaler = 1.0
        self.R_diag_array = self.R_scaler * np.array([self.L, self.L, self.L, self.L])
        self.R = np.diag(self.R_diag_array)

    def update_R(self):
        """Rebuild the measurement covariance after R_scaler changed."""
        R_diag_array = self.R_scaler * np.array([self.L, self.L, self.L, self.L])
        self.R = np.diag(R_diag_array)

    def kalman_filter(self, z):
        '''
        One predict + update cycle of the Kalman filter with the
        measurement z (the four observed box coordinates).
        '''
        x = self.x_state
        # Predict
        x = self.F @ x
        self.P = self.F @ self.P @ self.F.T + self.Q
        # Update
        S = self.H @ self.P @ self.H.T + self.R
        K = self.P @ self.H.T @ inv(S)   # Kalman gain
        y = z - self.H @ x               # residual
        x = x + K @ y
        self.P = self.P - K @ self.H @ self.P
        self.x_state = x.astype(int)  # convert to integer pixel coordinates

    def predict_only(self):
        '''
        Run only the predict stage; used for unmatched detections and
        unmatched tracks.
        '''
        x = self.F @ self.x_state
        self.P = self.F @ self.P @ self.F.T + self.Q
        self.x_state = x.astype(int)
| [
"numpy.ones",
"uuid.uuid4",
"numpy.diag",
"numpy.array",
"numpy.dot",
"scipy.linalg.block_diag",
"scipy.linalg.inv"
] | [((1922, 2179), 'numpy.array', 'np.array', (['[[1, self.dt, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, self.\n dt, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, self.dt, 0, \n 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, self.dt], [0, 0, 0,\n 0, 0, 0, 0, 1]]'], {}), '([[1, self.dt, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, self.dt, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, self\n .dt, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, self.dt], [\n 0, 0, 0, 0, 0, 0, 0, 1]])\n', (1930, 2179), True, 'import numpy as np\n'), ((2502, 2621), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, \n 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0]])\n', (2510, 2621), True, 'import numpy as np\n'), ((2913, 3006), 'numpy.array', 'np.array', (['[[self.dt ** 4 / 4.0, self.dt ** 3 / 2.0], [self.dt ** 3 / 2.0, self.dt ** 2]]'], {}), '([[self.dt ** 4 / 4.0, self.dt ** 3 / 2.0], [self.dt ** 3 / 2.0, \n self.dt ** 2]])\n', (2921, 3006), True, 'import numpy as np\n'), ((3038, 3116), 'scipy.linalg.block_diag', 'block_diag', (['self.Q_comp_mat', 'self.Q_comp_mat', 'self.Q_comp_mat', 'self.Q_comp_mat'], {}), '(self.Q_comp_mat, self.Q_comp_mat, self.Q_comp_mat, self.Q_comp_mat)\n', (3048, 3116), False, 'from scipy.linalg import inv, block_diag\n'), ((3335, 3361), 'numpy.diag', 'np.diag', (['self.R_diag_array'], {}), '(self.R_diag_array)\n', (3342, 3361), True, 'import numpy as np\n'), ((3506, 3527), 'numpy.diag', 'np.diag', (['R_diag_array'], {}), '(R_diag_array)\n', (3513, 3527), True, 'import numpy as np\n'), ((3789, 3803), 'numpy.dot', 'dot', (['self.F', 'x'], {}), '(self.F, x)\n', (3792, 3803), False, 'from numpy import dot\n'), ((4051, 4060), 'numpy.dot', 'dot', (['K', 'y'], {}), '(K, y)\n', (4054, 4060), False, 'from numpy import dot\n'), ((4465, 4479), 'numpy.dot', 
'dot', (['self.F', 'x'], {}), '(self.F, x)\n', (4468, 4479), False, 'from numpy import dot\n'), ((97, 109), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (107, 109), False, 'import uuid\n'), ((3275, 3317), 'numpy.array', 'np.array', (['[self.L, self.L, self.L, self.L]'], {}), '([self.L, self.L, self.L, self.L])\n', (3283, 3317), True, 'import numpy as np\n'), ((3446, 3488), 'numpy.array', 'np.array', (['[self.L, self.L, self.L, self.L]'], {}), '([self.L, self.L, self.L, self.L])\n', (3454, 3488), True, 'import numpy as np\n'), ((3974, 3980), 'scipy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (3977, 3980), False, 'from scipy.linalg import inv, block_diag\n'), ((4012, 4026), 'numpy.dot', 'dot', (['self.H', 'x'], {}), '(self.H, x)\n', (4015, 4026), False, 'from numpy import dot\n'), ((2813, 2823), 'numpy.ones', 'np.ones', (['(8)'], {}), '(8)\n', (2820, 2823), True, 'import numpy as np\n'), ((3948, 3969), 'numpy.dot', 'dot', (['self.P', 'self.H.T'], {}), '(self.P, self.H.T)\n', (3951, 3969), False, 'from numpy import dot\n'), ((3821, 3840), 'numpy.dot', 'dot', (['self.F', 'self.P'], {}), '(self.F, self.P)\n', (3824, 3840), False, 'from numpy import dot\n'), ((3893, 3912), 'numpy.dot', 'dot', (['self.H', 'self.P'], {}), '(self.H, self.P)\n', (3896, 3912), False, 'from numpy import dot\n'), ((4087, 4101), 'numpy.dot', 'dot', (['K', 'self.H'], {}), '(K, self.H)\n', (4090, 4101), False, 'from numpy import dot\n'), ((4497, 4516), 'numpy.dot', 'dot', (['self.F', 'self.P'], {}), '(self.F, self.P)\n', (4500, 4516), False, 'from numpy import dot\n')] |
import matplotlib.pyplot as plt
import os
import numpy as np
from scipy import ndimage
from eratosthenes.generic.mapping_io import read_geo_image, make_geo_im
from eratosthenes.generic.mapping_tools import \
pix2map
from eratosthenes.generic.handler_im import \
bilinear_interpolation, rescale_image
from eratosthenes.generic.terrain_tools import \
ridge_orientation, terrain_curvature
from eratosthenes.generic.gis_tools import get_mask_boundary
from eratosthenes.input.read_sentinel2 import \
list_central_wavelength_msi
from eratosthenes.preprocessing.image_transforms import \
mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization
from eratosthenes.preprocessing.shadow_geometry import \
cast_orientation
from eratosthenes.preprocessing.shadow_filters import \
fade_shadow_cast, anistropic_diffusion_scalar
from eratosthenes.preprocessing.acquisition_geometry import \
get_template_acquisition_angles, get_template_aspect_slope
from eratosthenes.preprocessing.shadow_transforms import \
entropy_shade_removal, shadow_free_rgb, shade_index, \
normalized_range_shadow_index
from eratosthenes.preprocessing.color_transforms import rgb2lms
from eratosthenes.processing.matching_tools import \
get_coordinates_of_template_centers
from eratosthenes.processing.coupling_tools import \
match_pair
from eratosthenes.processing.matching_tools_differential import hough_sinus
from eratosthenes.postprocessing.solar_tools import \
make_shading, make_shadowing
from eratosthenes.presentation.image_io import \
output_image, resize_image, output_mask
# --- configuration: scene, bands and template size ---------------------
toi = 15  # time of interest: acquisition day in Oct 2019 (15 or 25)
window_size = 2**3 #2**3# # template radius in pixels for the matching
boi = ['red', 'green', 'blue', 'nir']  # bands of interest
s2_df = list_central_wavelength_msi()
s2_df = s2_df[s2_df['common_name'].isin(boi)]
if toi==15:
    s2path = '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019/'
elif toi == 25:
    s2path = '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-25-10-2019/'
fpath = os.path.join(s2path, 'shadow.tif')
M_dir, M_name = os.path.split(fpath)
#dat_path = '/Users/Alten005/GO-eratosthenes/start-code/examples/'
Z_file = "COP_DEM_red.tif"  # Copernicus DEM crop of the Red Glacier area
R_file = "5VMG_RGI_red.tif"  # Randolph Glacier Inventory mask crop
Z_dir = os.path.join('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/',
                   'Cop-DEM-GLO-30')
dir_path = os.path.dirname(os.path.realpath(__file__))
if os.getcwd()!=dir_path:
    os.chdir(dir_path) # change to directory where script is situated
# shadow image M, elevation Z, glacier mask R (all on the same grid)
(M, spatialRefM, geoTransformM, targetprjM) = read_geo_image(fpath)
M *= -1
Z = read_geo_image(os.path.join(Z_dir, Z_file))[0]
R = read_geo_image(os.path.join(Z_dir, R_file))[0]
# create observation angles
if toi==15:
    fpath = os.path.join('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full', \
                 'T05VMG_20191015T213531_B08.jp2')
elif toi==25:
    # NOTE(review): this branch reuses the 15-10-2019 path and filename —
    # looks like a copy-paste leftover; confirm the intended 25-10 product.
    fpath = os.path.join('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full', \
                 'T05VMG_20191015T213531_B08.jp2')
_,spatialRefI,geoTransformI,targetprjI = read_geo_image(fpath)
path_meta = '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/'+\
    'S2A_MSIL1C_20191015T213531_N0208_R086_T05VMG_20191015T230223.SAFE/'+\
    'GRANULE/L1C_T05VMG_A022534_20191015T213843/QI_DATA'
#det_stack =read_detector_mask(path_meta, (10980, 10980, len(s2_df)), s2_df, geoTransformI)
#Zn,Az = read_view_angles_s2(s2path, 'metadata.xml', det_stack, s2_df)
#X_grd,Y_grd = pix_centers(geoTransformM, M.shape[0], M.shape[1], make_grid=True)
#I_grd,J_grd = map2pix(geoTransformI, X_grd, Y_grd)
#I_sub = bilinear_interpolation(Az[:,:,0], J_grd, I_grd)
#fpath = os.path.join('/Users/Alten005/GO-eratosthenes/start-code/examples/S2-15-10-2019/', \
#                     'viewZn.tif')
#make_geo_im(I_sub, geoTransformM, spatialRefM, fpath)
# Pre-computed detector id and view-angle rasters (written by the
# commented-out block above) only exist for the 15-10 scene.
if toi==15:
    Det = read_geo_image(os.path.join(s2path, 'detIdBlue.tif'))[0]
    Zn = read_geo_image(os.path.join(s2path, 'viewZn.tif'))[0]
    Az = read_geo_image(os.path.join(s2path, 'viewAz.tif'))[0]
# Sentinel-2 reflectance bands (blue/green/red/NIR)
Blue = read_geo_image(os.path.join(s2path, 'B2.tif'))[0]
Green = read_geo_image(os.path.join(s2path, 'B3.tif'))[0]
Red = read_geo_image(os.path.join(s2path, 'B4.tif'))[0]
Near = read_geo_image(os.path.join(s2path, 'B8.tif'))[0]
# Shadow index from the normalized range of the visible bands.
Sn = normalized_range_shadow_index(mat_to_gray(Red),
                                   mat_to_gray(Green),
                                   mat_to_gray(Blue))
Sn = normalize_histogram(-Sn)
output_image(Sn, 'Red-normalized-range-shadow-index.jpg', cmap='gray')
Li,Mi,Si = rgb2lms(mat_to_gray(Red), mat_to_gray(Green), mat_to_gray(Blue))
RGB = shadow_free_rgb(mat_to_gray(Blue), mat_to_gray(Green),
                      mat_to_gray(Red), mat_to_gray(Near))
# Gamma-stretched RGB composite for visual inspection only.
RGB = np.dstack((gamma_adjustment(Red, gamma=.25),
                 gamma_adjustment(Green, gamma=.25),
                 gamma_adjustment(Blue, gamma=.25)))
RGB = mat_to_gray(1.5*RGB)
RGB[RGB<.3] = .3
output_image(RGB, 'Red-rgb-gamma.jpg')
# Entropy-based shadow/albedo separation; Si is the shading estimate,
# Ri the albedo estimate. NOTE: this rebinds Si from rgb2lms above.
Si,Ri = entropy_shade_removal(mat_to_gray(Blue),
                              mat_to_gray(Red),
                              mat_to_gray(Near), a=138)
#Si,Ri = normalize_histogram(Si), normalize_histogram(Ri)
output_image(Si, 'Red-entropy-shade-removal.jpg', cmap='gray')
output_image(Ri, 'Red-entropy-albedo.jpg', cmap='gray')
ani = anistropic_diffusion_scalar(np.dstack((Sn,Si,Ri)), n=8)
output_image(ani[:,:,0], 'Red-entropy-anistropy.jpg', cmap='gray')
#Si = s_curve(Si, a=20, b=0)
#Si += .5
#Si[Si<-.5] = -.5
Si_ani = anistropic_diffusion_scalar(Si)
Sc = (mat_to_gray(-Sn)*mat_to_gray(Si))
# Template grid over the scene; per-template terrain aspect and slope.
sample_I, sample_J = get_coordinates_of_template_centers(Zn, window_size)
Slp,Asp = get_template_aspect_slope(Z,
                                    sample_I,sample_J,
                                    window_size)
output_image(resize_image(Asp, Z.shape, method='nearest'),
             'Red-aspect-w'+str(2*window_size)+'.jpg', cmap='twilight')
output_image(resize_image(Slp, Z.shape, method='nearest'),
             'Red-slope-w'+str(2*window_size)+'.jpg', cmap='magma')
#if toi==15:
#    Azi,Zen = get_template_acquisition_angles(Az, Zn, Det.astype('int64'),
#                                              sample_I, sample_J,
#                                              window_size)
# Solar zenith/azimuth for the acquisition date.
zn, az = 90-21.3, 174.2 # 15 Oct 2019
#Zn, Az = 90-17.7, 174.8 # 25 Oct 2019
# generate synthetic shadowing (cast shadow mask) and shading (illumination)
# from the DEM and the solar angles
Shw = make_shadowing(Z, az, zn)
Shd = make_shading(Z, az, zn)

import morphsnakes as ms

counts = 30
#M_acwe_si = ms.morphological_chan_vese(Si, counts, init_level_set=Shw,
#                                       smoothing=0, lambda1=1, lambda2=1,
#                                       albedo=Ri)
# Evolve a morphological active contour (Chan-Vese) on the shading image,
# seeded by the DEM-cast shadow, writing the boundary at each step count.
for i in np.linspace(0,counts,counts+1).astype(np.int8):
    M_acwe_si = ms.morphological_chan_vese(Si, i, init_level_set=Shw,
                                          smoothing=0, lambda1=1, lambda2=1,
                                          albedo=Ri)
    Bnd = get_mask_boundary(M_acwe_si)
    output_mask(Bnd,'Red-shadow-polygon-raw'+str(i).zfill(3)+'.png')
# BUG FIX: the original referenced ``M_acwe`` here, a name that is never
# defined (the loop binds ``M_acwe_si``), raising NameError at runtime.
output_image(-1.*M_acwe_si, 'Red-snake-shadow-si.jpg', cmap='gray')
# fading shadowing: soften the cast-shadow mask along the sun direction
# and combine it with the shading into one synthetic reference image Shi
Shf = fade_shadow_cast(Shw, az)
Shi = (Shd+.5) *(1-(0.75*Shf))
output_image(Shw!=True, 'Red-shadow.jpg', cmap='gray')
output_image(-1*Shf, 'Red-shadow-fade.jpg', cmap='gray')
#output_image(Shi, 'Red-artificial-scene.jpg', cmap='gray')
#plt.imshow(Shd, cmap='gray'), plt.show()
#plt.imshow(M, cmap='gray'), plt.show()
# filter along sun direction....?
#from eratosthenes.preprocessing.image_transforms import \
#    log_adjustment, mat_to_gray
#L = log_adjustment(mat_to_gray(M)*2**8)/2**16
#H = match_histograms(L,Shd)
# Directional derivatives of shading / shadow index along the view azimuth.
F = cast_orientation(Shd, Az, indexing='xy')
# fa = rotated_sobel(Az, indexing='xy')
# from scipy import ndimage
# D = ndimage.convolve(Shd, fa)
N = cast_orientation(Si, Az, indexing='xy')
# Stable terrain mask: off-glacier (R==0) and outside the cast shadow.
Stable = (R==0) & (Shw!=1)
# window_size
t_rad = 3
t_size = (2*t_rad)+1
s_size = 7
num_disp = 1
# Match the synthetic scene Shi against the observed shadow image Si on
# the template grid to estimate the co-registration displacement field.
sample_X,sample_Y = pix2map(geoTransformM, sample_I, sample_J)
match_X,match_Y,match_score = match_pair(Shi, Si,
                                         Stable, Stable,
                                         geoTransformM, geoTransformM,
                                         sample_X, sample_Y,
                                         temp_radius=window_size,
                                         search_radius=window_size,
                                         correlator='robu_corr', subpix='moment',
                                         metric='peak_entr')
#                                        correlator='hough_opt_flw',
#                                        preprocessing='hist_equal',
#                                        num_estimates=num_disp,
#                                        max_amp=2)
if num_disp>1:
    dY,dX = np.repeat(sample_Y[:,:,np.newaxis], num_disp, axis=2)-match_Y, \
            np.repeat(sample_X[:,:,np.newaxis], num_disp, axis=2)-match_X
else:
    dY,dX = sample_Y-match_Y, sample_X-match_X
# Keep only finite, sub-pixel (non-integer) displacement estimates.
IN = ~np.isnan(dX)
IN[IN] = np.remainder(dX[IN],1)!=0
#plt.imshow(dY, vmin=-20, vmax=+20), plt.show()
#plt.imshow(dX, vmin=-20, vmax=+20), plt.show()
#plt.figure(), plt.hexbin(dX[IN], dY[IN], gridsize=40)
# Slp<20
# Robust (median) co-registration shift, converted from map units to pixels.
dx_coreg, dy_coreg = np.median(dX[IN]), np.median(dY[IN])
di_coreg = +1*dy_coreg/geoTransformM[5]
dj_coreg = +1*dx_coreg/geoTransformM[1]
# Resample the derived products onto the co-registered grid.
Sc_coreg = bilinear_interpolation(Sc, dj_coreg, di_coreg)
Si_coreg = bilinear_interpolation(Si, dj_coreg, di_coreg)
Ri_coreg = bilinear_interpolation(Ri, dj_coreg, di_coreg)
RGB_coreg = bilinear_interpolation(RGB, dj_coreg, di_coreg)
#fout = os.path.join(s2path, 'shadows_coreg.tif')
#make_geo_im(Sc_coreg, geoTransformM, targetprjI, fout)
#fout = os.path.join(s2path, 'shadow_coreg.tif')
#make_geo_im(Si_coreg, geoTransformM, targetprjI, fout)
#fout = os.path.join(s2path, 'albedo_coreg.tif')
#make_geo_im(Ri_coreg, geoTransformM, targetprjI, fout)
#fout = os.path.join(s2path, 'rgb_clean.tif')
#make_geo_im(RGB_coreg, geoTransformM, targetprjI, fout)
#fout = os.path.join(s2path, 'shading.tif')
#make_geo_im(Shd, geoTransformM, targetprjI, fout)
#fout = os.path.join(s2path, 'shadowing.tif')
#make_geo_im(Shw, geoTransformM, targetprjI, fout)
# Displacement magnitude/direction before co-registration correction.
Rho, Phi = np.sqrt(dY**2+dX**2), np.arctan2(dX,dY)
match_score[match_score==0] = np.nan
output_image(resize_image(match_score, Z.shape, method='nearest'),
             'Red-disp-score-w'+str(2*window_size)+'.jpg', cmap='viridis')
output_image(resize_image(Phi, Z.shape, method='nearest'),
             'Red-disp-dir-w'+str(2*window_size)+'.jpg', cmap='twilight')
output_image(resize_image(Rho, Z.shape, method='nearest'),
             'Red-disp-mag-w'+str(2*window_size)+'.jpg', cmap='magma')
# Residual displacements after removing the median co-registration shift.
dY_coreg, dX_coreg = dY-dy_coreg, dX-dx_coreg
Rho_coreg, Phi_coreg = np.sqrt(dY_coreg**2+dX_coreg**2), \
                       np.arctan2(dX_coreg,dY_coreg)
output_image(resize_image(Phi_coreg, Z.shape, method='nearest'),
             'Red-disp-dir-coreg-w'+str(2*window_size)+'.jpg', cmap='twilight')
output_image(resize_image(Rho_coreg, Z.shape, method='nearest'),
             'Red-disp-mag-coreg-w'+str(2*window_size)+'.jpg', cmap='magma')
#
#from sklearn import linear_model
#lr = linear_model.LinearRegression()
#lr.fit(x, y)
#ransac = linear_model.RANSACRegressor(max_trials=1000,min_samples=300)
#ransac.fit(Si[Stable].reshape(-1, 1), Shd[Stable].reshape(-1, 1))
#Sr = ransac.predict(Si.reshape(-1, 1)).reshape(Si.shape)
#plt.scatter(asp_val,asp_med,s=1,c='black',marker='.')
#plt.scatter(asp_val,asp_num,s=1,c='black',marker='.')
#IN = asp_num>np.quantile(asp_num, .5)
#plt.scatter(asp_val[IN],asp_num[IN],s=1,c='blue',marker='.')
#Sm = match_histograms(-Si[Stable],Shd[Stable])
# Re-derive aspect/slope, now on the smaller template size t_size.
sample_I, sample_J = get_coordinates_of_template_centers(Zn, window_size)
Slp,Asp = get_template_aspect_slope(Z,
                                    sample_I,sample_J,
                                    t_size)
# Polar scatter of displacement direction/magnitude on gentle slopes.
fig = plt.figure()
ax = fig.add_subplot(projection='polar')
c = ax.scatter(Phi[Slp<20], Rho[Slp<20],2,match_score[Slp<20])
ax.set_rmax(40)
# Shading residual and its slope-normalised version.
dI = Shd-Si #Shd + 4*Si#-L
tan_Slp = np.tan(np.radians(Slp))
dD = np.divide(dI,tan_Slp,
               out=np.zeros_like(tan_Slp), where=tan_Slp!=0)
Curv = terrain_curvature(Z)
Z_az = ridge_orientation(Z)
Ridge = Curv>1.
#plt.imshow(dI, cmap=plt.cm.RbBu)
# Sinusoidal fit of shading / shadow index as a function of aspect.
Asp_Shd,dD_Shd = hough_sinus(np.radians(Asp[Stable]), Shd[Stable],
                             max_amp=1, sample_fraction=5000)
Asp_Si,dD_Si = hough_sinus(np.radians(Asp[Stable]), Si[Stable],
                           max_amp=1, sample_fraction=5000)
fig, (ax0,ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
plt.scatter(Curv[Ridge], Z_az[Ridge])
#di, dj = pol2cart(dD_H, Asp_H)
# Aspect vs shadow index, full scene vs stable terrain only.
fig, (ax0,ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
ax0.hist2d(Asp.flatten(), Si.flatten(),
           bins=90, range=[[-180, +180], [0, 1]],
           cmap=plt.cm.gist_heat_r)
ax1.hist2d(Asp[Stable], Si[Stable],
           bins=90, range=[[-180, +180], [0, 1]],
           cmap=plt.cm.gist_heat_r)
plt.show()
# Aspect vs synthetic shading / observed shadow index on stable terrain.
fig, (ax0,ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
ax0.hist2d(Asp[Stable], Shd[Stable],
           bins=90, range=[[-180, +180], [-.5, .5]],
           cmap=plt.cm.gist_heat_r)
ax1.hist2d(Asp[Stable], Si[Stable],
           bins=90, range=[[-180, +180], [-.5, .5]],
           cmap=plt.cm.gist_heat_r)
#ax1.scatter(asp_val,asp_med,s=1,c='black',marker='.')
plt.show()
fig, (ax0,ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
ax0.imshow(-Si, cmap=plt.cm.bone)
ax1.imshow(Shd, cmap=plt.cm.bone)
plt.show()
fig, (ax0,ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
ax0.imshow(F, cmap=plt.cm.bone)
ax1.imshow(N, cmap=plt.cm.bone)
plt.show()
# Normalized band-difference scatter on stable terrain.
plt.hist2d(np.divide(Blue-Green,Blue+Green)[Stable],
           np.divide(Red-Near,Red+Near)[Stable],
           bins=100, range=[[-1, 1], [-1, 1]],
           cmap=plt.cm.gist_heat_r)
plt.show()
plt.hist2d(Asp[Stable], dI[Stable],
           bins=90, range=[[-180, +180], [-1, +1]],
           cmap=plt.cm.gist_heat_r)
plt.show()
plt.hist(Asp[Stable])
plt.show()
dY,dX = sample_Y-match_Y, sample_X-match_X
#plt.imshow(dY, vmin=-20, vmax=+20), plt.show()
#plt.imshow(dX, vmin=-20, vmax=+20), plt.show()
Rho, Phi = np.sqrt(dY**2,dX**2), np.arctan2(dY,dX)
dD = np.divide(Rho,np.tan(np.radians(Slp)))
dD = np.multiply(Rho,np.tan(np.radians(Slp)))
IN = dD!=0
# IN = (dX!=0) & (dY!=0)
plt.hist2d(Asp[IN], dD[IN], bins=90, range=[[-180, +180], [0, .4]], cmap=plt.cm.jet)
plt.show()
plt.scatter(Phi, np.divide(Rho,np.tan(np.radians(Slp))))
plt.show()
| [
"numpy.radians",
"matplotlib.pyplot.hist",
"numpy.sqrt",
"eratosthenes.processing.coupling_tools.match_pair",
"eratosthenes.preprocessing.image_transforms.gamma_adjustment",
"numpy.arctan2",
"eratosthenes.preprocessing.image_transforms.normalize_histogram",
"numpy.divide",
"eratosthenes.generic.mapp... | [((1709, 1738), 'eratosthenes.input.read_sentinel2.list_central_wavelength_msi', 'list_central_wavelength_msi', ([], {}), '()\n', (1736, 1738), False, 'from eratosthenes.input.read_sentinel2 import list_central_wavelength_msi\n'), ((2005, 2039), 'os.path.join', 'os.path.join', (['s2path', '"""shadow.tif"""'], {}), "(s2path, 'shadow.tif')\n", (2017, 2039), False, 'import os\n'), ((2057, 2077), 'os.path.split', 'os.path.split', (['fpath'], {}), '(fpath)\n', (2070, 2077), False, 'import os\n'), ((2208, 2296), 'os.path.join', 'os.path.join', (['"""/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/"""', '"""Cop-DEM-GLO-30"""'], {}), "('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/',\n 'Cop-DEM-GLO-30')\n", (2220, 2296), False, 'import os\n'), ((2514, 2535), 'eratosthenes.generic.mapping_io.read_geo_image', 'read_geo_image', (['fpath'], {}), '(fpath)\n', (2528, 2535), False, 'from eratosthenes.generic.mapping_io import read_geo_image, make_geo_im\n'), ((3083, 3104), 'eratosthenes.generic.mapping_io.read_geo_image', 'read_geo_image', (['fpath'], {}), '(fpath)\n', (3097, 3104), False, 'from eratosthenes.generic.mapping_io import read_geo_image, make_geo_im\n'), ((4464, 4488), 'eratosthenes.preprocessing.image_transforms.normalize_histogram', 'normalize_histogram', (['(-Sn)'], {}), '(-Sn)\n', (4483, 4488), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4489, 4559), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['Sn', '"""Red-normalized-range-shadow-index.jpg"""'], {'cmap': '"""gray"""'}), "(Sn, 'Red-normalized-range-shadow-index.jpg', cmap='gray')\n", (4501, 4559), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((4922, 4944), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['(1.5 * RGB)'], {}), '(1.5 * RGB)\n', (4933, 4944), False, 'from 
eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4960, 4998), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['RGB', '"""Red-rgb-gamma.jpg"""'], {}), "(RGB, 'Red-rgb-gamma.jpg')\n", (4972, 4998), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((5212, 5274), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['Si', '"""Red-entropy-shade-removal.jpg"""'], {'cmap': '"""gray"""'}), "(Si, 'Red-entropy-shade-removal.jpg', cmap='gray')\n", (5224, 5274), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((5275, 5330), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['Ri', '"""Red-entropy-albedo.jpg"""'], {'cmap': '"""gray"""'}), "(Ri, 'Red-entropy-albedo.jpg', cmap='gray')\n", (5287, 5330), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((5395, 5463), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['ani[:, :, 0]', '"""Red-entropy-anistropy.jpg"""'], {'cmap': '"""gray"""'}), "(ani[:, :, 0], 'Red-entropy-anistropy.jpg', cmap='gray')\n", (5407, 5463), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((5531, 5562), 'eratosthenes.preprocessing.shadow_filters.anistropic_diffusion_scalar', 'anistropic_diffusion_scalar', (['Si'], {}), '(Si)\n', (5558, 5562), False, 'from eratosthenes.preprocessing.shadow_filters import fade_shadow_cast, anistropic_diffusion_scalar\n'), ((5625, 5677), 'eratosthenes.processing.matching_tools.get_coordinates_of_template_centers', 'get_coordinates_of_template_centers', (['Zn', 'window_size'], {}), '(Zn, window_size)\n', (5660, 5677), False, 'from eratosthenes.processing.matching_tools import get_coordinates_of_template_centers\n'), ((5689, 5750), 
'eratosthenes.preprocessing.acquisition_geometry.get_template_aspect_slope', 'get_template_aspect_slope', (['Z', 'sample_I', 'sample_J', 'window_size'], {}), '(Z, sample_I, sample_J, window_size)\n', (5714, 5750), False, 'from eratosthenes.preprocessing.acquisition_geometry import get_template_acquisition_angles, get_template_aspect_slope\n'), ((6407, 6432), 'eratosthenes.postprocessing.solar_tools.make_shadowing', 'make_shadowing', (['Z', 'az', 'zn'], {}), '(Z, az, zn)\n', (6421, 6432), False, 'from eratosthenes.postprocessing.solar_tools import make_shading, make_shadowing\n'), ((6440, 6463), 'eratosthenes.postprocessing.solar_tools.make_shading', 'make_shading', (['Z', 'az', 'zn'], {}), '(Z, az, zn)\n', (6452, 6463), False, 'from eratosthenes.postprocessing.solar_tools import make_shading, make_shadowing\n'), ((7070, 7137), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['(-1.0 * M_acwe)', '"""Red-snake-shadow-si.jpg"""'], {'cmap': '"""gray"""'}), "(-1.0 * M_acwe, 'Red-snake-shadow-si.jpg', cmap='gray')\n", (7082, 7137), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((7162, 7187), 'eratosthenes.preprocessing.shadow_filters.fade_shadow_cast', 'fade_shadow_cast', (['Shw', 'az'], {}), '(Shw, az)\n', (7178, 7187), False, 'from eratosthenes.preprocessing.shadow_filters import fade_shadow_cast, anistropic_diffusion_scalar\n'), ((7220, 7276), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['(Shw != True)', '"""Red-shadow.jpg"""'], {'cmap': '"""gray"""'}), "(Shw != True, 'Red-shadow.jpg', cmap='gray')\n", (7232, 7276), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((7275, 7333), 'eratosthenes.presentation.image_io.output_image', 'output_image', (['(-1 * Shf)', '"""Red-shadow-fade.jpg"""'], {'cmap': '"""gray"""'}), "(-1 * Shf, 'Red-shadow-fade.jpg', cmap='gray')\n", (7287, 7333), False, 'from 
eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((7686, 7726), 'eratosthenes.preprocessing.shadow_geometry.cast_orientation', 'cast_orientation', (['Shd', 'Az'], {'indexing': '"""xy"""'}), "(Shd, Az, indexing='xy')\n", (7702, 7726), False, 'from eratosthenes.preprocessing.shadow_geometry import cast_orientation\n'), ((7833, 7872), 'eratosthenes.preprocessing.shadow_geometry.cast_orientation', 'cast_orientation', (['Si', 'Az'], {'indexing': '"""xy"""'}), "(Si, Az, indexing='xy')\n", (7849, 7872), False, 'from eratosthenes.preprocessing.shadow_geometry import cast_orientation\n'), ((8003, 8045), 'eratosthenes.generic.mapping_tools.pix2map', 'pix2map', (['geoTransformM', 'sample_I', 'sample_J'], {}), '(geoTransformM, sample_I, sample_J)\n', (8010, 8045), False, 'from eratosthenes.generic.mapping_tools import pix2map\n'), ((8076, 8282), 'eratosthenes.processing.coupling_tools.match_pair', 'match_pair', (['Shi', 'Si', 'Stable', 'Stable', 'geoTransformM', 'geoTransformM', 'sample_X', 'sample_Y'], {'temp_radius': 'window_size', 'search_radius': 'window_size', 'correlator': '"""robu_corr"""', 'subpix': '"""moment"""', 'metric': '"""peak_entr"""'}), "(Shi, Si, Stable, Stable, geoTransformM, geoTransformM, sample_X,\n sample_Y, temp_radius=window_size, search_radius=window_size,\n correlator='robu_corr', subpix='moment', metric='peak_entr')\n", (8086, 8282), False, 'from eratosthenes.processing.coupling_tools import match_pair\n'), ((9410, 9456), 'eratosthenes.generic.handler_im.bilinear_interpolation', 'bilinear_interpolation', (['Sc', 'dj_coreg', 'di_coreg'], {}), '(Sc, dj_coreg, di_coreg)\n', (9432, 9456), False, 'from eratosthenes.generic.handler_im import bilinear_interpolation, rescale_image\n'), ((9468, 9514), 'eratosthenes.generic.handler_im.bilinear_interpolation', 'bilinear_interpolation', (['Si', 'dj_coreg', 'di_coreg'], {}), '(Si, dj_coreg, di_coreg)\n', (9490, 9514), False, 'from eratosthenes.generic.handler_im import 
bilinear_interpolation, rescale_image\n'), ((9526, 9572), 'eratosthenes.generic.handler_im.bilinear_interpolation', 'bilinear_interpolation', (['Ri', 'dj_coreg', 'di_coreg'], {}), '(Ri, dj_coreg, di_coreg)\n', (9548, 9572), False, 'from eratosthenes.generic.handler_im import bilinear_interpolation, rescale_image\n'), ((9585, 9632), 'eratosthenes.generic.handler_im.bilinear_interpolation', 'bilinear_interpolation', (['RGB', 'dj_coreg', 'di_coreg'], {}), '(RGB, dj_coreg, di_coreg)\n', (9607, 9632), False, 'from eratosthenes.generic.handler_im import bilinear_interpolation, rescale_image\n'), ((11759, 11811), 'eratosthenes.processing.matching_tools.get_coordinates_of_template_centers', 'get_coordinates_of_template_centers', (['Zn', 'window_size'], {}), '(Zn, window_size)\n', (11794, 11811), False, 'from eratosthenes.processing.matching_tools import get_coordinates_of_template_centers\n'), ((11823, 11879), 'eratosthenes.preprocessing.acquisition_geometry.get_template_aspect_slope', 'get_template_aspect_slope', (['Z', 'sample_I', 'sample_J', 't_size'], {}), '(Z, sample_I, sample_J, t_size)\n', (11848, 11879), False, 'from eratosthenes.preprocessing.acquisition_geometry import get_template_acquisition_angles, get_template_aspect_slope\n'), ((11958, 11970), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11968, 11970), True, 'import matplotlib.pyplot as plt\n'), ((12250, 12270), 'eratosthenes.generic.terrain_tools.terrain_curvature', 'terrain_curvature', (['Z'], {}), '(Z)\n', (12267, 12270), False, 'from eratosthenes.generic.terrain_tools import ridge_orientation, terrain_curvature\n'), ((12278, 12298), 'eratosthenes.generic.terrain_tools.ridge_orientation', 'ridge_orientation', (['Z'], {}), '(Z)\n', (12295, 12298), False, 'from eratosthenes.generic.terrain_tools import ridge_orientation, terrain_curvature\n'), ((12628, 12684), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), 
'(nrows=1, ncols=2, sharex=True, sharey=True)\n', (12640, 12684), True, 'import matplotlib.pyplot as plt\n'), ((12686, 12723), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Curv[Ridge]', 'Z_az[Ridge]'], {}), '(Curv[Ridge], Z_az[Ridge])\n', (12697, 12723), True, 'import matplotlib.pyplot as plt\n'), ((12775, 12831), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=1, ncols=2, sharex=True, sharey=True)\n', (12787, 12831), True, 'import matplotlib.pyplot as plt\n'), ((13080, 13090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13088, 13090), True, 'import matplotlib.pyplot as plt\n'), ((13109, 13165), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=1, ncols=2, sharex=True, sharey=True)\n', (13121, 13165), True, 'import matplotlib.pyplot as plt\n'), ((13472, 13482), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13480, 13482), True, 'import matplotlib.pyplot as plt\n'), ((13501, 13557), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=1, ncols=2, sharex=True, sharey=True)\n', (13513, 13557), True, 'import matplotlib.pyplot as plt\n'), ((13626, 13636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13634, 13636), True, 'import matplotlib.pyplot as plt\n'), ((13655, 13711), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=1, ncols=2, sharex=True, sharey=True)\n', (13667, 13711), True, 'import matplotlib.pyplot as plt\n'), ((13776, 13786), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13784, 13786), True, 'import matplotlib.pyplot as plt\n'), ((13973, 13983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13981, 13983), True, 'import matplotlib.pyplot as plt\n'), ((13985, 14090), 
'matplotlib.pyplot.hist2d', 'plt.hist2d', (['Asp[Stable]', 'dI[Stable]'], {'bins': '(90)', 'range': '[[-180, +180], [-1, +1]]', 'cmap': 'plt.cm.gist_heat_r'}), '(Asp[Stable], dI[Stable], bins=90, range=[[-180, +180], [-1, +1]],\n cmap=plt.cm.gist_heat_r)\n', (13995, 14090), True, 'import matplotlib.pyplot as plt\n'), ((14109, 14119), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14117, 14119), True, 'import matplotlib.pyplot as plt\n'), ((14121, 14142), 'matplotlib.pyplot.hist', 'plt.hist', (['Asp[Stable]'], {}), '(Asp[Stable])\n', (14129, 14142), True, 'import matplotlib.pyplot as plt\n'), ((14143, 14153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14151, 14153), True, 'import matplotlib.pyplot as plt\n'), ((14474, 14564), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['Asp[IN]', 'dD[IN]'], {'bins': '(90)', 'range': '[[-180, +180], [0, 0.4]]', 'cmap': 'plt.cm.jet'}), '(Asp[IN], dD[IN], bins=90, range=[[-180, +180], [0, 0.4]], cmap=\n plt.cm.jet)\n', (14484, 14564), True, 'import matplotlib.pyplot as plt\n'), ((14559, 14569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14567, 14569), True, 'import matplotlib.pyplot as plt\n'), ((14628, 14638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14636, 14638), True, 'import matplotlib.pyplot as plt\n'), ((2343, 2369), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2359, 2369), False, 'import os\n'), ((2374, 2385), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2383, 2385), False, 'import os\n'), ((2401, 2419), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (2409, 2419), False, 'import os\n'), ((2700, 2839), 'os.path.join', 'os.path.join', (['"""/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full"""', '"""T05VMG_20191015T213531_B08.jp2"""'], {}), "(\n '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full'\n , 'T05VMG_20191015T213531_B08.jp2')\n", (2712, 2839), False, 'import os\n'), 
((4332, 4348), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Red'], {}), '(Red)\n', (4343, 4348), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4385, 4403), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Green'], {}), '(Green)\n', (4396, 4403), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4440, 4457), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Blue'], {}), '(Blue)\n', (4451, 4457), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4580, 4596), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Red'], {}), '(Red)\n', (4591, 4596), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4598, 4616), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Green'], {}), '(Green)\n', (4609, 4616), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4618, 4635), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Blue'], {}), '(Blue)\n', (4629, 4635), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4659, 4676), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Blue'], {}), '(Blue)\n', (4670, 4676), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4678, 4696), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 
'mat_to_gray', (['Green'], {}), '(Green)\n', (4689, 4696), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4720, 4736), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Red'], {}), '(Red)\n', (4731, 4736), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4738, 4755), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Near'], {}), '(Near)\n', (4749, 4755), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((5030, 5047), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Blue'], {}), '(Blue)\n', (5041, 5047), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((5079, 5095), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Red'], {}), '(Red)\n', (5090, 5095), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((5127, 5144), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Near'], {}), '(Near)\n', (5138, 5144), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((5367, 5390), 'numpy.dstack', 'np.dstack', (['(Sn, Si, Ri)'], {}), '((Sn, Si, Ri))\n', (5376, 5390), True, 'import numpy as np\n'), ((5569, 5585), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['(-Sn)'], {}), '(-Sn)\n', (5580, 5585), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((5586, 5601), 
'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Si'], {}), '(Si)\n', (5597, 5601), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((5835, 5879), 'eratosthenes.presentation.image_io.resize_image', 'resize_image', (['Asp', 'Z.shape'], {'method': '"""nearest"""'}), "(Asp, Z.shape, method='nearest')\n", (5847, 5879), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((5966, 6010), 'eratosthenes.presentation.image_io.resize_image', 'resize_image', (['Slp', 'Z.shape'], {'method': '"""nearest"""'}), "(Slp, Z.shape, method='nearest')\n", (5978, 6010), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((6775, 6879), 'morphsnakes.morphological_chan_vese', 'ms.morphological_chan_vese', (['Si', 'i'], {'init_level_set': 'Shw', 'smoothing': '(0)', 'lambda1': '(1)', 'lambda2': '(1)', 'albedo': 'Ri'}), '(Si, i, init_level_set=Shw, smoothing=0, lambda1=\n 1, lambda2=1, albedo=Ri)\n', (6801, 6879), True, 'import morphsnakes as ms\n'), ((6971, 6999), 'eratosthenes.generic.gis_tools.get_mask_boundary', 'get_mask_boundary', (['M_acwe_si'], {}), '(M_acwe_si)\n', (6988, 6999), False, 'from eratosthenes.generic.gis_tools import get_mask_boundary\n'), ((9049, 9061), 'numpy.isnan', 'np.isnan', (['dX'], {}), '(dX)\n', (9057, 9061), True, 'import numpy as np\n'), ((9071, 9094), 'numpy.remainder', 'np.remainder', (['dX[IN]', '(1)'], {}), '(dX[IN], 1)\n', (9083, 9094), True, 'import numpy as np\n'), ((9280, 9297), 'numpy.median', 'np.median', (['dX[IN]'], {}), '(dX[IN])\n', (9289, 9297), True, 'import numpy as np\n'), ((9299, 9316), 'numpy.median', 'np.median', (['dY[IN]'], {}), '(dY[IN])\n', (9308, 9316), True, 'import numpy as np\n'), ((10258, 10284), 'numpy.sqrt', 'np.sqrt', (['(dY ** 2 + dX ** 2)'], {}), '(dY ** 2 + dX ** 2)\n', (10265, 10284), True, 'import numpy as 
np\n'), ((10280, 10298), 'numpy.arctan2', 'np.arctan2', (['dX', 'dY'], {}), '(dX, dY)\n', (10290, 10298), True, 'import numpy as np\n'), ((10349, 10401), 'eratosthenes.presentation.image_io.resize_image', 'resize_image', (['match_score', 'Z.shape'], {'method': '"""nearest"""'}), "(match_score, Z.shape, method='nearest')\n", (10361, 10401), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((10491, 10535), 'eratosthenes.presentation.image_io.resize_image', 'resize_image', (['Phi', 'Z.shape'], {'method': '"""nearest"""'}), "(Phi, Z.shape, method='nearest')\n", (10503, 10535), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((10624, 10668), 'eratosthenes.presentation.image_io.resize_image', 'resize_image', (['Rho', 'Z.shape'], {'method': '"""nearest"""'}), "(Rho, Z.shape, method='nearest')\n", (10636, 10668), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((10811, 10849), 'numpy.sqrt', 'np.sqrt', (['(dY_coreg ** 2 + dX_coreg ** 2)'], {}), '(dY_coreg ** 2 + dX_coreg ** 2)\n', (10818, 10849), True, 'import numpy as np\n'), ((10870, 10900), 'numpy.arctan2', 'np.arctan2', (['dX_coreg', 'dY_coreg'], {}), '(dX_coreg, dY_coreg)\n', (10880, 10900), True, 'import numpy as np\n'), ((10913, 10963), 'eratosthenes.presentation.image_io.resize_image', 'resize_image', (['Phi_coreg', 'Z.shape'], {'method': '"""nearest"""'}), "(Phi_coreg, Z.shape, method='nearest')\n", (10925, 10963), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((11058, 11108), 'eratosthenes.presentation.image_io.resize_image', 'resize_image', (['Rho_coreg', 'Z.shape'], {'method': '"""nearest"""'}), "(Rho_coreg, Z.shape, method='nearest')\n", (11070, 11108), False, 'from eratosthenes.presentation.image_io import output_image, resize_image, output_mask\n'), ((12137, 12152), 'numpy.radians', 'np.radians', 
(['Slp'], {}), '(Slp)\n', (12147, 12152), True, 'import numpy as np\n'), ((12385, 12408), 'numpy.radians', 'np.radians', (['Asp[Stable]'], {}), '(Asp[Stable])\n', (12395, 12408), True, 'import numpy as np\n'), ((12512, 12535), 'numpy.radians', 'np.radians', (['Asp[Stable]'], {}), '(Asp[Stable])\n', (12522, 12535), True, 'import numpy as np\n'), ((14306, 14331), 'numpy.sqrt', 'np.sqrt', (['(dY ** 2)', '(dX ** 2)'], {}), '(dY ** 2, dX ** 2)\n', (14313, 14331), True, 'import numpy as np\n'), ((14328, 14346), 'numpy.arctan2', 'np.arctan2', (['dY', 'dX'], {}), '(dY, dX)\n', (14338, 14346), True, 'import numpy as np\n'), ((2564, 2591), 'os.path.join', 'os.path.join', (['Z_dir', 'Z_file'], {}), '(Z_dir, Z_file)\n', (2576, 2591), False, 'import os\n'), ((2615, 2642), 'os.path.join', 'os.path.join', (['Z_dir', 'R_file'], {}), '(Z_dir, R_file)\n', (2627, 2642), False, 'import os\n'), ((2884, 3023), 'os.path.join', 'os.path.join', (['"""/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full"""', '"""T05VMG_20191015T213531_B08.jp2"""'], {}), "(\n '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full'\n , 'T05VMG_20191015T213531_B08.jp2')\n", (2896, 3023), False, 'import os\n'), ((4089, 4119), 'os.path.join', 'os.path.join', (['s2path', '"""B2.tif"""'], {}), "(s2path, 'B2.tif')\n", (4101, 4119), False, 'import os\n'), ((4147, 4177), 'os.path.join', 'os.path.join', (['s2path', '"""B3.tif"""'], {}), "(s2path, 'B3.tif')\n", (4159, 4177), False, 'import os\n'), ((4203, 4233), 'os.path.join', 'os.path.join', (['s2path', '"""B4.tif"""'], {}), "(s2path, 'B4.tif')\n", (4215, 4233), False, 'import os\n'), ((4260, 4290), 'os.path.join', 'os.path.join', (['s2path', '"""B8.tif"""'], {}), "(s2path, 'B8.tif')\n", (4272, 4290), False, 'import os\n'), ((4775, 4808), 'eratosthenes.preprocessing.image_transforms.gamma_adjustment', 'gamma_adjustment', (['Red'], {'gamma': '(0.25)'}), '(Red, gamma=0.25)\n', (4791, 4808), False, 'from 
eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4826, 4861), 'eratosthenes.preprocessing.image_transforms.gamma_adjustment', 'gamma_adjustment', (['Green'], {'gamma': '(0.25)'}), '(Green, gamma=0.25)\n', (4842, 4861), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((4879, 4913), 'eratosthenes.preprocessing.image_transforms.gamma_adjustment', 'gamma_adjustment', (['Blue'], {'gamma': '(0.25)'}), '(Blue, gamma=0.25)\n', (4895, 4913), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray, normalize_histogram, gamma_adjustment, histogram_equalization\n'), ((6711, 6745), 'numpy.linspace', 'np.linspace', (['(0)', 'counts', '(counts + 1)'], {}), '(0, counts, counts + 1)\n', (6722, 6745), True, 'import numpy as np\n'), ((12200, 12222), 'numpy.zeros_like', 'np.zeros_like', (['tan_Slp'], {}), '(tan_Slp)\n', (12213, 12222), True, 'import numpy as np\n'), ((13799, 13836), 'numpy.divide', 'np.divide', (['(Blue - Green)', '(Blue + Green)'], {}), '(Blue - Green, Blue + Green)\n', (13808, 13836), True, 'import numpy as np\n'), ((13852, 13885), 'numpy.divide', 'np.divide', (['(Red - Near)', '(Red + Near)'], {}), '(Red - Near, Red + Near)\n', (13861, 13885), True, 'import numpy as np\n'), ((14373, 14388), 'numpy.radians', 'np.radians', (['Slp'], {}), '(Slp)\n', (14383, 14388), True, 'import numpy as np\n'), ((14419, 14434), 'numpy.radians', 'np.radians', (['Slp'], {}), '(Slp)\n', (14429, 14434), True, 'import numpy as np\n'), ((3898, 3935), 'os.path.join', 'os.path.join', (['s2path', '"""detIdBlue.tif"""'], {}), "(s2path, 'detIdBlue.tif')\n", (3910, 3935), False, 'import os\n'), ((3964, 3998), 'os.path.join', 'os.path.join', (['s2path', '"""viewZn.tif"""'], {}), "(s2path, 'viewZn.tif')\n", (3976, 3998), False, 'import os\n'), ((4027, 4061), 'os.path.join', 'os.path.join', (['s2path', 
'"""viewAz.tif"""'], {}), "(s2path, 'viewAz.tif')\n", (4039, 4061), False, 'import os\n'), ((8850, 8905), 'numpy.repeat', 'np.repeat', (['sample_Y[:, :, np.newaxis]', 'num_disp'], {'axis': '(2)'}), '(sample_Y[:, :, np.newaxis], num_disp, axis=2)\n', (8859, 8905), True, 'import numpy as np\n'), ((8927, 8982), 'numpy.repeat', 'np.repeat', (['sample_X[:, :, np.newaxis]', 'num_disp'], {'axis': '(2)'}), '(sample_X[:, :, np.newaxis], num_disp, axis=2)\n', (8936, 8982), True, 'import numpy as np\n'), ((14609, 14624), 'numpy.radians', 'np.radians', (['Slp'], {}), '(Slp)\n', (14619, 14624), True, 'import numpy as np\n')] |
import os
from classy_blocks.classes.mesh import Mesh
from classy_blocks.classes.operations import Face, Extrude
import numpy as np
def load_airfoil_file(filename, chord=1):
    """Read a Lednicer-format airfoil file and return its surface points.

    Reads the format served by airfoiltools.com
    (sample: http://airfoiltools.com/airfoil/details?airfoil=tempest1-il):
    a name line, a line with the upper/lower point counts, then the two
    point lists, each preceded by a blank line.

    Args:
        filename: path to the .dat airfoil file.
        chord: scale factor applied to the (unit-chord) coordinates.

    Returns:
        Tuple ``(points_upper, points_lower)`` of numpy arrays of shape
        (n, 3); a zero z-coordinate is appended to every 2D point.
    """
    def line_to_numbers(line):
        # "x y" -> [x, y, 0]: append a z-coordinate for 3D mesh points
        p2d = [float(s) for s in line.split()]
        return [p2d[0], p2d[1], 0]

    with open(filename, 'r') as f:
        f.readline()  # first line: airfoil name, not needed
        # second line: point counts for upper and lower surface
        # (written as floats in the file, hence float() before int())
        n_upper, n_lower = (int(float(s)) for s in f.readline().split())
        f.readline()  # blank separator line
        points_upper = [line_to_numbers(f.readline()) for _ in range(n_upper)]
        f.readline()  # blank line between upper and lower surfaces
        points_lower = [line_to_numbers(f.readline()) for _ in range(n_lower)]

    return np.array(points_upper)*chord, np.array(points_lower)*chord
# finding points closest to wanted coordinate
def find_y(points, x):
    """Return (index, y) of the first point whose x-coordinate exceeds *x*.

    Falls back to ``(0, 0)`` when no point lies past the given coordinate.
    """
    for index, point in enumerate(points):
        if point[0] > x:
            return index, point[1]
    return 0, 0
def get_mesh():
    """Build a one-cell-thick, six-block mesh around a 2D airfoil section.

    The domain is a half-circle in front of the airfoil plus a rectangular
    wake region behind it; three blocks above the chord line and three
    mirrored blocks below it.  Returns the assembled classy_blocks Mesh.
    """
    ###
    ### parameters
    ###
    chord = 0.5 # chord length [m]
    domain_radius = 1.0 # defines domain size in front of the airfoil and domain height [m]
    radius_center = 0.30 # position of circle radius, relative to chord []
    domain_length = 10.0*chord # length of rectangular section behind the half-circle [m]
    thickness = [0, 0, 0.2] # domain thickness (extrude vector)
    cell_size = 0.01 # cell size next to the airfoil surface [m]
    ###
    ### point preparation
    ###
    # NOTE(review): the path is relative to the repository root — running
    # from another working directory will not find the .dat file
    p_upper, p_lower = load_airfoil_file(os.path.join('examples', 'operation', 'airfoil_1.dat'), chord=chord)
    ###
    ### block creation
    ###
    # top block 1: inlet arc to the airfoil point above radius_center
    i, y, = find_y(p_upper, chord*radius_center)
    max_x_1 = chord*radius_center
    # point on the 45-degree diagonal of the inlet circle; presumably used
    # as the arc mid-point of the circular inlet edge — confirm against
    # classy_blocks edge conventions
    radius_edge = chord*domain_radius*2**0.5
    face_top_1_vertices = [
        [max_x_1-domain_radius, 0, 0],
        [0, 0, 0],
        [max_x_1, y, 0],
        [max_x_1, domain_radius, 0],
    ]
    face_top_1_edges = [
        None,
        p_upper[0:i-1], # follow the airfoil's upper surface
        None,
        [-radius_edge + chord*radius_center, radius_edge, 0]
    ]
    # create a face from points and edges
    face_top_1 = Face(face_top_1_vertices, face_top_1_edges)
    # create an Extrude operation from face and extrude vector
    extrude_top_1 = Extrude(face_top_1, thickness)
    # set cell counts on all axes for the first block
    extrude_top_1.chop(0, start_size=cell_size, c2c_expansion=1.1, invert=True)
    extrude_top_1.chop(1, count=30)
    extrude_top_1.chop(2, count=1) # single cell through the extrusion (2D case)
    # top block 2: above the rear portion of the airfoil
    face_top_2_vertices = [
        face_top_1_vertices[2],
        [chord, 0, 0],
        [chord, domain_radius, 0],
        [max_x_1, domain_radius, 0]
    ]
    face_top_2_edges = [p_upper[i:], None, None, None]
    face_top_2 = Face(face_top_2_vertices, face_top_2_edges)
    extrude_top_2 = Extrude(face_top_2, thickness)
    extrude_top_2.chop(0, start_size=cell_size)
    # other cell counts must match other blocks' so they need not be set
    # top block 3: rectangular wake region above the chord line
    face_top_3 = Face([
        [chord, 0, 0],
        [domain_length, 0, 0],
        [domain_length, domain_radius, 0],
        [chord, domain_radius, 0]
    ])
    extrude_top_3 = Extrude(face_top_3, thickness)
    extrude_top_3.chop(0, start_size=cell_size, c2c_expansion=1.1)
    # bottom block 1 (mirror of top block 1)
    i, y, = find_y(p_lower, chord*radius_center)
    face_bottom_1_vertices = [
        [max_x_1-domain_radius, 0, 0],
        [max_x_1, -domain_radius, 0],
        [max_x_1, y, 0],
        [0, 0, 0],
    ]
    face_bottom_1_edges = [
        [-radius_edge + chord*radius_center, -radius_edge, 0],
        None,
        np.flip(p_lower[0:i-1], axis=0), # this block is defined in reverse so edge points must be reversed as well
        None
    ]
    face_bottom_1 = Face(face_bottom_1_vertices, face_bottom_1_edges)
    extrude_bottom_1 = Extrude(face_bottom_1, thickness)
    extrude_bottom_1.chop(0, count=30)
    # bottom block 2 (mirror of top block 2)
    face_bottom_2_vertices = [
        face_bottom_1_vertices[2],
        [max_x_1, -domain_radius, 0],
        [chord, -domain_radius, 0],
        [chord, 0, 0]
    ]
    face_bottom_2_edges = [None, None, None, np.flip(p_lower[i:], axis=0)]
    face_bottom_2 = Face(face_bottom_2_vertices, face_bottom_2_edges)
    extrude_bottom_2 = Extrude(face_bottom_2, thickness)
    extrude_bottom_2.chop(1, start_size=cell_size)
    # bottom block 3: rectangular wake region below the chord line
    face_bottom_3 = Face([
        [chord, 0, 0],
        [chord, -domain_radius, 0],
        [domain_length, -domain_radius, 0],
        [domain_length, 0, 0]
    ])
    extrude_bottom_3 = Extrude(face_bottom_3, thickness)
    # NOTE(review): no chop() is called on extrude_bottom_3 — presumably its
    # cell counts carry over from the neighbouring blocks; confirm intended
    mesh = Mesh()
    mesh.add(extrude_top_1)
    mesh.add(extrude_top_2)
    mesh.add(extrude_top_3)
    mesh.add(extrude_bottom_1)
    mesh.add(extrude_bottom_2)
    mesh.add(extrude_bottom_3)
    return mesh
| [
"numpy.flip",
"os.path.join",
"classy_blocks.classes.operations.Face",
"numpy.array",
"classy_blocks.classes.operations.Extrude",
"classy_blocks.classes.mesh.Mesh"
] | [((2551, 2594), 'classy_blocks.classes.operations.Face', 'Face', (['face_top_1_vertices', 'face_top_1_edges'], {}), '(face_top_1_vertices, face_top_1_edges)\n', (2555, 2594), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((2678, 2708), 'classy_blocks.classes.operations.Extrude', 'Extrude', (['face_top_1', 'thickness'], {}), '(face_top_1, thickness)\n', (2685, 2708), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((3168, 3211), 'classy_blocks.classes.operations.Face', 'Face', (['face_top_2_vertices', 'face_top_2_edges'], {}), '(face_top_2_vertices, face_top_2_edges)\n', (3172, 3211), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((3232, 3262), 'classy_blocks.classes.operations.Extrude', 'Extrude', (['face_top_2', 'thickness'], {}), '(face_top_2, thickness)\n', (3239, 3262), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((3420, 3531), 'classy_blocks.classes.operations.Face', 'Face', (['[[chord, 0, 0], [domain_length, 0, 0], [domain_length, domain_radius, 0], [\n chord, domain_radius, 0]]'], {}), '([[chord, 0, 0], [domain_length, 0, 0], [domain_length, domain_radius, \n 0], [chord, domain_radius, 0]])\n', (3424, 3531), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((3585, 3615), 'classy_blocks.classes.operations.Extrude', 'Extrude', (['face_top_3', 'thickness'], {}), '(face_top_3, thickness)\n', (3592, 3615), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((4174, 4223), 'classy_blocks.classes.operations.Face', 'Face', (['face_bottom_1_vertices', 'face_bottom_1_edges'], {}), '(face_bottom_1_vertices, face_bottom_1_edges)\n', (4178, 4223), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((4247, 4280), 'classy_blocks.classes.operations.Extrude', 'Extrude', (['face_bottom_1', 'thickness'], {}), '(face_bottom_1, thickness)\n', (4254, 4280), False, 'from classy_blocks.classes.operations import 
Face, Extrude\n'), ((4607, 4656), 'classy_blocks.classes.operations.Face', 'Face', (['face_bottom_2_vertices', 'face_bottom_2_edges'], {}), '(face_bottom_2_vertices, face_bottom_2_edges)\n', (4611, 4656), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((4680, 4713), 'classy_blocks.classes.operations.Extrude', 'Extrude', (['face_bottom_2', 'thickness'], {}), '(face_bottom_2, thickness)\n', (4687, 4713), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((4807, 4920), 'classy_blocks.classes.operations.Face', 'Face', (['[[chord, 0, 0], [chord, -domain_radius, 0], [domain_length, -domain_radius,\n 0], [domain_length, 0, 0]]'], {}), '([[chord, 0, 0], [chord, -domain_radius, 0], [domain_length, -\n domain_radius, 0], [domain_length, 0, 0]])\n', (4811, 4920), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((4977, 5010), 'classy_blocks.classes.operations.Extrude', 'Extrude', (['face_bottom_3', 'thickness'], {}), '(face_bottom_3, thickness)\n', (4984, 5010), False, 'from classy_blocks.classes.operations import Face, Extrude\n'), ((5024, 5030), 'classy_blocks.classes.mesh.Mesh', 'Mesh', ([], {}), '()\n', (5028, 5030), False, 'from classy_blocks.classes.mesh import Mesh\n'), ((1936, 1990), 'os.path.join', 'os.path.join', (['"""examples"""', '"""operation"""', '"""airfoil_1.dat"""'], {}), "('examples', 'operation', 'airfoil_1.dat')\n", (1948, 1990), False, 'import os\n'), ((4026, 4059), 'numpy.flip', 'np.flip', (['p_lower[0:i - 1]'], {'axis': '(0)'}), '(p_lower[0:i - 1], axis=0)\n', (4033, 4059), True, 'import numpy as np\n'), ((4556, 4584), 'numpy.flip', 'np.flip', (['p_lower[i:]'], {'axis': '(0)'}), '(p_lower[i:], axis=0)\n', (4563, 4584), True, 'import numpy as np\n'), ((1099, 1121), 'numpy.array', 'np.array', (['points_upper'], {}), '(points_upper)\n', (1107, 1121), True, 'import numpy as np\n'), ((1129, 1151), 'numpy.array', 'np.array', (['points_lower'], {}), '(points_lower)\n', (1137, 1151), True, 
'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 05 16:40:31 2015
@author: <NAME>
"""
import numpy as np
import sklearn.metrics as skm
import sklearn.ensemble as ske
import sklearn.cross_validation as skcv
"""
Selects some trips from a main driver, and some trips from other drivers from the feature matrix, and labels them. There is also the option to exclude trips from the main driver, to prevent duplicate picking. The labels are done such that the main driver trips are labeled as 1, and the 'fake' trips are labeled as 0.
featurematrix: the matrix containing the features, with size (Features x Trips x Drivers)
mainDriverID: The page of the feature matrix that contains the features of the main driver.
numD: The number of trips from the main driver to pick
numF: The number of trips from other drivers to pick
alreadySelectedTrips: An array containing the trips from the main driver that have already been chosen.
output:
features: a (numD+numF) x Features matrix containing the features of the selected trips
labels: a (numD+numF) array containing zeros and ones, depending on what class the trip belongs to
chosentrips: the chosen trips, equal to the input, concatenated with the trips chosen in this function.
"""
def getTrips(featureMatrix, mainDriverId, numReal = 200, numFake=200):
    """Draw labelled trips: the main driver's trips (label 1) plus random
    trips from other drivers (label 0).

    featureMatrix: array of shape (features, trips, drivers)
    mainDriverId: page index of the main driver
    numReal/numFake: how many real/fake trips to return
    Returns (trips, labels) where trips is ((numReal+numFake) x features).
    """
    n_features, n_trips, n_drivers = np.shape(featureMatrix)
    # Real trips: the main driver's page, transposed to (trips x features).
    real_trips = np.transpose(featureMatrix[:, :, mainDriverId])[:numReal, :]
    # Random (trip, driver) picks; any pick of the main driver is shifted to
    # the next driver so fakes never come from the main driver's page.
    trip_idx = np.random.randint(n_trips, size=(numFake, 1))
    driver_idx = np.random.randint(n_drivers, size=(numFake, 1))
    hit_main = driver_idx == mainDriverId
    driver_idx[hit_main] = (driver_idx[hit_main] + 1) % n_drivers
    fake_trips = np.reshape(
        np.array([featureMatrix[:, trip_idx[k], driver_idx[k]] for k in range(numFake)]),
        (numFake, n_features),
    )
    labels = np.concatenate((np.ones(numReal), np.zeros(numFake)))
    return np.vstack((real_trips, fake_trips)), labels
"""
Creates a sampleweight array by giving the samples in class 1 and samples in class 0
a predetermined weight. Ideally, the sum of the weights is equal for class 0 and 1.
If a weight is equal to None, it is set so that the total weight of that class equals
the total weight of the other class. If both are set to None, that is impossible,
so oneval will be set to 1.0, and zeroval to None.
"""
def createSampleWeight(labels, oneval = None, zeroval = None):
    """Build a per-sample weight array for binary labels.

    Samples labelled 1 get weight ``oneval``; samples labelled 0 get
    ``zeroval``.  If one of the two is None, it is computed so that the total
    weight of that class equals the total weight of the other class.  If both
    are None, ``oneval`` is set to 1.0 and ``zeroval`` is balanced against it.

    labels: numpy array of 0/1 class labels
    returns: numpy array of per-sample weights, same length as labels

    NOTE(review): if either class is absent the balancing division is by
    zero, as in the original — callers are assumed to pass both classes.
    """
    if oneval is None and zeroval is None:
        oneval = 1.0
    if oneval is None:
        # Balance so count(1) * oneval == count(0) * zeroval.
        # (Bug fix: the original computed the reciprocal, count1 / zeroweight.)
        zeroweight = np.count_nonzero(labels == 0) * zeroval
        oneval = zeroweight / np.count_nonzero(labels == 1)
    if zeroval is None:
        # Balance so count(0) * zeroval == count(1) * oneval.
        # (Bug fix: the original computed the reciprocal, count0 / oneweight.)
        oneweight = np.count_nonzero(labels == 1) * oneval
        zeroval = oneweight / np.count_nonzero(labels == 0)
    scores = np.zeros(len(labels))
    scores[labels == 1] = oneval
    scores[labels == 0] = zeroval
    return scores
"""
Evaluates a given probability array with 'gold' standard labels and a threshold
As output, gives precision and recall of both classes.
probabilities: an array containing the probability that the trip belongs to class 1
labels: the actual label of the trip
threshold: how high the probability needs to be for the trip to be considered a part of class 1
"""
def evaluation(probabilities, labels, threshold):
    """Threshold class-1 probabilities and score them against the true labels.

    probabilities: array with the probability each trip belongs to class 1
    labels: the actual label of each trip
    threshold: minimum probability to count a trip as class 1
    Returns (precision, recall); each is a per-class array from sklearn.
    """
    predicted = (probabilities >= threshold).astype(int)
    return (
        skm.precision_score(labels, predicted, average=None),
        skm.recall_score(labels, predicted, average=None),
    )
"""
Calulates average accuracy by doing crossvalidation
Should be faster than submitting to kaggle, because you can do it more than 5 times a day
also, it only does 100 drivers by default, instead of 2500+.
The downside is that the score will be less accurate than kaggle's score, because
we don't actually know what the fake trips are
"""
def crossValidation(featureMatrix, model = None, numdrivers = 100, folds = 5, numReal = 200, numFake = 200):
    """Estimate average classifier accuracy by cross-validating over a random
    subset of drivers.

    Faster than submitting to kaggle; less accurate because the fake trips
    are drawn from other drivers rather than being known fakes.

    featureMatrix: array of shape (features, trips, drivers)
    model: classifier to evaluate; defaults to a 50-tree random forest
    numdrivers: how many randomly chosen drivers to evaluate
    folds: number of cross-validation folds
    numReal/numFake: trips per class passed to getTrips
    Returns the mean cross-validation score over the sampled drivers.
    """
    numD = np.shape(featureMatrix)[2]
    if model is None:
        model = ske.RandomForestClassifier(n_estimators=50, n_jobs=-1, criterion='gini', max_features='auto')
    testDrivers = np.random.choice(np.arange(numD), numdrivers, False)
    score = 0
    for i in testDrivers:
        trips, labels = getTrips(featureMatrix, i, numReal, numFake)
        # Bug fix: `folds` was accepted but silently ignored; pass it through
        # as the cv argument instead of using cross_val_score's default.
        result = skcv.cross_val_score(model, trips, labels, cv=folds)
        score = score + np.divide(np.mean(result), numdrivers)
    return score
"""
Trains a model using logistic regression, given some features, and their true class.
features: a list of trip features, in a (trips x features) size
labels: the true labels of the given trips, in array form
penalty, dual, tol, C, class_weight: argument for the logistic regression classifier
see sk_learn documentation for info on what they do
"""
def trainModel(features, labels, criterion = 'gini', max_features = 'auto', n_trees = 10, sample_weight = None, n_jobs = 1):
    """Fit a random forest on the given trips.

    features: (trips x features) matrix
    labels: true class of each trip
    criterion, max_features, n_trees, n_jobs: forwarded to the forest;
        see the sklearn documentation for details
    sample_weight: optional per-sample weights passed to fit()
    Returns the fitted classifier.
    """
    forest = ske.RandomForestClassifier(
        n_estimators=n_trees,
        criterion=criterion,
        max_features=max_features,
        n_jobs=n_jobs,
    )
    return forest.fit(features, labels, sample_weight=sample_weight)
"""
Given a model trained with the trainModel function, and some features, gives an array with the probabilities those features belong to class 1 (aka are real trips)
"""
def predictClass(model, features):
    """Return, for each row of features, the model's probability of class 1
    (i.e. of being a real trip)."""
    proba = model.predict_proba(features)
    return proba[:, 1]
def printMatlabStyle(threedmatrix):
    """Print each page of a 3-D array the way MATLAB displays one."""
    n_pages = np.shape(threedmatrix)[2]
    for page in range(n_pages):
        print('matrix[:,:,' + repr(page) + '] = ')
        print(threedmatrix[:, :, page])
# Example usage: build a tiny feature matrix, print it MATLAB-style, and draw
# labelled real/fake trips from it.
if __name__ == "__main__":
    featureMatrix = np.array([[[4, 2, 3], [4, 1, 2], [6, 2, 1], [4, 1, 2]],
                              [[2, 3, 1], [6, 3, 1], [4, 1, 1], [8, 8, 8]]])
    n_features, n_trips, n_drivers = np.shape(featureMatrix)
    print((n_features, n_trips, n_drivers))
    printMatlabStyle(featureMatrix)
    print((getTrips(featureMatrix, 0, n_trips, n_trips)))
| [
"numpy.mean",
"numpy.reshape",
"numpy.ones",
"sklearn.cross_validation.cross_val_score",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.count_nonzero",
"numpy.vstack",
... | [((1369, 1392), 'numpy.shape', 'np.shape', (['featureMatrix'], {}), '(featureMatrix)\n', (1377, 1392), True, 'import numpy as np\n'), ((1410, 1457), 'numpy.transpose', 'np.transpose', (['featureMatrix[:, :, mainDriverId]'], {}), '(featureMatrix[:, :, mainDriverId])\n', (1422, 1457), True, 'import numpy as np\n'), ((1509, 1553), 'numpy.random.randint', 'np.random.randint', (['numTri'], {'size': '(numFake, 1)'}), '(numTri, size=(numFake, 1))\n', (1526, 1553), True, 'import numpy as np\n'), ((1569, 1613), 'numpy.random.randint', 'np.random.randint', (['numDri'], {'size': '(numFake, 1)'}), '(numDri, size=(numFake, 1))\n', (1586, 1613), True, 'import numpy as np\n'), ((1799, 1839), 'numpy.reshape', 'np.reshape', (['fakeTrips', '(numFake, numFea)'], {}), '(fakeTrips, (numFake, numFea))\n', (1809, 1839), True, 'import numpy as np\n'), ((2816, 2833), 'numpy.zeros', 'np.zeros', (['samples'], {}), '(samples)\n', (2824, 2833), True, 'import numpy as np\n'), ((3427, 3483), 'sklearn.metrics.precision_score', 'skm.precision_score', (['labels', 'probabilities'], {'average': 'None'}), '(labels, probabilities, average=None)\n', (3446, 3483), True, 'import sklearn.metrics as skm\n'), ((3499, 3552), 'sklearn.metrics.recall_score', 'skm.recall_score', (['labels', 'probabilities'], {'average': 'None'}), '(labels, probabilities, average=None)\n', (3515, 3552), True, 'import sklearn.metrics as skm\n'), ((5096, 5211), 'sklearn.ensemble.RandomForestClassifier', 'ske.RandomForestClassifier', ([], {'n_estimators': 'n_trees', 'criterion': 'criterion', 'max_features': 'max_features', 'n_jobs': 'n_jobs'}), '(n_estimators=n_trees, criterion=criterion,\n max_features=max_features, n_jobs=n_jobs)\n', (5122, 5211), True, 'import sklearn.ensemble as ske\n'), ((5907, 6014), 'numpy.array', 'np.array', (['[[[4, 2, 3], [4, 1, 2], [6, 2, 1], [4, 1, 2]], [[2, 3, 1], [6, 3, 1], [4, 1,\n 1], [8, 8, 8]]]'], {}), '([[[4, 2, 3], [4, 1, 2], [6, 2, 1], [4, 1, 2]], [[2, 3, 1], [6, 3, \n 1], [4, 1, 1], [8, 
8, 8]]])\n', (5915, 6014), True, 'import numpy as np\n'), ((6014, 6037), 'numpy.shape', 'np.shape', (['featureMatrix'], {}), '(featureMatrix)\n', (6022, 6037), True, 'import numpy as np\n'), ((1853, 1886), 'numpy.vstack', 'np.vstack', (['(mainTrips, fakeTrips)'], {}), '((mainTrips, fakeTrips))\n', (1862, 1886), True, 'import numpy as np\n'), ((4101, 4124), 'numpy.shape', 'np.shape', (['featureMatrix'], {}), '(featureMatrix)\n', (4109, 4124), True, 'import numpy as np\n'), ((4168, 4265), 'sklearn.ensemble.RandomForestClassifier', 'ske.RandomForestClassifier', ([], {'n_estimators': '(50)', 'n_jobs': '(-1)', 'criterion': '"""gini"""', 'max_features': '"""auto"""'}), "(n_estimators=50, n_jobs=-1, criterion='gini',\n max_features='auto')\n", (4194, 4265), True, 'import sklearn.ensemble as ske\n'), ((4306, 4321), 'numpy.arange', 'np.arange', (['numD'], {}), '(numD)\n', (4315, 4321), True, 'import numpy as np\n'), ((4472, 4514), 'sklearn.cross_validation.cross_val_score', 'skcv.cross_val_score', (['model', 'trips', 'labels'], {}), '(model, trips, labels)\n', (4492, 4514), True, 'import sklearn.cross_validation as skcv\n'), ((2524, 2553), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels == 0)'], {}), '(labels == 0)\n', (2540, 2553), True, 'import numpy as np\n'), ((2582, 2611), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels == 1)'], {}), '(labels == 1)\n', (2598, 2611), True, 'import numpy as np\n'), ((2671, 2700), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels == 1)'], {}), '(labels == 1)\n', (2687, 2700), True, 'import numpy as np\n'), ((2729, 2758), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels == 0)'], {}), '(labels == 0)\n', (2745, 2758), True, 'import numpy as np\n'), ((5611, 5633), 'numpy.shape', 'np.shape', (['threedmatrix'], {}), '(threedmatrix)\n', (5619, 5633), True, 'import numpy as np\n'), ((1904, 1920), 'numpy.ones', 'np.ones', (['numReal'], {}), '(numReal)\n', (1911, 1920), True, 'import numpy as np\n'), ((1922, 1939), 
'numpy.zeros', 'np.zeros', (['numFake'], {}), '(numFake)\n', (1930, 1939), True, 'import numpy as np\n'), ((4550, 4565), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (4557, 4565), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.drivetrainse.layout as lay
import wisdem.drivetrainse.drive_structure as ds
from wisdem.commonse import gravity
npts = 12
class TestDirectStructure(unittest.TestCase):
    def setUp(self):
        """Populate input/output dicts with a representative drivetrain configuration
        shared by all the tests below."""
        self.inputs = {}
        self.outputs = {}
        self.discrete_inputs = {}
        self.discrete_outputs = {}
        self.opt = {}
        self.discrete_inputs["upwind"] = True
        # layout lengths [m]
        self.inputs["L_12"] = 2.0
        self.inputs["L_h1"] = 1.0
        self.inputs["L_generator"] = 3.25
        # self.inputs['L_2n'] = 1.5
        # self.inputs['L_grs'] = 1.1
        # self.inputs['L_gsn'] = 1.1
        self.inputs["L_hss"] = 0.75
        self.inputs["L_gearbox"] = 1.2
        self.inputs["overhang"] = 6.25
        self.inputs["drive_height"] = 4.875
        self.inputs["tilt"] = 4.0
        self.inputs["access_diameter"] = 0.9
        # shaft / nose / bedplate geometry (arrays sized to the component discretization)
        myones = np.ones(5)
        self.inputs["lss_diameter"] = 3.3 * myones
        self.inputs["lss_wall_thickness"] = 0.45 * myones
        self.inputs["hss_diameter"] = 1.6 * np.ones(3)
        self.inputs["hss_wall_thickness"] = 0.25 * np.ones(3)
        self.inputs["nose_diameter"] = 2.2 * myones
        self.inputs["nose_wall_thickness"] = 0.1 * myones
        self.inputs["bedplate_wall_thickness"] = 0.06 * np.ones(npts)
        self.inputs["bedplate_flange_width"] = 1.5
        self.inputs["bedplate_flange_thickness"] = 0.05
        # self.inputs['bedplate_web_height'] = 1.0
        self.inputs["bedplate_web_thickness"] = 0.05
        self.inputs["D_top"] = 6.5
        self.inputs["hub_diameter"] = 4.0
        # masses [kg] and inertias
        self.inputs["other_mass"] = 200e3
        self.inputs["mb1_mass"] = 10e3
        self.inputs["mb1_I"] = 10e3 * 0.5 * 2 ** 2 * np.ones(3)
        self.inputs["mb2_mass"] = 10e3
        self.inputs["mb2_I"] = 10e3 * 0.5 * 1.5 ** 2 * np.ones(3)
        self.inputs["mb1_max_defl_ang"] = 0.008
        self.inputs["mb2_max_defl_ang"] = 0.008
        self.inputs["m_stator"] = 100e3
        self.inputs["cm_stator"] = -0.3
        self.inputs["I_stator"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
        self.inputs["generator_rotor_mass"] = 100e3
        self.inputs["cm_rotor"] = -0.3
        self.inputs["generator_rotor_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
        self.inputs["generator_stator_mass"] = 100e3
        # NOTE(review): "cm_rotor" is assigned twice; this second assignment may
        # have been intended as a stator cm — confirm against the component inputs.
        self.inputs["cm_rotor"] = -0.3
        self.inputs["generator_stator_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
        self.inputs["generator_mass"] = 200e3
        self.inputs["generator_I"] = np.array([2e6, 1e6, 1e6, 0.0, 0.0, 0.0])
        self.inputs["gearbox_mass"] = 100e3
        self.inputs["gearbox_I"] = np.array([1e6, 5e5, 5e5])
        self.inputs["brake_mass"] = 10e3
        self.inputs["brake_I"] = np.array([1e4, 5e3, 5e3])
        self.inputs["carrier_mass"] = 10e3
        self.inputs["carrier_I"] = np.array([1e4, 5e3, 5e3])
        self.inputs["gear_ratio"] = 1.0
        # applied bearing and hub loads; (3, 1) = one design load case
        self.inputs["F_mb1"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
        self.inputs["F_mb2"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
        self.inputs["M_mb1"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
        self.inputs["M_mb2"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
        self.inputs["hub_system_mass"] = 100e3
        self.inputs["hub_system_cm"] = 2.0
        self.inputs["hub_system_I"] = np.array([2409.750e3, -1716.429e3, 74.3529e3, 0.0, 0.0, 0.0])
        self.inputs["F_hub"] = np.array([2409.750e3, 0.0, 74.3529e2]).reshape((3, 1))
        self.inputs["M_hub"] = np.array([-1.83291e4, 6171.7324e2, 5785.82946e2]).reshape((3, 1))
        # steel material properties shared by lss/hss/bedplate
        self.inputs["lss_E"] = self.inputs["hss_E"] = self.inputs["bedplate_E"] = 210e9
        self.inputs["lss_G"] = self.inputs["hss_G"] = self.inputs["bedplate_G"] = 80.8e9
        self.inputs["lss_rho"] = self.inputs["hss_rho"] = self.inputs["bedplate_rho"] = 7850.0
        self.inputs["lss_Xy"] = self.inputs["hss_Xy"] = self.inputs["bedplate_Xy"] = 250e6
        # partial safety factors
        self.opt["gamma_f"] = 1.35
        self.opt["gamma_m"] = 1.3
        self.opt["gamma_n"] = 1.0
def compute_layout(self, direct=True):
myobj = lay.DirectLayout() if direct else lay.GearedLayout()
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
for k in self.outputs.keys():
self.inputs[k] = self.outputs[k]
    def testBaseF_BaseM(self):
        """Upwind, no tilt: static base reactions of the direct-drive frame."""
        # zero all bearing loads so only gravity remains
        self.inputs["tilt"] = 0.0
        self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
        self.compute_layout()
        myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        # gravity-only baseline: no lateral force, no roll/yaw moment
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
        F0 = self.outputs["base_F"]
        M0 = self.outputs["base_M"]
        # extra nacelle mass should only change the vertical force by its weight
        self.inputs["other_mass"] += 500e3
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
        npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
        # applied bearing moments should pass through to the base moment
        self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
        self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
        # bearing forces from both bearings should sum at the base
        self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
    def testBaseF_BaseM_withTilt(self):
        """Upwind with 5 deg tilt: only the pitch-axis checks remain exact."""
        self.inputs["tilt"] = 5.0
        self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
        self.compute_layout()
        myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        # gravity-only baseline (looser tolerance on the tilted lateral force)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
        F0 = self.outputs["base_F"]
        M0 = self.outputs["base_M"]
        # extra mass changes only the vertical force
        self.inputs["other_mass"] += 500e3
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
        npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
        # bearing moments: only the pitch component is checked under tilt
        self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1], decimal=0)
        self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(
            self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1] + self.inputs["M_mb2"][1], decimal=-1
        )
        # bearing forces: only the lateral (y) component is checked under tilt
        self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][1], 2 * self.inputs["F_mb2"][1])
    def testBaseF_BaseM_Downwind(self):
        """Downwind, no tilt: same checks as the upwind case."""
        self.inputs["tilt"] = 0.0
        self.discrete_inputs["upwind"] = False
        self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
        self.compute_layout()
        myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        # gravity-only baseline
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
        F0 = self.outputs["base_F"]
        M0 = self.outputs["base_M"]
        # extra mass changes only the vertical force
        self.inputs["other_mass"] += 500e3
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
        npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
        # bearing moments pass through to the base
        self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
        self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
        # bearing forces sum at the base
        self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
    def testBaseF_BaseM_withTilt_Downwind(self):
        """Downwind with 5 deg tilt: same checks as the upwind tilted case."""
        self.inputs["tilt"] = 5.0
        self.discrete_inputs["upwind"] = False
        self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
        self.compute_layout()
        myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        # gravity-only baseline (looser tolerance on the tilted lateral force)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
        F0 = self.outputs["base_F"]
        M0 = self.outputs["base_M"]
        # extra mass changes only the vertical force
        self.inputs["other_mass"] += 500e3
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
        npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
        # bearing moments: only the pitch component is checked under tilt
        self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1], decimal=0)
        self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(
            self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1] + self.inputs["M_mb2"][1], decimal=-1
        )
        # bearing forces: only the lateral (y) component is checked under tilt
        self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][1], 2 * self.inputs["F_mb2"][1])
    def testBaseF_BaseM_Geared(self):
        """Geared layout, no tilt: static base reactions of the I-beam bedplate frame."""
        # zero all applied loads (geared layout has torque arm and generator loads too)
        self.inputs["tilt"] = 0.0
        self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_torq"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_generator"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_torq"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_generator"] = np.zeros(3).reshape((3, 1))
        self.compute_layout(False)
        myobj = ds.Bedplate_IBeam_Frame(modeling_options=self.opt, n_dlcs=1)
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        # gravity-only baseline
        npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
        F0 = self.outputs["base_F"][:, 0]
        M0 = self.outputs["base_M"][:, 0]
        # extra mass changes only the vertical force
        self.inputs["other_mass"] += 500e3
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
        # bearing moments applied; base force should be unchanged
        self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
        self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity, decimal=0)
        # npt.assert_almost_equal(self.outputs['base_M'], M0+self.inputs['M_mb1']+self.inputs['M_mb2'], decimal=-1)
        # all four load application points share the same force -> 4x at the base
        self.inputs["F_mb1"] = self.inputs["F_mb2"] = self.inputs["F_generator"] = self.inputs["F_torq"] = np.array(
            [30e2, 40e2, 50e2]
        ).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 4 * self.inputs["F_mb1"][:2, 0], decimal=1)
        npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity + 4 * 50e2, decimal=0)
    def testBaseF_BaseM_withTilt_Geared(self):
        """Geared layout with 5 deg tilt: reduced set of exact checks."""
        self.inputs["tilt"] = 5.0
        self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_torq"] = np.zeros(3).reshape((3, 1))
        self.inputs["F_generator"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_torq"] = np.zeros(3).reshape((3, 1))
        self.inputs["M_generator"] = np.zeros(3).reshape((3, 1))
        self.compute_layout(False)
        myobj = ds.Bedplate_IBeam_Frame(modeling_options=self.opt, n_dlcs=1)
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        # gravity-only baseline
        npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
        F0 = self.outputs["base_F"][:, 0]
        M0 = self.outputs["base_M"][:, 0]
        # extra mass changes only the vertical force
        self.inputs["other_mass"] += 500e3
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=1)
        npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity)
        npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
        npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
        # bearing moments applied; base force should be unchanged
        self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
        self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=1)
        npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity, decimal=0)
        # npt.assert_almost_equal(self.outputs['base_M'], M0+self.inputs['M_mb1']+self.inputs['M_mb2'], decimal=-1)
        # equal forces at all four load points: only the lateral (y) sum is checked under tilt
        self.inputs["F_mb1"] = self.inputs["F_mb2"] = self.inputs["F_generator"] = self.inputs["F_torq"] = np.array(
            [30e2, 40e2, 50e2]
        ).reshape((3, 1))
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["base_F"][1, 0], 4 * self.inputs["F_mb1"][1, 0], decimal=1)
def testRunRotatingDirect_noTilt(self):
    """Main-bearing/torque reactions of the rotating direct-drive LSS, zero tilt.

    Runs the frame twice: first with zero hub loads to capture the
    gravity-only baseline reactions, then with a known hub force/moment
    to check how each reaction shifts.
    """
    self.inputs["tilt"] = 0.0
    # Zero hub loads -> gravity-only baseline.
    self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
    self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
    self.compute_layout()
    myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=True)
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    F0 = self.outputs["F_mb1"].flatten()  # baseline main-bearing-1 force
    M0 = self.outputs["M_mb2"].flatten()  # baseline main-bearing-2 moment
    self.assertGreater(0.0, F0[-1])  # gravity gives a negative vertical reaction at MB1
    # self.assertGreater(0.0, M0[1])
    # With no tilt and no hub load, only gravity loads MB1; other reactions are ~0.
    npt.assert_almost_equal(self.outputs["F_mb1"][:2], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
    # Analytic torsional stiffness of the hollow shaft: G*J/L with J = pi*(D^4-d^4)/32.
    self.assertAlmostEqual(
        self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
    )
    # Second pass: apply a known hub force g and moment 2*g, then re-run.
    g = np.array([30e2, 40e2, 50e2])
    self.inputs["F_hub"] = g.reshape((3, 1))
    self.inputs["M_hub"] = 2 * g.reshape((3, 1))
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    # MB1 picks up the full applied hub force on top of the gravity baseline.
    npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=1)  # *1=*L_h1
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=1)  # *1=*L_h1
    # Only the applied axial torque 2*g[0] reaches the torque reaction.
    npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testRunRotatingDirect_withTilt(self):
    """Main-bearing/torque reactions of the rotating direct-drive LSS, 5-deg tilt.

    Same two-pass scheme as the no-tilt case: gravity-only baseline
    first, then a known hub force/moment; the tilt projects gravity onto
    the shaft axis as well.
    """
    self.inputs["tilt"] = 5.0
    # Zero hub loads -> gravity-only baseline.
    self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
    self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
    self.compute_layout()
    myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=True)
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    F0 = self.outputs["F_mb1"].flatten()  # baseline main-bearing-1 force
    M0 = self.outputs["M_mb2"].flatten()  # baseline main-bearing-2 moment
    self.assertGreater(0.0, F0[0])  # tilt gives gravity a negative axial component
    self.assertGreater(0.0, F0[-1])  # vertical reaction still negative
    # self.assertGreater(0.0, M0[1])
    # Gravity acts in the x-z plane only; the lateral (y) reaction stays ~0.
    npt.assert_almost_equal(self.outputs["F_mb1"][1], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
    # Analytic torsional stiffness of the hollow shaft: G*J/L with J = pi*(D^4-d^4)/32.
    self.assertAlmostEqual(
        self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
    )
    # Second pass: apply a known hub force g and moment 2*g, then re-run.
    g = np.array([30e2, 40e2, 50e2])
    self.inputs["F_hub"] = g.reshape((3, 1))
    self.inputs["M_hub"] = 2 * g.reshape((3, 1))
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    # MB1 picks up the full applied hub force on top of the gravity baseline.
    npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=1)  # *1=*L_h1
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=1)  # *1=*L_h1
    # Only the applied axial torque 2*g[0] reaches the torque reaction.
    npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testRunRotatingGeared_noTilt(self):
    """Main-bearing/torque reactions of the rotating geared LSS, zero tilt.

    Mirrors the direct-drive case but with direct_drive=False, a gear
    ratio of 50, and the geared layout (compute_layout(False)).
    """
    self.inputs["tilt"] = 0.0
    self.inputs["gear_ratio"] = 50.0
    # Zero hub loads -> gravity-only baseline.
    self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
    self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
    self.compute_layout(False)
    myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=False)
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    F0 = self.outputs["F_mb1"].flatten()  # baseline main-bearing-1 force
    M0 = self.outputs["M_mb2"].flatten()  # baseline main-bearing-2 moment
    self.assertGreater(0.0, F0[-1])  # gravity gives a negative vertical reaction at MB1
    # self.assertGreater(0.0, M0[1])
    # With no tilt and no hub load, only gravity loads MB1; other reactions are ~0.
    npt.assert_almost_equal(self.outputs["F_mb1"][:2], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
    # Analytic torsional stiffness of the hollow shaft: G*J/L with J = pi*(D^4-d^4)/32.
    self.assertAlmostEqual(
        self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
    )
    # Second pass: apply a known hub force g and moment 2*g, then re-run.
    g = np.array([30e2, 40e2, 50e2])
    self.inputs["F_hub"] = g.reshape((3, 1))
    self.inputs["M_hub"] = 2 * g.reshape((3, 1))
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    # MB1 picks up the full applied hub force on top of the gravity baseline.
    npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=2)  # *1=*L_h1
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=2)  # *1=*L_h1
    # Only the applied axial torque 2*g[0] reaches the torque reaction.
    npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testRunRotatingGeared_withTilt(self):
    """Main-bearing/torque reactions of the rotating geared LSS, 5-deg tilt.

    Mirrors the tilted direct-drive case but with direct_drive=False, a
    gear ratio of 50, and the geared layout (compute_layout(False)).
    """
    self.inputs["tilt"] = 5.0
    self.inputs["gear_ratio"] = 50.0
    # Zero hub loads -> gravity-only baseline.
    self.inputs["F_hub"] = np.zeros(3).reshape((3, 1))
    self.inputs["M_hub"] = np.zeros(3).reshape((3, 1))
    self.compute_layout(False)
    myobj = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=False)
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    F0 = self.outputs["F_mb1"].flatten()  # baseline main-bearing-1 force
    M0 = self.outputs["M_mb2"].flatten()  # baseline main-bearing-2 moment
    self.assertGreater(0.0, F0[0])  # tilt gives gravity a negative axial component
    self.assertGreater(0.0, F0[-1])  # vertical reaction still negative
    # self.assertGreater(0.0, M0[1])
    # Gravity acts in the x-z plane only; the lateral (y) reaction stays ~0.
    npt.assert_almost_equal(self.outputs["F_mb1"][1], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"][[0, 2]], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_torq"], 0.0, decimal=2)
    # Analytic torsional stiffness of the hollow shaft: G*J/L with J = pi*(D^4-d^4)/32.
    self.assertAlmostEqual(
        self.outputs["lss_spring_constant"], 80.8e9 * np.pi * (3.3 ** 4 - 2.4 ** 4) / 32 / self.inputs["L_lss"], 4
    )
    # Second pass: apply a known hub force g and moment 2*g, then re-run.
    g = np.array([30e2, 40e2, 50e2])
    self.inputs["F_hub"] = g.reshape((3, 1))
    self.inputs["M_hub"] = 2 * g.reshape((3, 1))
    myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    # MB1 picks up the full applied hub force on top of the gravity baseline.
    npt.assert_almost_equal(self.outputs["F_mb1"].flatten(), g + F0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_mb2"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["F_torq"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb1"], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[0], 0.0, decimal=2)
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[1], g[-1] * 1 + 2 * g[1] + M0[1], decimal=2)  # *1=*L_h1
    npt.assert_almost_equal(self.outputs["M_mb2"].flatten()[2], -g[1] * 1 + 2 * g[2], decimal=2)  # *1=*L_h1
    # Only the applied axial torque 2*g[0] reaches the torque reaction.
    npt.assert_almost_equal(self.outputs["M_torq"].flatten(), np.r_[2 * g[0], 0.0, 0.0], decimal=2)
def testHSS_noTilt(self):
    """HSS/generator reactions for an untilted geared drivetrain.

    Captures the gravity-only generator reaction first, then applies a
    hub load and verifies that only the hub torque, scaled by the gear
    ratio, reaches the generator.
    """
    # --- gravity-only baseline ---
    self.inputs["tilt"] = 0.0
    self.inputs["gear_ratio"] = 50.0
    self.inputs["F_hub"] = np.zeros((3, 1))
    self.inputs["M_hub"] = np.zeros((3, 1))
    self.compute_layout(False)
    frame = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
    frame.compute(self.inputs, self.outputs)
    f_base = self.outputs["F_generator"].flatten()
    m_base = self.outputs["M_generator"].flatten()
    self.assertGreater(0.0, f_base[-1])
    self.assertGreater(0.0, m_base[1])
    npt.assert_almost_equal(f_base[:2], 0.0, decimal=2)
    npt.assert_almost_equal(m_base[[0, 2]], 0.0, decimal=2)
    # --- nonzero hub load: forces stay put, torque/gear_ratio propagates ---
    hub_force = np.array([30e2, 40e2, 50e2])
    self.inputs["F_hub"] = hub_force.reshape((3, 1))
    self.inputs["M_hub"] = 2 * hub_force.reshape((3, 1))
    self.compute_layout(False)
    frame = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
    frame.compute(self.inputs, self.outputs)
    npt.assert_almost_equal(self.outputs["F_generator"].flatten(), f_base, decimal=2)
    npt.assert_almost_equal(
        self.outputs["M_generator"].flatten(), np.r_[2 * hub_force[0] / 50.0, m_base[1], 0.0], decimal=2
    )
def testHSS_withTilt(self):
    """HSS/generator reactions for a geared drivetrain with 5-deg shaft tilt.

    Same two-pass scheme as the no-tilt case: gravity-only baseline,
    then a hub load whose torque alone (divided by the gear ratio)
    should reach the generator.
    """
    # --- gravity-only baseline ---
    self.inputs["tilt"] = 5.0
    self.inputs["gear_ratio"] = 50.0
    self.inputs["F_hub"] = np.zeros((3, 1))
    self.inputs["M_hub"] = np.zeros((3, 1))
    self.compute_layout(False)
    frame = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
    frame.compute(self.inputs, self.outputs)
    f_base = self.outputs["F_generator"].flatten()
    m_base = self.outputs["M_generator"].flatten()
    self.assertGreater(0.0, f_base[0])
    self.assertGreater(0.0, f_base[-1])
    self.assertGreater(0.0, m_base[1])
    npt.assert_almost_equal(f_base[1], 0.0, decimal=2)
    npt.assert_almost_equal(m_base[[0, 2]], 0.0, decimal=2)
    # --- nonzero hub load: forces stay put, torque/gear_ratio propagates ---
    hub_force = np.array([30e2, 40e2, 50e2])
    self.inputs["F_hub"] = hub_force.reshape((3, 1))
    self.inputs["M_hub"] = 2 * hub_force.reshape((3, 1))
    self.compute_layout(False)
    frame = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
    frame.compute(self.inputs, self.outputs)
    npt.assert_almost_equal(self.outputs["F_generator"].flatten(), f_base, decimal=2)
    npt.assert_almost_equal(
        self.outputs["M_generator"].flatten(), np.r_[2 * hub_force[0] / 50.0, m_base[1], 0.0], decimal=2
    )
def testShaftTheoryLSS(self):
    """Pure-torsion sanity check of the LSS against classical shaft theory.

    With only an axial hub torque applied and every mass/inertia zeroed,
    the shear stress must equal T*r/J for the hollow shaft and the axial
    stress must vanish.
    Reference: https://www.engineersedge.com/calculators/torsional-stress-calculator.htm
    """
    self.inputs["tilt"] = 0.0
    self.inputs["F_hub"] = np.zeros((3, 1))
    self.inputs["M_hub"] = np.array([1e5, 0.0, 0.0]).reshape((3, 1))
    # Zero out every mass/inertia so torsion is the only load path.
    self.inputs["brake_mass"] = 0.0
    self.inputs["brake_I"] = np.zeros(3)
    self.inputs["generator_rotor_mass"] = 0.0
    self.inputs["cm_rotor"] = 0.0
    self.inputs["generator_rotor_I"] = np.zeros(6)
    self.inputs["hub_system_mass"] = 0.0
    self.inputs["hub_system_cm"] = 0.0
    self.inputs["hub_system_I"] = np.zeros(6)
    # Uniform hollow shaft: outer diameter 5, wall thickness 0.5.
    ones5 = np.ones(5)
    self.inputs["lss_diameter"] = 5 * ones5
    self.inputs["lss_wall_thickness"] = 0.5 * ones5
    self.inputs["G"] = 100e9
    self.inputs["lss_rho"] = 1e-6
    self.compute_layout()
    frame = ds.Hub_Rotor_LSS_Frame(n_dlcs=1, modeling_options=self.opt, direct_drive=True)
    frame.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
    # Polar moment of a hollow section: J = pi/2 * (ro^4 - ri^4), ro=2.5, ri=2.0.
    polar_moment = 0.5 * np.pi * (2.5 ** 4 - 2 ** 4)
    tau = 1e5 / polar_moment * 2.5  # tau = T * r / J
    npt.assert_almost_equal(self.outputs["lss_axial_stress"], 0.0, decimal=4)
    npt.assert_almost_equal(self.outputs["lss_shear_stress"].flatten(), np.r_[np.zeros(3), tau], decimal=4)
def testShaftTheoryHSS(self):
    """Pure-torsion sanity check of the HSS against classical shaft theory.

    Only a hub torque is applied and every mass/inertia is zeroed, so
    the HSS shear stress must equal (T / gear_ratio) * r / J while the
    axial and bending stresses vanish.
    Reference: https://www.engineersedge.com/calculators/torsional-stress-calculator.htm
    """
    self.inputs["tilt"] = 0.0
    self.inputs["gear_ratio"] = 50.0
    self.inputs["s_hss"] = np.array([0.0, 0.5, 1.0])
    self.inputs["M_hub"] = np.array([1e5, 0.0, 0.0]).reshape((3, 1))
    # Zero out every mass/inertia so torsion is the only load path.
    self.inputs["s_generator"] = 0.0
    self.inputs["generator_mass"] = 0.0
    self.inputs["generator_I"] = np.zeros(3)
    self.inputs["brake_mass"] = 0.0
    self.inputs["brake_I"] = np.zeros(3)
    self.inputs["hub_system_mass"] = 0.0
    self.inputs["hub_system_cm"] = 0.0
    self.inputs["hub_system_I"] = np.zeros(6)
    # Uniform hollow shaft: outer diameter 5, wall thickness 0.5.
    ones3 = np.ones(3)
    self.inputs["hss_diameter"] = 5 * ones3
    self.inputs["hss_wall_thickness"] = 0.5 * ones3
    self.inputs["G"] = 100e9
    self.inputs["hss_rho"] = 1e-6
    self.compute_layout()
    frame = ds.HSS_Frame(modeling_options=self.opt, n_dlcs=1)
    frame.compute(self.inputs, self.outputs)
    # Polar moment of a hollow section: J = pi/2 * (ro^4 - ri^4), ro=2.5, ri=2.0.
    polar_moment = 0.5 * np.pi * (2.5 ** 4 - 2 ** 4)
    tau = 1e5 / 50.0 / polar_moment * 2.5  # torque reduced by the gear ratio
    npt.assert_almost_equal(self.outputs["hss_axial_stress"], 0.0, decimal=4)
    npt.assert_almost_equal(self.outputs["hss_bending_stress"], 0.0, decimal=4)
    npt.assert_almost_equal(self.outputs["hss_shear_stress"].flatten(), tau * np.ones(2), decimal=4)
def suite():
    """Return a TestSuite containing every TestDirectStructure case.

    Uses TestLoader.loadTestsFromTestCase instead of unittest.makeSuite,
    which is deprecated since Python 3.11 and removed in 3.13.
    """
    tests = unittest.TestSuite()
    tests.addTest(unittest.TestLoader().loadTestsFromTestCase(TestDirectStructure))
    return tests
if __name__ == "__main__":
    import sys

    # Run the suite and propagate pass/fail to the shell via the exit status.
    # sys.exit is used instead of the bare exit() builtin, which is a
    # site-module convenience and not guaranteed to exist in all interpreters.
    result = unittest.TextTestRunner().run(suite())
    sys.exit(0 if result.wasSuccessful() else 1)
| [
"unittest.TestSuite",
"wisdem.drivetrainse.layout.GearedLayout",
"wisdem.drivetrainse.drive_structure.Bedplate_IBeam_Frame",
"wisdem.drivetrainse.layout.DirectLayout",
"numpy.ones",
"unittest.makeSuite",
"wisdem.drivetrainse.drive_structure.HSS_Frame",
"numpy.array",
"numpy.testing.assert_almost_equ... | [((33603, 33623), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (33621, 33623), False, 'import unittest\n'), ((935, 945), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (942, 945), True, 'import numpy as np\n'), ((2104, 2160), 'numpy.array', 'np.array', (['[1000000.0, 500000.0, 500000.0, 0.0, 0.0, 0.0]'], {}), '([1000000.0, 500000.0, 500000.0, 0.0, 0.0, 0.0])\n', (2112, 2160), True, 'import numpy as np\n'), ((2280, 2336), 'numpy.array', 'np.array', (['[1000000.0, 500000.0, 500000.0, 0.0, 0.0, 0.0]'], {}), '([1000000.0, 500000.0, 500000.0, 0.0, 0.0, 0.0])\n', (2288, 2336), True, 'import numpy as np\n'), ((2458, 2514), 'numpy.array', 'np.array', (['[1000000.0, 500000.0, 500000.0, 0.0, 0.0, 0.0]'], {}), '([1000000.0, 500000.0, 500000.0, 0.0, 0.0, 0.0])\n', (2466, 2514), True, 'import numpy as np\n'), ((2583, 2641), 'numpy.array', 'np.array', (['[2000000.0, 1000000.0, 1000000.0, 0.0, 0.0, 0.0]'], {}), '([2000000.0, 1000000.0, 1000000.0, 0.0, 0.0, 0.0])\n', (2591, 2641), True, 'import numpy as np\n'), ((2704, 2745), 'numpy.array', 'np.array', (['[1000000.0, 500000.0, 500000.0]'], {}), '([1000000.0, 500000.0, 500000.0])\n', (2712, 2745), True, 'import numpy as np\n'), ((2805, 2840), 'numpy.array', 'np.array', (['[10000.0, 5000.0, 5000.0]'], {}), '([10000.0, 5000.0, 5000.0])\n', (2813, 2840), True, 'import numpy as np\n'), ((2910, 2945), 'numpy.array', 'np.array', (['[10000.0, 5000.0, 5000.0]'], {}), '([10000.0, 5000.0, 5000.0])\n', (2918, 2945), True, 'import numpy as np\n'), ((3489, 3546), 'numpy.array', 'np.array', (['[2409750.0, -1716429.0, 74352.9, 0.0, 0.0, 0.0]'], {}), '([2409750.0, -1716429.0, 74352.9, 0.0, 0.0, 0.0])\n', (3497, 3546), True, 'import numpy as np\n'), ((4840, 4906), 'wisdem.drivetrainse.drive_structure.Nose_Stator_Bedplate_Frame', 'ds.Nose_Stator_Bedplate_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (4869, 4906), True, 'import 
wisdem.drivetrainse.drive_structure as ds\n'), ((5009, 5065), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 0.0)\n", (5032, 5065), True, 'import numpy.testing as npt\n'), ((5074, 5129), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", '(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (5097, 5129), True, 'import numpy.testing as npt\n'), ((5138, 5194), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][-1]", '(0.0)'], {}), "(self.outputs['base_M'][-1], 0.0)\n", (5161, 5194), True, 'import numpy.testing as npt\n'), ((5413, 5469), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 0.0)\n", (5436, 5469), True, 'import numpy.testing as npt\n'), ((5478, 5556), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (5501, 5556), True, 'import numpy.testing as npt\n'), ((5562, 5617), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", '(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (5585, 5617), True, 'import numpy.testing as npt\n'), ((5626, 5683), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", 'M0[1]'], {}), "(self.outputs['base_M'][1], M0[1])\n", (5649, 5683), True, 'import numpy.testing as npt\n'), ((5692, 5747), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][2]", '(0.0)'], {}), "(self.outputs['base_M'][2], 0.0)\n", (5715, 5747), True, 'import numpy.testing as npt\n'), ((5921, 5977), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 
0.0)\n", (5944, 5977), True, 'import numpy.testing as npt\n'), ((5986, 6064), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (6009, 6064), True, 'import numpy.testing as npt\n'), ((6070, 6159), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M']", "(M0 + self.inputs['M_mb1'])"], {'decimal': '(0)'}), "(self.outputs['base_M'], M0 + self.inputs['M_mb1'],\n decimal=0)\n", (6093, 6159), True, 'import numpy.testing as npt\n'), ((6329, 6385), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 0.0)\n", (6352, 6385), True, 'import numpy.testing as npt\n'), ((6394, 6472), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (6417, 6472), True, 'import numpy.testing as npt\n'), ((6478, 6591), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M']", "(M0 + self.inputs['M_mb1'] + self.inputs['M_mb2'])"], {'decimal': '(-1)'}), "(self.outputs['base_M'], M0 + self.inputs['M_mb1'] +\n self.inputs['M_mb2'], decimal=-1)\n", (6501, 6591), True, 'import numpy.testing as npt\n'), ((6843, 6929), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", "(2 * self.inputs['F_mb2'][:2])"], {}), "(self.outputs['base_F'][:2], 2 * self.inputs['F_mb2'\n ][:2])\n", (6866, 6929), True, 'import numpy.testing as npt\n'), ((6933, 7028), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity + 2 * 5000.0)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 *\n gravity + 2 * 5000.0)\n", (6956, 7028), True, 'import numpy.testing as npt\n'), ((7377, 
7443), 'wisdem.drivetrainse.drive_structure.Nose_Stator_Bedplate_Frame', 'ds.Nose_Stator_Bedplate_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (7406, 7443), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((7546, 7613), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (7569, 7613), True, 'import numpy.testing as npt\n'), ((7622, 7677), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", '(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (7645, 7677), True, 'import numpy.testing as npt\n'), ((7686, 7742), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][-1]", '(0.0)'], {}), "(self.outputs['base_M'][-1], 0.0)\n", (7709, 7742), True, 'import numpy.testing as npt\n'), ((7961, 8028), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (7984, 8028), True, 'import numpy.testing as npt\n'), ((8037, 8115), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (8060, 8115), True, 'import numpy.testing as npt\n'), ((8121, 8176), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", '(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (8144, 8176), True, 'import numpy.testing as npt\n'), ((8185, 8242), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", 'M0[1]'], {}), "(self.outputs['base_M'][1], M0[1])\n", (8208, 8242), True, 'import numpy.testing as npt\n'), ((8251, 8306), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', 
(["self.outputs['base_M'][2]", '(0.0)'], {}), "(self.outputs['base_M'][2], 0.0)\n", (8274, 8306), True, 'import numpy.testing as npt\n'), ((8480, 8547), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (8503, 8547), True, 'import numpy.testing as npt\n'), ((8556, 8634), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (8579, 8634), True, 'import numpy.testing as npt\n'), ((8640, 8739), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", "(M0[1] + self.inputs['M_mb1'][1])"], {'decimal': '(0)'}), "(self.outputs['base_M'][1], M0[1] + self.inputs[\n 'M_mb1'][1], decimal=0)\n", (8663, 8739), True, 'import numpy.testing as npt\n'), ((8908, 8975), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (8931, 8975), True, 'import numpy.testing as npt\n'), ((8984, 9062), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (9007, 9062), True, 'import numpy.testing as npt\n'), ((9068, 9194), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", "(M0[1] + self.inputs['M_mb1'][1] + self.inputs['M_mb2'][1])"], {'decimal': '(-1)'}), "(self.outputs['base_M'][1], M0[1] + self.inputs[\n 'M_mb1'][1] + self.inputs['M_mb2'][1], decimal=-1)\n", (9091, 9194), True, 'import numpy.testing as npt\n'), ((9467, 9546), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][1]", "(2 * self.inputs['F_mb2'][1])"], {}), "(self.outputs['base_F'][1], 2 * 
self.inputs['F_mb2'][1])\n", (9490, 9546), True, 'import numpy.testing as npt\n'), ((9951, 10017), 'wisdem.drivetrainse.drive_structure.Nose_Stator_Bedplate_Frame', 'ds.Nose_Stator_Bedplate_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (9980, 10017), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((10120, 10176), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 0.0)\n", (10143, 10176), True, 'import numpy.testing as npt\n'), ((10185, 10240), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", '(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (10208, 10240), True, 'import numpy.testing as npt\n'), ((10249, 10305), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][-1]", '(0.0)'], {}), "(self.outputs['base_M'][-1], 0.0)\n", (10272, 10305), True, 'import numpy.testing as npt\n'), ((10524, 10580), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 0.0)\n", (10547, 10580), True, 'import numpy.testing as npt\n'), ((10589, 10667), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (10612, 10667), True, 'import numpy.testing as npt\n'), ((10673, 10728), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", '(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (10696, 10728), True, 'import numpy.testing as npt\n'), ((10737, 10794), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", 'M0[1]'], {}), "(self.outputs['base_M'][1], M0[1])\n", (10760, 10794), True, 'import numpy.testing as npt\n'), ((10803, 10858), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][2]", '(0.0)'], {}), "(self.outputs['base_M'][2], 0.0)\n", (10826, 10858), True, 'import numpy.testing as npt\n'), ((11032, 11088), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 0.0)\n", (11055, 11088), True, 'import numpy.testing as npt\n'), ((11097, 11175), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (11120, 11175), True, 'import numpy.testing as npt\n'), ((11181, 11270), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M']", "(M0 + self.inputs['M_mb1'])"], {'decimal': '(0)'}), "(self.outputs['base_M'], M0 + self.inputs['M_mb1'],\n decimal=0)\n", (11204, 11270), True, 'import numpy.testing as npt\n'), ((11440, 11496), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {}), "(self.outputs['base_F'][:2], 0.0)\n", (11463, 11496), True, 'import numpy.testing as npt\n'), ((11505, 11583), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (11528, 11583), True, 'import numpy.testing as npt\n'), ((11589, 11702), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M']", "(M0 + self.inputs['M_mb1'] + self.inputs['M_mb2'])"], {'decimal': '(-1)'}), "(self.outputs['base_M'], M0 + self.inputs['M_mb1'] +\n self.inputs['M_mb2'], decimal=-1)\n", (11612, 11702), True, 'import numpy.testing as npt\n'), ((11954, 12040), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", "(2 * self.inputs['F_mb2'][:2])"], {}), "(self.outputs['base_F'][:2], 2 * 
self.inputs['F_mb2'\n ][:2])\n", (11977, 12040), True, 'import numpy.testing as npt\n'), ((12044, 12139), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity + 2 * 5000.0)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 *\n gravity + 2 * 5000.0)\n", (12067, 12139), True, 'import numpy.testing as npt\n'), ((12544, 12610), 'wisdem.drivetrainse.drive_structure.Nose_Stator_Bedplate_Frame', 'ds.Nose_Stator_Bedplate_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (12573, 12610), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((12713, 12780), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (12736, 12780), True, 'import numpy.testing as npt\n'), ((12789, 12844), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", '(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (12812, 12844), True, 'import numpy.testing as npt\n'), ((12853, 12909), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][-1]", '(0.0)'], {}), "(self.outputs['base_M'][-1], 0.0)\n", (12876, 12909), True, 'import numpy.testing as npt\n'), ((13128, 13195), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (13151, 13195), True, 'import numpy.testing as npt\n'), ((13204, 13282), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (13227, 13282), True, 'import numpy.testing as npt\n'), ((13288, 13343), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][0]", 
'(0.0)'], {}), "(self.outputs['base_M'][0], 0.0)\n", (13311, 13343), True, 'import numpy.testing as npt\n'), ((13352, 13409), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", 'M0[1]'], {}), "(self.outputs['base_M'][1], M0[1])\n", (13375, 13409), True, 'import numpy.testing as npt\n'), ((13418, 13473), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][2]", '(0.0)'], {}), "(self.outputs['base_M'][2], 0.0)\n", (13441, 13473), True, 'import numpy.testing as npt\n'), ((13647, 13714), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (13670, 13714), True, 'import numpy.testing as npt\n'), ((13723, 13801), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (13746, 13801), True, 'import numpy.testing as npt\n'), ((13807, 13906), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", "(M0[1] + self.inputs['M_mb1'][1])"], {'decimal': '(0)'}), "(self.outputs['base_M'][1], M0[1] + self.inputs[\n 'M_mb1'][1], decimal=0)\n", (13830, 13906), True, 'import numpy.testing as npt\n'), ((14075, 14142), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2], 0.0, decimal=2)\n", (14098, 14142), True, 'import numpy.testing as npt\n'), ((14151, 14229), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2], F0[2] - 500000.0 * gravity)\n", (14174, 14229), True, 'import numpy.testing as npt\n'), ((14235, 14361), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', 
(["self.outputs['base_M'][1]", "(M0[1] + self.inputs['M_mb1'][1] + self.inputs['M_mb2'][1])"], {'decimal': '(-1)'}), "(self.outputs['base_M'][1], M0[1] + self.inputs[\n 'M_mb1'][1] + self.inputs['M_mb2'][1], decimal=-1)\n", (14258, 14361), True, 'import numpy.testing as npt\n'), ((14634, 14713), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][1]", "(2 * self.inputs['F_mb2'][1])"], {}), "(self.outputs['base_F'][1], 2 * self.inputs['F_mb2'][1])\n", (14657, 14713), True, 'import numpy.testing as npt\n'), ((15324, 15384), 'wisdem.drivetrainse.drive_structure.Bedplate_IBeam_Frame', 'ds.Bedplate_IBeam_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (15347, 15384), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((15487, 15557), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2, 0]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2, 0], 0.0, decimal=2)\n", (15510, 15557), True, 'import numpy.testing as npt\n'), ((15566, 15640), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][[0, 2], 0]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_M'][[0, 2], 0], 0.0, decimal=2)\n", (15589, 15640), True, 'import numpy.testing as npt\n'), ((15871, 15941), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2, 0]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2, 0], 0.0, decimal=2)\n", (15894, 15941), True, 'import numpy.testing as npt\n'), ((15950, 16035), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2, 0]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2, 0], F0[2] - 500000.0 *\n gravity)\n", (15973, 16035), True, 'import numpy.testing as npt\n'), ((16037, 16111), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][[0, 2], 
0]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_M'][[0, 2], 0], 0.0, decimal=2)\n", (16060, 16111), True, 'import numpy.testing as npt\n'), ((16120, 16177), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", 'M0[1]'], {}), "(self.outputs['base_M'][1], M0[1])\n", (16143, 16177), True, 'import numpy.testing as npt\n'), ((16421, 16491), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2, 0]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_F'][:2, 0], 0.0, decimal=2)\n", (16444, 16491), True, 'import numpy.testing as npt\n'), ((16500, 16596), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2, 0]", '(F0[2] - 500000.0 * gravity)'], {'decimal': '(0)'}), "(self.outputs['base_F'][2, 0], F0[2] - 500000.0 *\n gravity, decimal=0)\n", (16523, 16596), True, 'import numpy.testing as npt\n'), ((16983, 17086), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2, 0]", "(4 * self.inputs['F_mb1'][:2, 0])"], {'decimal': '(1)'}), "(self.outputs['base_F'][:2, 0], 4 * self.inputs[\n 'F_mb1'][:2, 0], decimal=1)\n", (17006, 17086), True, 'import numpy.testing as npt\n'), ((17090, 17199), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2, 0]", '(F0[2] - 500000.0 * gravity + 4 * 5000.0)'], {'decimal': '(0)'}), "(self.outputs['base_F'][2, 0], F0[2] - 500000.0 *\n gravity + 4 * 5000.0, decimal=0)\n", (17113, 17199), True, 'import numpy.testing as npt\n'), ((17810, 17870), 'wisdem.drivetrainse.drive_structure.Bedplate_IBeam_Frame', 'ds.Bedplate_IBeam_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (17833, 17870), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((17973, 18043), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2, 0]", '(0.0)'], {'decimal': 
'(2)'}), "(self.outputs['base_F'][:2, 0], 0.0, decimal=2)\n", (17996, 18043), True, 'import numpy.testing as npt\n'), ((18052, 18126), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][[0, 2], 0]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_M'][[0, 2], 0], 0.0, decimal=2)\n", (18075, 18126), True, 'import numpy.testing as npt\n'), ((18357, 18427), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2, 0]", '(0.0)'], {'decimal': '(1)'}), "(self.outputs['base_F'][:2, 0], 0.0, decimal=1)\n", (18380, 18427), True, 'import numpy.testing as npt\n'), ((18436, 18521), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2, 0]", '(F0[2] - 500000.0 * gravity)'], {}), "(self.outputs['base_F'][2, 0], F0[2] - 500000.0 *\n gravity)\n", (18459, 18521), True, 'import numpy.testing as npt\n'), ((18523, 18597), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][[0, 2], 0]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['base_M'][[0, 2], 0], 0.0, decimal=2)\n", (18546, 18597), True, 'import numpy.testing as npt\n'), ((18606, 18663), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_M'][1]", 'M0[1]'], {}), "(self.outputs['base_M'][1], M0[1])\n", (18629, 18663), True, 'import numpy.testing as npt\n'), ((18907, 18977), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][:2, 0]", '(0.0)'], {'decimal': '(1)'}), "(self.outputs['base_F'][:2, 0], 0.0, decimal=1)\n", (18930, 18977), True, 'import numpy.testing as npt\n'), ((18986, 19082), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][2, 0]", '(F0[2] - 500000.0 * gravity)'], {'decimal': '(0)'}), "(self.outputs['base_F'][2, 0], F0[2] - 500000.0 *\n gravity, decimal=0)\n", (19009, 19082), True, 'import numpy.testing as npt\n'), ((19469, 19570), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['base_F'][1, 0]", "(4 * self.inputs['F_mb1'][1, 0])"], {'decimal': '(1)'}), "(self.outputs['base_F'][1, 0], 4 * self.inputs[\n 'F_mb1'][1, 0], decimal=1)\n", (19492, 19570), True, 'import numpy.testing as npt\n'), ((19809, 19887), 'wisdem.drivetrainse.drive_structure.Hub_Rotor_LSS_Frame', 'ds.Hub_Rotor_LSS_Frame', ([], {'n_dlcs': '(1)', 'modeling_options': 'self.opt', 'direct_drive': '(True)'}), '(n_dlcs=1, modeling_options=self.opt, direct_drive=True)\n', (19831, 19887), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((20161, 20227), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb1'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb1'][:2], 0.0, decimal=2)\n", (20184, 20227), True, 'import numpy.testing as npt\n'), ((20236, 20298), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (20259, 20298), True, 'import numpy.testing as npt\n'), ((20307, 20370), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_torq'], 0.0, decimal=2)\n", (20330, 20370), True, 'import numpy.testing as npt\n'), ((20379, 20441), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (20402, 20441), True, 'import numpy.testing as npt\n'), ((20450, 20520), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb2'][[0, 2]]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb2'][[0, 2]], 0.0, decimal=2)\n", (20473, 20520), True, 'import numpy.testing as npt\n'), ((20529, 20592), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_torq']", '(0.0)'], {'decimal': '(2)'}), 
"(self.outputs['M_torq'], 0.0, decimal=2)\n", (20552, 20592), True, 'import numpy.testing as npt\n'), ((20767, 20801), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (20775, 20801), True, 'import numpy as np\n'), ((21084, 21146), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (21107, 21146), True, 'import numpy.testing as npt\n'), ((21155, 21218), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_torq'], 0.0, decimal=2)\n", (21178, 21218), True, 'import numpy.testing as npt\n'), ((21227, 21289), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (21250, 21289), True, 'import numpy.testing as npt\n'), ((21957, 22035), 'wisdem.drivetrainse.drive_structure.Hub_Rotor_LSS_Frame', 'ds.Hub_Rotor_LSS_Frame', ([], {'n_dlcs': '(1)', 'modeling_options': 'self.opt', 'direct_drive': '(True)'}), '(n_dlcs=1, modeling_options=self.opt, direct_drive=True)\n', (21979, 22035), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((22348, 22413), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb1'][1]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb1'][1], 0.0, decimal=2)\n", (22371, 22413), True, 'import numpy.testing as npt\n'), ((22422, 22484), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (22445, 22484), True, 'import numpy.testing as npt\n'), ((22493, 22556), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_torq'], 0.0, decimal=2)\n", (22516, 22556), 
True, 'import numpy.testing as npt\n'), ((22565, 22627), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (22588, 22627), True, 'import numpy.testing as npt\n'), ((22636, 22706), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb2'][[0, 2]]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb2'][[0, 2]], 0.0, decimal=2)\n", (22659, 22706), True, 'import numpy.testing as npt\n'), ((22715, 22778), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_torq'], 0.0, decimal=2)\n", (22738, 22778), True, 'import numpy.testing as npt\n'), ((22953, 22987), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (22961, 22987), True, 'import numpy as np\n'), ((23270, 23332), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (23293, 23332), True, 'import numpy.testing as npt\n'), ((23341, 23404), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_torq'], 0.0, decimal=2)\n", (23364, 23404), True, 'import numpy.testing as npt\n'), ((23413, 23475), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (23436, 23475), True, 'import numpy.testing as npt\n'), ((24187, 24266), 'wisdem.drivetrainse.drive_structure.Hub_Rotor_LSS_Frame', 'ds.Hub_Rotor_LSS_Frame', ([], {'n_dlcs': '(1)', 'modeling_options': 'self.opt', 'direct_drive': '(False)'}), '(n_dlcs=1, modeling_options=self.opt, direct_drive=False)\n', (24209, 24266), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((24540, 
24606), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb1'][:2]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb1'][:2], 0.0, decimal=2)\n", (24563, 24606), True, 'import numpy.testing as npt\n'), ((24615, 24677), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (24638, 24677), True, 'import numpy.testing as npt\n'), ((24686, 24749), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_torq'], 0.0, decimal=2)\n", (24709, 24749), True, 'import numpy.testing as npt\n'), ((24758, 24820), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (24781, 24820), True, 'import numpy.testing as npt\n'), ((24829, 24899), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb2'][[0, 2]]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb2'][[0, 2]], 0.0, decimal=2)\n", (24852, 24899), True, 'import numpy.testing as npt\n'), ((24908, 24971), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_torq'], 0.0, decimal=2)\n", (24931, 24971), True, 'import numpy.testing as npt\n'), ((25146, 25180), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (25154, 25180), True, 'import numpy as np\n'), ((25463, 25525), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (25486, 25525), True, 'import numpy.testing as npt\n'), ((25534, 25597), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), 
"(self.outputs['F_torq'], 0.0, decimal=2)\n", (25557, 25597), True, 'import numpy.testing as npt\n'), ((25606, 25668), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (25629, 25668), True, 'import numpy.testing as npt\n'), ((26382, 26461), 'wisdem.drivetrainse.drive_structure.Hub_Rotor_LSS_Frame', 'ds.Hub_Rotor_LSS_Frame', ([], {'n_dlcs': '(1)', 'modeling_options': 'self.opt', 'direct_drive': '(False)'}), '(n_dlcs=1, modeling_options=self.opt, direct_drive=False)\n', (26404, 26461), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((26774, 26839), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb1'][1]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb1'][1], 0.0, decimal=2)\n", (26797, 26839), True, 'import numpy.testing as npt\n'), ((26848, 26910), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (26871, 26910), True, 'import numpy.testing as npt\n'), ((26919, 26982), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_torq'], 0.0, decimal=2)\n", (26942, 26982), True, 'import numpy.testing as npt\n'), ((26991, 27053), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (27014, 27053), True, 'import numpy.testing as npt\n'), ((27062, 27132), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb2'][[0, 2]]", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb2'][[0, 2]], 0.0, decimal=2)\n", (27085, 27132), True, 'import numpy.testing as npt\n'), ((27141, 27204), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', 
(["self.outputs['M_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_torq'], 0.0, decimal=2)\n", (27164, 27204), True, 'import numpy.testing as npt\n'), ((27379, 27413), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (27387, 27413), True, 'import numpy as np\n'), ((27696, 27758), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_mb2']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_mb2'], 0.0, decimal=2)\n", (27719, 27758), True, 'import numpy.testing as npt\n'), ((27767, 27830), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['F_torq']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['F_torq'], 0.0, decimal=2)\n", (27790, 27830), True, 'import numpy.testing as npt\n'), ((27839, 27901), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['M_mb1']", '(0.0)'], {'decimal': '(2)'}), "(self.outputs['M_mb1'], 0.0, decimal=2)\n", (27862, 27901), True, 'import numpy.testing as npt\n'), ((28599, 28648), 'wisdem.drivetrainse.drive_structure.HSS_Frame', 'ds.HSS_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (28611, 28648), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((29078, 29112), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (29086, 29112), True, 'import numpy as np\n'), ((29260, 29309), 'wisdem.drivetrainse.drive_structure.HSS_Frame', 'ds.HSS_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (29272, 29309), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((29840, 29889), 'wisdem.drivetrainse.drive_structure.HSS_Frame', 'ds.HSS_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (29852, 29889), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((30357, 30391), 
'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (30365, 30391), True, 'import numpy as np\n'), ((30539, 30588), 'wisdem.drivetrainse.drive_structure.HSS_Frame', 'ds.HSS_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (30551, 30588), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((31200, 31211), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (31208, 31211), True, 'import numpy as np\n'), ((31343, 31354), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (31351, 31354), True, 'import numpy as np\n'), ((31481, 31492), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (31489, 31492), True, 'import numpy as np\n'), ((31510, 31520), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (31517, 31520), True, 'import numpy as np\n'), ((31744, 31822), 'wisdem.drivetrainse.drive_structure.Hub_Rotor_LSS_Frame', 'ds.Hub_Rotor_LSS_Frame', ([], {'n_dlcs': '(1)', 'modeling_options': 'self.opt', 'direct_drive': '(True)'}), '(n_dlcs=1, modeling_options=self.opt, direct_drive=True)\n', (31766, 31822), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((32001, 32074), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['lss_axial_stress']", '(0.0)'], {'decimal': '(4)'}), "(self.outputs['lss_axial_stress'], 0.0, decimal=4)\n", (32024, 32074), True, 'import numpy.testing as npt\n'), ((32414, 32439), 'numpy.array', 'np.array', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (32422, 32439), True, 'import numpy as np\n'), ((32635, 32646), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (32643, 32646), True, 'import numpy as np\n'), ((32720, 32731), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (32728, 32731), True, 'import numpy as np\n'), ((32858, 32869), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (32866, 32869), True, 'import numpy as np\n'), ((32887, 32897), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', 
(32894, 32897), True, 'import numpy as np\n'), ((33121, 33170), 'wisdem.drivetrainse.drive_structure.HSS_Frame', 'ds.HSS_Frame', ([], {'modeling_options': 'self.opt', 'n_dlcs': '(1)'}), '(modeling_options=self.opt, n_dlcs=1)\n', (33133, 33170), True, 'import wisdem.drivetrainse.drive_structure as ds\n'), ((33311, 33384), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['hss_axial_stress']", '(0.0)'], {'decimal': '(4)'}), "(self.outputs['hss_axial_stress'], 0.0, decimal=4)\n", (33334, 33384), True, 'import numpy.testing as npt\n'), ((33393, 33468), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (["self.outputs['hss_bending_stress']", '(0.0)'], {'decimal': '(4)'}), "(self.outputs['hss_bending_stress'], 0.0, decimal=4)\n", (33416, 33468), True, 'import numpy.testing as npt\n'), ((33642, 33681), 'unittest.makeSuite', 'unittest.makeSuite', (['TestDirectStructure'], {}), '(TestDirectStructure)\n', (33660, 33681), False, 'import unittest\n'), ((1099, 1109), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1106, 1109), True, 'import numpy as np\n'), ((1161, 1171), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1168, 1171), True, 'import numpy as np\n'), ((1338, 1351), 'numpy.ones', 'np.ones', (['npts'], {}), '(npts)\n', (1345, 1351), True, 'import numpy as np\n'), ((1777, 1787), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1784, 1787), True, 'import numpy as np\n'), ((1882, 1892), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1889, 1892), True, 'import numpy as np\n'), ((4262, 4280), 'wisdem.drivetrainse.layout.DirectLayout', 'lay.DirectLayout', ([], {}), '()\n', (4278, 4280), True, 'import wisdem.drivetrainse.layout as lay\n'), ((4296, 4314), 'wisdem.drivetrainse.layout.GearedLayout', 'lay.GearedLayout', ([], {}), '()\n', (4312, 4314), True, 'import wisdem.drivetrainse.layout as lay\n'), ((33742, 33767), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (33765, 33767), False, 'import 
unittest\n'), ((3009, 3051), 'numpy.array', 'np.array', (['[2409750.0, -1716429.0, 74352.9]'], {}), '([2409750.0, -1716429.0, 74352.9])\n', (3017, 3051), True, 'import numpy as np\n'), ((3103, 3145), 'numpy.array', 'np.array', (['[2409750.0, -1716429.0, 74352.9]'], {}), '([2409750.0, -1716429.0, 74352.9])\n', (3111, 3145), True, 'import numpy as np\n'), ((3197, 3243), 'numpy.array', 'np.array', (['[-18329100.0, 6171732.4, 5785829.46]'], {}), '([-18329100.0, 6171732.4, 5785829.46])\n', (3205, 3243), True, 'import numpy as np\n'), ((3294, 3340), 'numpy.array', 'np.array', (['[-18329100.0, 6171732.4, 5785829.46]'], {}), '([-18329100.0, 6171732.4, 5785829.46])\n', (3302, 3340), True, 'import numpy as np\n'), ((3582, 3617), 'numpy.array', 'np.array', (['[2409750.0, 0.0, 7435.29]'], {}), '([2409750.0, 0.0, 7435.29])\n', (3590, 3617), True, 'import numpy as np\n'), ((3668, 3711), 'numpy.array', 'np.array', (['[-18329.1, 617173.24, 578582.946]'], {}), '([-18329.1, 617173.24, 578582.946])\n', (3676, 3711), True, 'import numpy as np\n'), ((4589, 4600), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4597, 4600), True, 'import numpy as np\n'), ((4648, 4659), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4656, 4659), True, 'import numpy as np\n'), ((4707, 4718), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4715, 4718), True, 'import numpy as np\n'), ((4766, 4777), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4774, 4777), True, 'import numpy as np\n'), ((6620, 6654), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (6628, 6654), True, 'import numpy as np\n'), ((6696, 6730), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (6704, 6730), True, 'import numpy as np\n'), ((7126, 7137), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7134, 7137), True, 'import numpy as np\n'), ((7185, 7196), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7193, 7196), True, 
'import numpy as np\n'), ((7244, 7255), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7252, 7255), True, 'import numpy as np\n'), ((7303, 7314), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7311, 7314), True, 'import numpy as np\n'), ((9244, 9278), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (9252, 9278), True, 'import numpy as np\n'), ((9320, 9354), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (9328, 9354), True, 'import numpy as np\n'), ((9700, 9711), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9708, 9711), True, 'import numpy as np\n'), ((9759, 9770), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9767, 9770), True, 'import numpy as np\n'), ((9818, 9829), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9826, 9829), True, 'import numpy as np\n'), ((9877, 9888), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9885, 9888), True, 'import numpy as np\n'), ((11731, 11765), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (11739, 11765), True, 'import numpy as np\n'), ((11807, 11841), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (11815, 11841), True, 'import numpy as np\n'), ((12293, 12304), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12301, 12304), True, 'import numpy as np\n'), ((12352, 12363), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12360, 12363), True, 'import numpy as np\n'), ((12411, 12422), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12419, 12422), True, 'import numpy as np\n'), ((12470, 12481), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12478, 12481), True, 'import numpy as np\n'), ((14411, 14445), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (14419, 14445), True, 'import numpy as np\n'), ((14487, 14521), 'numpy.array', 'np.array', (['[3000.0, 
4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (14495, 14521), True, 'import numpy as np\n'), ((14818, 14829), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (14826, 14829), True, 'import numpy as np\n'), ((14877, 14888), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (14885, 14888), True, 'import numpy as np\n'), ((14937, 14948), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (14945, 14948), True, 'import numpy as np\n'), ((15002, 15013), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (15010, 15013), True, 'import numpy as np\n'), ((15061, 15072), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (15069, 15072), True, 'import numpy as np\n'), ((15120, 15131), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (15128, 15131), True, 'import numpy as np\n'), ((15180, 15191), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (15188, 15191), True, 'import numpy as np\n'), ((15245, 15256), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (15253, 15256), True, 'import numpy as np\n'), ((16814, 16848), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (16822, 16848), True, 'import numpy as np\n'), ((17304, 17315), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17312, 17315), True, 'import numpy as np\n'), ((17363, 17374), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17371, 17374), True, 'import numpy as np\n'), ((17423, 17434), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17431, 17434), True, 'import numpy as np\n'), ((17488, 17499), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17496, 17499), True, 'import numpy as np\n'), ((17547, 17558), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17555, 17558), True, 'import numpy as np\n'), ((17606, 17617), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17614, 17617), True, 'import numpy as np\n'), ((17666, 17677), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17674, 17677), True, 'import numpy as np\n'), ((17731, 
17742), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (17739, 17742), True, 'import numpy as np\n'), ((19300, 19334), 'numpy.array', 'np.array', (['[3000.0, 4000.0, 5000.0]'], {}), '([3000.0, 4000.0, 5000.0])\n', (19308, 19334), True, 'import numpy as np\n'), ((19676, 19687), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (19684, 19687), True, 'import numpy as np\n'), ((19735, 19746), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (19743, 19746), True, 'import numpy as np\n'), ((21824, 21835), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (21832, 21835), True, 'import numpy as np\n'), ((21883, 21894), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (21891, 21894), True, 'import numpy as np\n'), ((24049, 24060), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (24057, 24060), True, 'import numpy as np\n'), ((24108, 24119), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (24116, 24119), True, 'import numpy as np\n'), ((26244, 26255), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (26252, 26255), True, 'import numpy as np\n'), ((26303, 26314), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (26311, 26314), True, 'import numpy as np\n'), ((28461, 28472), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (28469, 28472), True, 'import numpy as np\n'), ((28520, 28531), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (28528, 28531), True, 'import numpy as np\n'), ((29702, 29713), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (29710, 29713), True, 'import numpy as np\n'), ((29761, 29772), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (29769, 29772), True, 'import numpy as np\n'), ((31026, 31037), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (31034, 31037), True, 'import numpy as np\n'), ((31085, 31115), 'numpy.array', 'np.array', (['[100000.0, 0.0, 0.0]'], {}), '([100000.0, 0.0, 0.0])\n', (31093, 31115), True, 'import numpy as np\n'), ((32471, 32501), 'numpy.array', 'np.array', (['[100000.0, 0.0, 0.0]'], {}), '([100000.0, 
0.0, 0.0])\n', (32479, 32501), True, 'import numpy as np\n'), ((33553, 33563), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (33560, 33563), True, 'import numpy as np\n'), ((5787, 5802), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (5796, 5802), True, 'import numpy as np\n'), ((6195, 6210), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (6204, 6210), True, 'import numpy as np\n'), ((8346, 8361), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (8355, 8361), True, 'import numpy as np\n'), ((8774, 8789), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (8783, 8789), True, 'import numpy as np\n'), ((10898, 10913), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (10907, 10913), True, 'import numpy as np\n'), ((11306, 11321), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (11315, 11321), True, 'import numpy as np\n'), ((13513, 13528), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (13522, 13528), True, 'import numpy as np\n'), ((13941, 13956), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (13950, 13956), True, 'import numpy as np\n'), ((16217, 16232), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (16226, 16232), True, 'import numpy as np\n'), ((16287, 16302), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (16296, 16302), True, 'import numpy as np\n'), ((18703, 18718), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (18712, 18718), True, 'import numpy as np\n'), ((18773, 18788), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (18782, 18788), True, 'import numpy as np\n'), ((32157, 32168), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (32165, 32168), True, 'import numpy as np\n')] |
import pdb, sys, os, time, requests, json
import numpy as np
import matplotlib.pyplot as plt
try:
import pysynphot
pysynphotImport = True
except:
pysynphotImport = False
import math
from urllib.parse import quote as urlencode
"""
readStellarTrack()
planetMassFromRadius()
solarSystem()
computeTSM()
"""
# Physical constants in SI units.
RSUN_SI = 6.955e8  # solar radius [m]
MSUN_SI = 1.9889e30  # solar mass [kg]
MJUP_SI = 1.8986e27  # Jupiter mass [kg]
MEARTH_SI = 5.9723e24  # Earth mass [kg]
RJUP_SI = 7.149e7  # Jupiter radius [m]
REARTH_SI = 6.371e6  # Earth radius [m]
AU_SI = 1.496e11  # astronomical unit [m]
GRAV_SI = 6.67428e-11 # gravitational constant in m^3 kg^-1 s^-2
def photBands( makePlot=False ):
    """
    Load the V, I, TESS, J, H and Ks bandpass response curves.

    Each curve is read from a data file shipped alongside this module and
    returned as a 2-column array with wavelength converted to micron in the
    first column and the (unnormalized) response in the second column.

    Parameters
    ----------
    makePlot : bool, optional
        If True, plot each response curve normalized to its peak.

    Returns
    -------
    Vband, Iband, Tband, Jband, Hband, Kband : ndarray
        One 2-column array per bandpass.
    """
    idir = os.path.dirname( __file__ )

    def _load( fname, wavScale, **kwargs ):
        # Read a response file and convert its wavelength column to micron.
        band = np.loadtxt( os.path.join( idir, fname ), **kwargs )
        band[:,0] /= wavScale
        return band

    Vband = _load( 'Bessel_V.dat', 1e4 )  # Angstrom --> micron
    Iband = _load( 'Bessel_I.dat', 1e4 )  # Angstrom --> micron
    Tband = _load( 'tess-response-function-v2.0.csv', 1e3, delimiter=',' )  # nm --> micron
    Jband = _load( '2MASS_J.dat', 1e3 )  # nm --> micron
    Hband = _load( '2MASS_H.dat', 1e3 )  # nm --> micron
    Kband = _load( '2MASS_Ks.dat', 1e3 )  # nm --> micron
    if makePlot:
        plt.figure()
        labeled = [ [Vband,'V'], [Iband,'I'], [Tband,'T'], \
                    [Jband,'J'], [Hband,'H'], [Kband,'K'] ]
        for band, label in labeled:
            # Normalize each curve to its peak for display purposes.
            plt.plot( band[:,0], band[:,1]/band[:,1].max(), label=label )
        plt.legend()
    return Vband, Iband, Tband, Jband, Hband, Kband
def modelStellarSpectrum( TeffK, loggCGS, FeH=0 ):
    """
    Return a Kurucz (k93models) model stellar spectrum via pysynphot.

    Parameters
    ----------
    TeffK : float
        Stellar effective temperature in Kelvin. Values below 3500 K are
        clamped to 3500 K (grid limit) with a printed warning.
    loggCGS : float
        Stellar surface gravity, log10(g [cgs]). Values above 5 are clamped
        to 5 (grid limit) with a printed warning.
    FeH : float, optional
        Metallicity [Fe/H]; defaults to solar (0).

    Returns
    -------
    star : list
        [wavMicr, F] where wavMicr is wavelength in micron and F is the
        model flux (photlam) multiplied by wavelength in Angstrom.

    Raises
    ------
    ImportError
        If pysynphot was not importable at module load time. (Previously
        this surfaced as a confusing NameError on the `pysynphot` name.)
    """
    if not pysynphotImport:
        raise ImportError( 'pysynphot is required by modelStellarSpectrum() '
                           'but could not be imported' )
    # Clamp inputs to the edges of the k93models grid.
    if TeffK<3500:
        print( ' WARNING: set Teff={0:.0f}K --> Teff=3500K'.format( TeffK ) )
        TeffK = 3500
    if loggCGS>5:
        print( ' WARNING: set logg={0:.3f} --> logg=5'.format( loggCGS ) )
        loggCGS = 5
    sp = pysynphot.Icat( 'k93models', TeffK, FeH, loggCGS )
    sp.convert( pysynphot.units.Angstrom )
    sp.convert( pysynphot.units.Photlam )
    wavAngstrom = sp.wave
    wavMicr = wavAngstrom*(1e-4)  # Angstrom --> micron
    F = sp.flux*wavAngstrom
    star = [ wavMicr, F ]
    return star
def JHKVmags( TICIDs ):
    """
    Query the TESS Input Catalog at MAST for broadband magnitudes.

    Parameters
    ----------
    TICIDs : iterable
        TIC identifiers for the targets of interest.

    Returns
    -------
    magsDict : dict
        Keys 'Jmag', 'Hmag', 'Kmag', 'Vmag', 'Imag', each mapping to a
        float array ordered the same as the input TICIDs.
    """
    listtici = list( TICIDs )
    def mast_query( request ):
        """
        Perform a MAST query.
        Parameters
        ----------
        request (dictionary): The MAST request json object
        Returns head,content where head is the response HTTP headers, and content is the returned data
        """
        # Base API url
        request_url='https://mast.stsci.edu/api/v0/invoke'
        # Grab Python Version
        version =".".join(map(str, sys.version_info[:3]))
        # Perform the HTTP request
        headers = {'Content-type': 'application/x-www-form-urlencoded',
                   'Accept': 'text/plain',
                   'User-agent':'python-requests/'+version}
        # Encoding the request as a json string
        req_string = json.dumps(request)
        req_string = urlencode(req_string)
        # Perform the HTTP request
        resp = requests.post(request_url, data='request='+req_string, headers=headers)
        # Pull out the headers and response content
        head = resp.headers
        content = resp.content.decode('utf-8')
        return head, content
    # BUGFIX: the columns requested must match the fields consumed below.
    # Previously this asked for 'rad, mass', so the returned rows would not
    # carry the ID or magnitude columns read from them.
    request = {'service':'Mast.Catalogs.Filtered.Tic', 'format':'json', \
               'params':{'columns':'ID, Jmag, Hmag, Kmag, Vmag, imag', \
                         'filters':[{'paramName':'ID', 'values':listtici}]}}
    headers, outString = mast_query( request )
    dictquertemp = json.loads( outString )['data']
    # Index the returned rows by TIC ID for order-preserving lookup below.
    magsDictByID = {}
    for planet in dictquertemp:
        magsDictByID[planet['ID']] = {'Jmag':planet['Jmag'],'Hmag':planet['Hmag'],\
                                      'Kmag':planet['Kmag'],'Vmag':planet['Vmag'], \
                                      'Imag':planet['imag'] }
    # Assemble output arrays in the same order as the input TICIDs.
    magsDict = {}
    mags = ['Jmag', 'Hmag', 'Kmag', 'Vmag', 'Imag']
    for mag in mags:
        maglist = []
        for TICID in listtici:
            planet = magsDictByID[int( TICID )]
            maglist.append( planet[mag] )
        magsDict[mag] = np.array( maglist, dtype=float )
    return magsDict
def tickLogFormat( y, pos ):
    """Matplotlib tick formatter showing just enough decimal places.

    Values >= 1 are printed with no decimals; smaller values get as many
    decimal places as their leading significant digit requires. The pos
    argument is required by the matplotlib formatter API but unused.
    """
    nDecimals = int( np.ceil( np.maximum( -np.log10( y ), 0 ) ) )
    template = '{{:.{:1d}f}}'.format( nDecimals )
    return template.format( y )
def testWASP121():
    """Sanity check convertMag against WASP-121's catalogued magnitudes.

    Converts the V magnitude to J and Ks and prints the measured and
    estimated values side by side.
    """
    VmagMeasured = 10.51
    JmagMeasured = 9.625
    KmagMeasured = 9.374
    TeffK = 6500.
    loggCGS = 4.5
    JmagEst = convertMag( VmagMeasured, TeffK, loggCGS, inputMag='V', outputMag='J' )
    KmagEst = convertMag( VmagMeasured, TeffK, loggCGS, inputMag='V', outputMag='Ks' )
    print( JmagMeasured, JmagEst )
    print( KmagMeasured, KmagEst )
    return None
def convertMag( inMag, TeffK, loggCGS, inputMag='T', outputMag='J', vega=None, star=None ):
    """
    Routine to convert Tmag to JHK mag.
    For example:
        Kmag = Tmag + 2.5*log10( [ vega_K/vega_T ]*[ star_T/star_K ] )
    where vega_K/vega_T is flux of Vega in the K band relative to T band
    and star_K/star_T is flux of the star of interest in the K band
    relative to T band.

    :param inMag: magnitude in the input band.
    :param TeffK: stellar effective temperature (K), used for the model spectrum.
    :param loggCGS: stellar surface gravity (cgs), used for the model spectrum.
    :param inputMag: input band, one of 'V', 'T', 'J'.
    :param outputMag: output band, one of 'V', 'I', 'J', 'H', 'Ks'.
    :param vega: optional precomputed Vega spectrum (avoids recomputation).
    :param star: optional precomputed stellar spectrum (avoids recomputation).
    :raises ValueError: if inputMag or outputMag is not a supported band.
    """
    # Read in spectra for Vega and the star of interest; this is the most
    # time-consuming step, so both can be passed in by the caller.
    if vega is None:
        vega = spectrumVega( makePlot=False )
    if star is None:
        star = modelStellarSpectrum( TeffK, loggCGS, FeH=0 )
    # Read in the photometric passbands:
    V, I, T, J, H, Ks = photBands()
    inBands = { 'V':V, 'T':T, 'J':J }
    outBands = { 'V':V, 'I':I, 'J':J, 'H':H, 'Ks':Ks }
    # BUGFIX: previously an unsupported band left inM/M undefined and
    # raised a confusing NameError; fail loudly instead.
    if inputMag not in inBands:
        raise ValueError( 'Unsupported inputMag: {0}'.format( inputMag ) )
    if outputMag not in outBands:
        raise ValueError( 'Unsupported outputMag: {0}'.format( outputMag ) )
    inM = inBands[inputMag]
    M = outBands[outputMag]
    # Interpolate the Vega spectrum onto the photometric passbands
    # and then sum to get the relative fluxes in each passband:
    Fvega_I = np.sum( inM[:,1]*np.interp( inM[:,0], vega[0], vega[1] ) )
    Fvega_M = np.sum( M[:,1]*np.interp( M[:,0], vega[0], vega[1] ) )
    # Do the same for the star of interest:
    Fstar_I = np.sum( inM[:,1]*np.interp( inM[:,0], star[0], star[1] ) )
    Fstar_M = np.sum( M[:,1]*np.interp( M[:,0], star[0], star[1] ) )
    # Use the relative brightnesses to convert to the output magnitude:
    outMag = inMag + 2.5*np.log10( ( Fvega_M/Fvega_I )*( Fstar_I/Fstar_M ) )
    return outMag
def spectrumVega( makePlot=False ):
    """Return ( wavMicr, F ) for Vega from a Kurucz k93 model (9600 K, logg=4.1).

    F is the photon flux multiplied by wavelength (in Angstrom). If
    makePlot is True, the HST calspec Vega spectrum is overplotted for
    comparison.
    """
    sp = pysynphot.Icat( 'k93models', 9600, 0, 4.1 )
    sp.convert( pysynphot.units.Angstrom )
    sp.convert( pysynphot.units.Photlam )
    wavAngstrom = sp.wave
    wavMicr = wavAngstrom*(1e-4)
    F = sp.flux*wavAngstrom
    if makePlot:
        # Compare to observed/model Vega from HST calibration:
        ipath = os.path.join( os.environ['PYSYN_CDBS'], 'calspec',
                              'alpha_lyr_stis_010.fits' )
        hst = pysynphot.FileSpectrum( ipath )
        plt.figure()
        plt.plot( wavMicr, F/F.max(), '-k', label='Kurucz model' )
        plt.plot( hst.wave/1e4, hst.flux/hst.flux.max(), '-r', label='HST cal' )
        plt.xlim( [ 0, 2 ] )
        plt.title( 'Vega' )
    return wavMicr, F
def densityContours():
    """Load mass-radius contour tables for H2, H2O, MgSiO3 and Fe.

    The tables live alongside this module. Both columns are scaled by
    unit factors uM/uR, currently 1 (i.e. native Earth units).
    """
    idir = os.path.dirname( __file__ )
    uM = 1 # ( MEARTH_SI/MJUP_SI )
    uR = 1 # ( REARTH_SI/RJUP_SI )

    def load( fname, **kwargs ):
        # Read one contour table and apply the unit scalings.
        table = np.loadtxt( os.path.join( idir, fname ), **kwargs )
        table[:,0] = table[:,0]*uM
        table[:,1] = table[:,1]*uR
        return table

    h2 = load( 'contours_h2.txt' )
    h2o = load( 'contours_h2o.txt' )
    mgsio3 = load( 'contours_mgsio3.txt', skiprows=1 )
    fe = load( 'contours_fe.txt', skiprows=1 )
    return h2, h2o, mgsio3, fe
def readStellarTrack():
    """Read the AMES-Dusty 5 Gyr track and return ( RsRE, TeffK ).

    Stellar radii are derived from mass and surface gravity via
    R = sqrt( G*M/g ) and returned in Earth radii.
    """
    track = np.loadtxt( 'ames_dusty_5Gyr.txt' )
    MsSI = track[:,0]*MSUN_SI
    TeffK = track[:,1]
    # Column 3 is log(g) in cgs; convert to m/s^2.
    gSI = ( 10**track[:,3] )/100.
    RsRE = np.sqrt( GRAV_SI*MsSI/gSI )/REARTH_SI
    return RsRE, TeffK
def massRadiusChenKipping2017( RpRE_in ):
    """
    Evaluates the mean of the Chen & Kipping (2017) distribution.
    NOTE:
    The S3 index has been adjusted to be slightly positive. This
    is done purely for convenience, because otherwise it is not
    possible to quickly obtain a deterministic mass for a given
    radius, i.e. when there's a combination of negative and
    positive indices there will be mass degeneracies for certain
    input radii.

    :param RpRE_in: planet radius/radii in Earth radii (scalar or array).
    :return: planet mass(es) in Earth masses.
    """
    # Power law indices:
    S1 = 0.2790
    S2 = 0.589
    #S3 = -0.044 # value quoted in Chen & Kipping (2017)
    S3 = 0.01 # mild tweak done purely for convenience
    S4 = 0.881
    # Transition points from Table 1 (log10 of mass in Earth masses):
    T12ME = np.log10( 2.04 )
    T23ME = np.log10( 0.414*( MJUP_SI/MEARTH_SI ) )
    T34ME = np.log10( 0.080*( MSUN_SI/MEARTH_SI ) )
    # Terran power law constant from Table 1:
    C1curl = np.log10( 1.008 )
    # Iteratively derive the other power law constants so the relation
    # is continuous across the transitions:
    C2curl = C1curl + ( S1-S2 )*T12ME
    C3curl = C2curl + ( S2-S3 )*T23ME
    C4curl = C3curl + ( S3-S4 )*T34ME
    # Tabulate the relation on a dense grid. (Previously the transition
    # points were recomputed here as log10M12/23/34 with identical
    # expressions; that duplication has been removed.)
    log10MpME = np.linspace( -3, 5, 1000 )
    ixs1 = ( log10MpME<=T12ME )
    ixs2 = ( log10MpME>T12ME )*( log10MpME<=T23ME )
    ixs3 = ( log10MpME>T23ME )*( log10MpME<=T34ME )
    ixs4 = ( log10MpME>T34ME )
    log10RpRE = np.ones_like( log10MpME )
    log10RpRE[ixs1] = C1curl + ( log10MpME[ixs1]*S1 )
    log10RpRE[ixs2] = C2curl + ( log10MpME[ixs2]*S2 )
    log10RpRE[ixs3] = C3curl + ( log10MpME[ixs3]*S3 )
    log10RpRE[ixs4] = C4curl + ( log10MpME[ixs4]*S4 )
    # All indices are positive, so log10RpRE is monotonically increasing
    # and the relation can be inverted directly with np.interp. (A dead
    # 'if 0:' pdb debugging block has been removed here.)
    log10MpME_out = np.interp( np.log10( RpRE_in ), log10RpRE, log10MpME )
    return 10**log10MpME_out
def planetMassFromRadius( RpRE, whichRelation='Chen&Kipping2017' ):
    """
    Deterministic planet mass (Earth masses) from radius (Earth radii).

    whichRelation='Chen&Kipping2017' uses the mean of the Chen & Kipping
    (2017) relation; 'Kempton+2018' uses Eq 2 of Kempton et al (2018),
    which is defined only for Rp < 14.26 RE (NaN is returned above that).
    Accepts scalar or array input.
    """
    if whichRelation=='Chen&Kipping2017':
        MpME = massRadiusChenKipping2017( RpRE )
    elif whichRelation=='Kempton+2018':
        if np.ndim( RpRE )==0:
            if RpRE<1.23:
                MpME = 0.9718*( RpRE**3.58 )
            elif ( RpRE>=1.23 )*( RpRE<14.26 ):
                # BUGFIX: upper limit was 30, inconsistent with the
                # 14.26 RE cut applied in the array branch below.
                MpME = 1.436*( RpRE**1.70 )
            else:
                print( '\n\nPlanet radius too large... {0:.1f}RE'.format( RpRE ) )
                MpME = np.nan
        else:
            MpME = np.zeros_like( RpRE )
            ixs1 = ( RpRE<1.23 )
            MpME[ixs1] = 0.9718*( RpRE[ixs1]**3.58 )
            ixs2 = ( RpRE>=1.23 )*( RpRE<14.26 )
            MpME[ixs2] = 1.436*( RpRE[ixs2]**1.70 )
            ixs3 = ( RpRE>=14.26 )
            MpME[ixs3] = np.nan
    else:
        # Previously fell through to a NameError on MpME.
        raise ValueError( 'Unsupported whichRelation: {0}'.format( whichRelation ) )
    return MpME
def solarSystem():
    """Build a dictionary of solar-system benchmark values.

    Returns a dict with the solar Teff and radius plus, keyed per body,
    semimajor axes (AU, SI, stellar radii), equilibrium temperatures and
    radii (SI and Earth radii). Titan is included with Saturn's orbit.
    """
    z = { 'TstarK': 5800., 'RsSI': RSUN_SI }
    z['aAU'] = { 'Mercury': 0.3870993, 'Venus': 0.723336, 'Earth': 1.000003,
                 'Mars': 1.52371, 'Jupiter': 5.2029, 'Saturn': 9.537,
                 'Titan': 9.537,  # Titan shares Saturn's orbit
                 'Uranus': 19.189, 'Neptune': 30.0699 }
    planets = list( z['aAU'].keys() )
    # Derived orbital quantities:
    z['aSI'] = { k: AU_SI*z['aAU'][k] for k in planets }
    z['aRs'] = { k: z['aSI'][k]/z['RsSI'] for k in planets }
    z['TeqK'] = { k: calcTeqK( z['TstarK'], z['aRs'][k] ) for k in planets }
    # Radii (diameters halved, except Titan which is given as a radius):
    z['RpSI'] = { 'Mercury': ( 4879e3 )/2., 'Venus': ( 12104e3 )/2.,
                  'Earth': ( 12756e3 )/2., 'Mars': ( 6792e3 )/2.,
                  'Jupiter': ( 142984e3 )/2., 'Saturn': ( 120536e3 )/2.,
                  'Titan': 2575e3, 'Uranus': ( 51118e3 )/2.,
                  'Neptune': ( 49528e3 )/2. }
    z['RpRE'] = { k: z['RpSI'][k]/REARTH_SI for k in planets }
    return z
def computeTSM( RpValRE, MpValME, RsRS, TeqK, Jmag ):
    """Transmission Spectroscopy Metric of Kempton et al (2018).

    Inputs are arrays of planet radius (Earth radii), planet mass (Earth
    masses), stellar radius (solar radii), equilibrium temperature (K)
    and J magnitude. Entries with non-finite radii return NaN.
    """
    nAll = len( RpValRE )
    finite = np.isfinite( RpValRE )
    # Unscaled metric: ( Rp^3 * Teq / ( Mp * Rs^2 ) ) * 10^( -J/5 ):
    y = ( ( RpValRE**3. )*TeqK/( MpValME*( RsRS**2. ) ) )*( 10**( -Jmag/5. ) )
    # Scale factors from Table 1 of Kempton et al (2018), one per radius
    # bin with edges at 1.5, 2.75, 4.0 and 10 Earth radii:
    edges = np.array( [ 1.5, 2.75, 4.0, 10. ] )
    factors = np.array( [ 0.190, 1.26, 1.28, 1.15, 1.0 ] )
    # Non-finite radii are mapped to bin 0 here, then overwritten below.
    scale = factors[ np.digitize( np.where( finite, RpValRE, 0.0 ), edges ) ]
    TSM = np.zeros( nAll )
    TSM[finite] = ( scale*y )[finite]
    TSM[~finite] = np.nan
    return TSM
def computeESM( TeqK, RpRs, TstarK, Kmag ):
    """Emission Spectroscopy Metric of Kempton et al (2018).

    Evaluates the planet/star Planck flux ratio at 7.5 micron, assuming
    a dayside temperature of 1.10*Teq (Section 3.2 of Kempton+2018).
    """
    wavRefSI = 7.5e-6  # reference wavelength (m)
    TdayK = 1.10*TeqK  # from Section 3.2 of Kempton+2018
    fluxRatio = PlanckFuncSI( wavRefSI, TdayK )/PlanckFuncSI( wavRefSI, TstarK )
    return 4.29*(1e6)*fluxRatio*( RpRs**2. )*( 10**( -Kmag/5. ) )
def PlanckFuncSI( wavSI, T ):
    """
    Planck surface flux ( pi * B_lambda * lambda ) for wavelength wavSI
    (m) and temperature T (K).

    NOTE(review): the original docstring said "cgs units", but the
    constants and inputs here are SI.
    """
    hSI = np.longdouble( 6.62607015e-34 ) # Planck constant (J*s)
    cSI = np.longdouble( 2.9979245800e8 ) # speed of light (m/s)
    kSI = np.longdouble( 1.380649e-23 ) # Boltzman constant (J/K)
    # Spectral radiance B_lambda(T):
    radiance = ( 2.*hSI*( cSI**2. )/( wavSI**5. ) )/( np.exp( hSI*cSI/kSI/T/wavSI ) - 1. )
    # Surface flux:
    return np.pi*radiance*wavSI
def calcTeqK( TstarK, aRs ):
    """Zero-albedo equilibrium temperature for a planet at aRs stellar
    radii from a star of effective temperature TstarK (K)."""
    redistribution = 0.25**0.25  # (1/4)^(1/4) full-redistribution factor
    return ( TstarK/np.sqrt( aRs ) )*redistribution
def getThresholdTSM_REDUNDANT( RpRE, framework='ACWG' ):
    """
    Thresholds from Figure 5 of Kempton et al. (2018).
    """
    if framework=='ACWG':
        # ( upper radius edge in RE, threshold ) for: terrestrials,
        # small sub-Neptunes, large sub-Neptunes, sub-Jovians.
        for edge, threshold in [ ( 1.50, 10 ), ( 2.75, 90 ),
                                 ( 4.00, 90 ), ( 10.00, 90 ) ]:
            if RpRE<edge:
                return threshold
        return 100  # gas giants
    elif framework=='TOIs':
        # Terrestrials get 10; everything else gets 50.
        return 10 if RpRE<1.50 else 50
def getTSMStr_REDUNDANT( thresholdTSM ):
    """Human-readable description of the TSM cut that was applied.

    thresholdTSM is either the string 'ACWG'/'TOIs' or a numeric cut.
    """
    if thresholdTSM=='ACWG':
        return '* Kempton et al. (2018) TSM cuts applied'
    if thresholdTSM=='TOIs':
        return 'Only targets with TSM>50 (Rp>1.5RE) or TSM>10 (Rp<1.5RE) shown'
    return 'Only targets with TSM>{0:.0f} shown'.format( thresholdTSM )
def getRARanges():
    """Split 0-24 h of right ascension into 4 h-wide [lo, hi] bins."""
    width = 4
    edges = np.arange( 0, 24+width, width )
    return [ [ lo, hi ] for lo, hi in zip( edges[:-1], edges[1:] ) ]
def getRARange( month ):
    """Approximate observable RA range (hours) for a given month.

    RAmid is approximately the sidereal angle (i.e. overhead RA) at
    midnight on the 20th day of the month; the range spans +/-6 h
    around it.
    """
    RAmidByMonth = { 'Jan':8, 'Feb':10, 'Mar':12, 'Apr':14,
                     'May':16, 'Jun':18, 'Jul':20, 'Aug':22,
                     'Sep':0, 'Oct':2, 'Nov':4, 'Dec':6 }
    RAmid = RAmidByMonth[month]
    dRA = 6 # +/- RA hr from overhead at midnight.
    return [ RAmid-dRA, RAmid+dRA ]
def processRARestriction( RAMin_hr, RAMax_hr ):
    """Normalise an optional RA restriction into ( label, min, max ).

    Either bound may be None, in which case it is replaced by -/+1e9
    (i.e. effectively unbounded). Returns a descriptive string plus the
    numeric bounds actually applied.
    """
    if ( RAMin_hr is not None ) and ( RAMax_hr is not None ):
        RAStr = '{0:.0f}<RA(hr)<{1:.0f}'.format( RAMin_hr, RAMax_hr )
    elif ( RAMin_hr is not None ) or ( RAMax_hr is not None ):
        if RAMin_hr is None:
            RAMin_hr = -1e9
            # BUGFIX: was '{0:.1f}', inconsistent with every other label
            # here and in processDecRestriction.
            RAStr = 'Only RA(hr)<{0:.0f} targets'.format( RAMax_hr )
        else:
            RAMax_hr = 1e9
            RAStr = 'Only RA(hr)>{0:.0f} targets'.format( RAMin_hr )
    else:
        RAMin_hr = -1e9
        RAMax_hr = 1e9
        RAStr = 'No RA restrictions applied'
    return RAStr, RAMin_hr, RAMax_hr
def processDecRestriction( DecMin_deg, DecMax_deg ):
    """Normalise an optional Dec restriction into ( label, min, max ).

    Mirrors processRARestriction: None bounds become -/+1e9 (effectively
    unbounded) and a descriptive label is returned with them.
    """
    if ( DecMin_deg is None ) and ( DecMax_deg is None ):
        DecMin_deg, DecMax_deg = -1e9, 1e9
        DecStr = 'No Dec restrictions applied'
    elif ( DecMin_deg is not None ) and ( DecMax_deg is not None ):
        DecStr = '{0:.0f}<Dec(deg)<{1:.0f}'.format( DecMin_deg, DecMax_deg )
    elif DecMin_deg is None:
        DecMin_deg = -1e9
        DecStr = 'Only Dec(deg)<{0:.0f} targets'.format( DecMax_deg )
    else:
        DecMax_deg = 1e9
        DecStr = 'Only Dec(deg)>{0:.0f} targets'.format( DecMin_deg )
    return DecStr, DecMin_deg, DecMax_deg
def getStarColor( T ):
    """RGB colour (values in [0,1]) for a star of effective temperature
    T (K), binned by approximate spectral type."""
    # ( upper Teff edge in K, RGB triple ) per spectral-type bin:
    palette = [ ( 3400, [178,24,43] ),    # late-M
                ( 3800, [252,78,42] ),    # early-M
                ( 4600, [253,141,60] ),   # late-K
                ( 5200, [254,178,76] ),   # early-K
                ( 5700, [254,217,118] ),  # late-G
                ( 6000, [255,237,160] ),  # early-G
                ( 6700, [158,202,225] ),  # late-F
                ( 7400, [107,174,214] ) ] # early-F
    for edge, rgb in palette:
        if T<edge:
            return np.array( rgb )/256.
    return np.array( [8,81,156] )/256.    # OBA
def getAllStarColors():
    """Return ( colors, SpTs ): one representative colour per spectral
    type label, plus the labels in their canonical order."""
    representativeTeffK = [ ( 'late-M', 3200 ), ( 'early-M', 3600 ),
                            ( 'late-K', 4000 ), ( 'early-K', 4800 ),
                            ( 'late-G', 5500 ), ( 'early-G', 5900 ),
                            ( 'late-F', 6500 ), ( 'early-F', 7200 ),
                            ( 'OBA', 7500 ) ]
    c = { SpT: getStarColor( TeffK ) for SpT, TeffK in representativeTeffK }
    SpTs = [ SpT for SpT, TeffK in representativeTeffK ]
    return c, SpTs
def computeStellarMass( RsRS, loggstarCGS ):
    """Stellar mass in solar units from the radius (solar units) and
    log(g) (cgs), via M = g R^2 / G."""
    gStarSI = 10**loggstarCGS / 100 # converts log(g)[cm/s^2] to g [m/s^2]
    RsSI = RsRS * RSUN_SI # stellar radius in metres
    MsMS = gStarSI * RsSI**2 / GRAV_SI
    return MsMS / MSUN_SI # stellar mass in solar units
def computeRVSemiAmp( Pday, MpME, MsMS ):
    """
    Returns RV semi-amplitude in m/s.
    Equation from: https://exoplanetarchive.ipac.caltech.edu/docs/poet_calculations.html
    Assumes an edge-on (i=90 deg) circular (e=0) orbit.
    """
    MpMJ = MpME * MEARTH_SI / MJUP_SI # converts mass from Earth unit to Jupiter unit
    incl = math.pi/2  # edge-on
    ecc = 0           # circular
    periodTerm = (Pday)**(-1/3)
    massTerm = (MpMJ * math.sin(incl)) / (MsMS + 9.458e-4 * (MpMJ)**(2/3))
    eccTerm = 1 / (1 - ecc**2)**(1/2)
    return 203 * periodTerm * massTerm * eccTerm
def TeqK_Kempton (Pday, MsMS, TstarK, RsRS):
    """
    Computes TeqK values based on Kempton assuming a circular orbit.

    Derives the semimajor axis from Kepler's third law, then applies the
    zero-albedo full-redistribution temperature. Returns ( TeqK, aRs ).
    """
    Psec = Pday*24*3600       # orbital period in seconds
    MsSI = MsMS * MSUN_SI     # stellar mass in kg
    RsSI = RsRS * RSUN_SI     # stellar radius in m
    # Kepler's third law: a^3 = G M P^2 / (4 pi^2)
    aSI = (GRAV_SI * MsSI * Psec**2/(4*np.pi**2))**(1/3)
    TeqK = TstarK * (RsSI/aSI)**(1/2) * (1/4)**(1/4)
    return TeqK, aSI/RsSI
def HeatMapValues(TRange, RRange, TeqK, RpValRE, predTeqK, predRpVal):
    """Ratio of observed (TOI) to predicted fraction of planets inside a
    ( Teq, Rp ) box.

    TRange/RRange are (lo, hi) bounds; the remaining arguments are the
    observed and predicted temperature/radius samples. A zero predicted
    fraction is replaced by 1 to avoid division by zero.
    """
    def _frac_in_box(temps, radii):
        # Fraction of (temp, radius) pairs lying inside the box.
        temps = list(temps[:])
        radii = list(radii[:])
        n_inside = 0
        for i in range(len(temps)):
            if TRange[0] <= temps[i] <= TRange[1] and RRange[0] <= radii[i] <= RRange[1]:
                n_inside += 1
        return n_inside/len(temps)

    toi_frac = _frac_in_box(TeqK, RpValRE)
    pred_frac = _frac_in_box(predTeqK, predRpVal)
    if pred_frac == 0:  # prevents divide-by-zero
        pred_frac = 1
    # Final value is the ratio of the TOI fraction to the predicted one.
    return toi_frac/pred_frac
def Normalize(values, clip, scaled=True):
    """Map values to [0, 1] for heat-map colouring.

    The scaling range is taken from the values after dropping >3-sigma
    outliers and non-finite entries. Non-finite entries map to 0.9999
    (rendered white); non-positive values map into the cool half of the
    colormap and positive values into the warm half.

    NOTE(review): the clip and scaled parameters appear unused here;
    kept for interface compatibility — confirm against callers.
    """
    mean = np.average(values)
    sigma = np.std(values)
    # Values used to set the colour range: drop outliers and non-finite.
    kept = list(values)
    for v in values:
        if np.abs(v - mean) > 3*sigma:
            kept.remove(v)
        elif not np.isfinite(v):
            kept.remove(v)
    lo = np.min(kept)
    hi = np.max(kept)
    normalized = []
    for v in values:
        if not np.isfinite(v):
            norm = 0.9999  # zero TOIs in box -> box gets coloured white
        elif v <= 0:  # TOI fraction < pred fraction -> cool colours
            norm = 0.99*(v - lo)/(0 - lo)*0.5
            if norm < 0:
                norm = 0
        else:  # TOI fraction > pred fraction -> warm colours
            norm = 0.5 + 0.99*v/hi*0.5
            if norm > 1:
                norm = 0.99
        normalized.append(norm)
    return normalized
| [
"numpy.log10",
"numpy.sqrt",
"requests.post",
"numpy.longdouble",
"numpy.array",
"numpy.isfinite",
"numpy.arange",
"pysynphot.FileSpectrum",
"json.dumps",
"matplotlib.pyplot.plot",
"numpy.ndim",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"numpy.min",
"pysynphot.Icat",
"numpy.abs",
... | [((563, 588), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (578, 588), False, 'import pdb, sys, os, time, requests, json\n'), ((606, 659), 'os.path.join', 'os.path.join', (['idir', '"""tess-response-function-v2.0.csv"""'], {}), "(idir, 'tess-response-function-v2.0.csv')\n", (618, 659), False, 'import pdb, sys, os, time, requests, json\n'), ((918, 953), 'numpy.loadtxt', 'np.loadtxt', (['tessPath'], {'delimiter': '""","""'}), "(tessPath, delimiter=',')\n", (928, 953), True, 'import numpy as np\n'), ((1955, 2003), 'pysynphot.Icat', 'pysynphot.Icat', (['"""k93models"""', 'TeffK', 'FeH', 'loggCGS'], {}), "('k93models', TeffK, FeH, loggCGS)\n", (1969, 2003), False, 'import pysynphot\n'), ((5645, 5656), 'time.time', 'time.time', ([], {}), '()\n', (5654, 5656), False, 'import pdb, sys, os, time, requests, json\n'), ((5733, 5744), 'time.time', 'time.time', ([], {}), '()\n', (5742, 5744), False, 'import pdb, sys, os, time, requests, json\n'), ((5836, 5847), 'time.time', 'time.time', ([], {}), '()\n', (5845, 5847), False, 'import pdb, sys, os, time, requests, json\n'), ((6379, 6390), 'time.time', 'time.time', ([], {}), '()\n', (6388, 6390), False, 'import pdb, sys, os, time, requests, json\n'), ((6591, 6602), 'time.time', 'time.time', ([], {}), '()\n', (6600, 6602), False, 'import pdb, sys, os, time, requests, json\n'), ((6755, 6766), 'time.time', 'time.time', ([], {}), '()\n', (6764, 6766), False, 'import pdb, sys, os, time, requests, json\n'), ((6935, 6946), 'time.time', 'time.time', ([], {}), '()\n', (6944, 6946), False, 'import pdb, sys, os, time, requests, json\n'), ((7012, 7053), 'pysynphot.Icat', 'pysynphot.Icat', (['"""k93models"""', '(9600)', '(0)', '(4.1)'], {}), "('k93models', 9600, 0, 4.1)\n", (7026, 7053), False, 'import pysynphot\n'), ((7783, 7808), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7798, 7808), False, 'import pdb, sys, os, time, requests, json\n'), ((8451, 8484), 'numpy.loadtxt', 
'np.loadtxt', (['"""ames_dusty_5Gyr.txt"""'], {}), "('ames_dusty_5Gyr.txt')\n", (8461, 8484), True, 'import numpy as np\n'), ((8607, 8636), 'numpy.sqrt', 'np.sqrt', (['(GRAV_SI * MsSI / gSI)'], {}), '(GRAV_SI * MsSI / gSI)\n', (8614, 8636), True, 'import numpy as np\n'), ((9404, 9418), 'numpy.log10', 'np.log10', (['(2.04)'], {}), '(2.04)\n', (9412, 9418), True, 'import numpy as np\n'), ((9433, 9472), 'numpy.log10', 'np.log10', (['(0.414 * (MJUP_SI / MEARTH_SI))'], {}), '(0.414 * (MJUP_SI / MEARTH_SI))\n', (9441, 9472), True, 'import numpy as np\n'), ((9485, 9523), 'numpy.log10', 'np.log10', (['(0.08 * (MSUN_SI / MEARTH_SI))'], {}), '(0.08 * (MSUN_SI / MEARTH_SI))\n', (9493, 9523), True, 'import numpy as np\n'), ((9584, 9599), 'numpy.log10', 'np.log10', (['(1.008)'], {}), '(1.008)\n', (9592, 9599), True, 'import numpy as np\n'), ((9785, 9809), 'numpy.linspace', 'np.linspace', (['(-3)', '(5)', '(1000)'], {}), '(-3, 5, 1000)\n', (9796, 9809), True, 'import numpy as np\n'), ((9828, 9842), 'numpy.log10', 'np.log10', (['(2.04)'], {}), '(2.04)\n', (9836, 9842), True, 'import numpy as np\n'), ((9860, 9899), 'numpy.log10', 'np.log10', (['(0.414 * (MJUP_SI / MEARTH_SI))'], {}), '(0.414 * (MJUP_SI / MEARTH_SI))\n', (9868, 9899), True, 'import numpy as np\n'), ((9915, 9953), 'numpy.log10', 'np.log10', (['(0.08 * (MSUN_SI / MEARTH_SI))'], {}), '(0.08 * (MSUN_SI / MEARTH_SI))\n', (9923, 9953), True, 'import numpy as np\n'), ((10158, 10181), 'numpy.ones_like', 'np.ones_like', (['log10MpME'], {}), '(log10MpME)\n', (10170, 10181), True, 'import numpy as np\n'), ((13777, 13791), 'numpy.zeros', 'np.zeros', (['nAll'], {}), '(nAll)\n', (13785, 13791), True, 'import numpy as np\n'), ((14417, 14446), 'numpy.longdouble', 'np.longdouble', (['(6.62607015e-34)'], {}), '(6.62607015e-34)\n', (14430, 14446), True, 'import numpy as np\n'), ((14483, 14509), 'numpy.longdouble', 'np.longdouble', (['(299792458.0)'], {}), '(299792458.0)\n', (14496, 14509), True, 'import numpy as np\n'), ((14548, 
14575), 'numpy.longdouble', 'np.longdouble', (['(1.380649e-23)'], {}), '(1.380649e-23)\n', (14561, 14575), True, 'import numpy as np\n'), ((15982, 16005), 'numpy.arange', 'np.arange', (['(0)', '(24 + m)', 'm'], {}), '(0, 24 + m, m)\n', (15991, 16005), True, 'import numpy as np\n'), ((21587, 21605), 'numpy.average', 'np.average', (['values'], {}), '(values)\n', (21597, 21605), True, 'import numpy as np\n'), ((21620, 21634), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (21626, 21634), True, 'import numpy as np\n'), ((21869, 21888), 'numpy.min', 'np.min', (['box_values3'], {}), '(box_values3)\n', (21875, 21888), True, 'import numpy as np\n'), ((21902, 21921), 'numpy.max', 'np.max', (['box_values3'], {}), '(box_values3)\n', (21908, 21921), True, 'import numpy as np\n'), ((686, 720), 'os.path.join', 'os.path.join', (['idir', '"""Bessel_V.dat"""'], {}), "(idir, 'Bessel_V.dat')\n", (698, 720), False, 'import pdb, sys, os, time, requests, json\n'), ((806, 840), 'os.path.join', 'os.path.join', (['idir', '"""Bessel_I.dat"""'], {}), "(idir, 'Bessel_I.dat')\n", (818, 840), False, 'import pdb, sys, os, time, requests, json\n'), ((1031, 1064), 'os.path.join', 'os.path.join', (['idir', '"""2MASS_J.dat"""'], {}), "(idir, '2MASS_J.dat')\n", (1043, 1064), False, 'import pdb, sys, os, time, requests, json\n'), ((1144, 1177), 'os.path.join', 'os.path.join', (['idir', '"""2MASS_H.dat"""'], {}), "(idir, '2MASS_H.dat')\n", (1156, 1177), False, 'import pdb, sys, os, time, requests, json\n'), ((1257, 1291), 'os.path.join', 'os.path.join', (['idir', '"""2MASS_Ks.dat"""'], {}), "(idir, '2MASS_Ks.dat')\n", (1269, 1291), False, 'import pdb, sys, os, time, requests, json\n'), ((1372, 1384), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1382, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1595, 1607), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1605, 1607), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3125), 'json.dumps', 'json.dumps', 
(['request'], {}), '(request)\n', (3116, 3125), False, 'import pdb, sys, os, time, requests, json\n'), ((3149, 3170), 'urllib.parse.quote', 'urlencode', (['req_string'], {}), '(req_string)\n', (3158, 3170), True, 'from urllib.parse import quote as urlencode\n'), ((3232, 3305), 'requests.post', 'requests.post', (['request_url'], {'data': "('request=' + req_string)", 'headers': 'headers'}), "(request_url, data='request=' + req_string, headers=headers)\n", (3245, 3305), False, 'import pdb, sys, os, time, requests, json\n'), ((3806, 3827), 'json.loads', 'json.loads', (['outString'], {}), '(outString)\n', (3816, 3827), False, 'import pdb, sys, os, time, requests, json\n'), ((4401, 4431), 'numpy.array', 'np.array', (['maglist'], {'dtype': 'float'}), '(maglist, dtype=float)\n', (4409, 4431), True, 'import numpy as np\n'), ((7324, 7400), 'os.path.join', 'os.path.join', (["os.environ['PYSYN_CDBS']", '"""calspec"""', '"""alpha_lyr_stis_010.fits"""'], {}), "(os.environ['PYSYN_CDBS'], 'calspec', 'alpha_lyr_stis_010.fits')\n", (7336, 7400), False, 'import pdb, sys, os, time, requests, json\n'), ((7458, 7487), 'pysynphot.FileSpectrum', 'pysynphot.FileSpectrum', (['ipath'], {}), '(ipath)\n', (7480, 7487), False, 'import pysynphot\n'), ((7498, 7510), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7508, 7510), True, 'import matplotlib.pyplot as plt\n'), ((7667, 7683), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 2]'], {}), '([0, 2])\n', (7675, 7683), True, 'import matplotlib.pyplot as plt\n'), ((7696, 7713), 'matplotlib.pyplot.title', 'plt.title', (['"""Vega"""'], {}), "('Vega')\n", (7705, 7713), True, 'import matplotlib.pyplot as plt\n'), ((7832, 7869), 'os.path.join', 'os.path.join', (['idir', '"""contours_h2.txt"""'], {}), "(idir, 'contours_h2.txt')\n", (7844, 7869), False, 'import pdb, sys, os, time, requests, json\n'), ((7896, 7934), 'os.path.join', 'os.path.join', (['idir', '"""contours_h2o.txt"""'], {}), "(idir, 'contours_h2o.txt')\n", (7908, 7934), False, 
'import pdb, sys, os, time, requests, json\n'), ((7964, 8005), 'os.path.join', 'os.path.join', (['idir', '"""contours_mgsio3.txt"""'], {}), "(idir, 'contours_mgsio3.txt')\n", (7976, 8005), False, 'import pdb, sys, os, time, requests, json\n'), ((8043, 8080), 'os.path.join', 'os.path.join', (['idir', '"""contours_fe.txt"""'], {}), "(idir, 'contours_fe.txt')\n", (8055, 8080), False, 'import pdb, sys, os, time, requests, json\n'), ((10432, 10449), 'numpy.log10', 'np.log10', (['RpRE_in'], {}), '(RpRE_in)\n', (10440, 10449), True, 'import numpy as np\n'), ((10528, 10540), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10538, 10540), True, 'import matplotlib.pyplot as plt\n'), ((10549, 10597), 'matplotlib.pyplot.plot', 'plt.plot', (['log10MpME[ixs1]', 'log10RpRE[ixs1]', '"""-r"""'], {}), "(log10MpME[ixs1], log10RpRE[ixs1], '-r')\n", (10557, 10597), True, 'import matplotlib.pyplot as plt\n'), ((10608, 10656), 'matplotlib.pyplot.plot', 'plt.plot', (['log10MpME[ixs2]', 'log10RpRE[ixs2]', '"""-k"""'], {}), "(log10MpME[ixs2], log10RpRE[ixs2], '-k')\n", (10616, 10656), True, 'import matplotlib.pyplot as plt\n'), ((10667, 10715), 'matplotlib.pyplot.plot', 'plt.plot', (['log10MpME[ixs3]', 'log10RpRE[ixs3]', '"""-g"""'], {}), "(log10MpME[ixs3], log10RpRE[ixs3], '-g')\n", (10675, 10715), True, 'import matplotlib.pyplot as plt\n'), ((10726, 10774), 'matplotlib.pyplot.plot', 'plt.plot', (['log10MpME[ixs4]', 'log10RpRE[ixs4]', '"""-c"""'], {}), "(log10MpME[ixs4], log10RpRE[ixs4], '-c')\n", (10734, 10774), True, 'import matplotlib.pyplot as plt\n'), ((10828, 10843), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10841, 10843), False, 'import pdb, sys, os, time, requests, json\n'), ((13121, 13136), 'numpy.arange', 'np.arange', (['nAll'], {}), '(nAll)\n', (13130, 13136), True, 'import numpy as np\n'), ((13139, 13159), 'numpy.isfinite', 'np.isfinite', (['RpValRE'], {}), '(RpValRE)\n', (13150, 13159), True, 'import numpy as np\n'), ((13174, 13189), 'numpy.arange', 
'np.arange', (['nAll'], {}), '(nAll)\n', (13183, 13189), True, 'import numpy as np\n'), ((6422, 6460), 'numpy.interp', 'np.interp', (['inM[:, 0]', 'vega[0]', 'vega[1]'], {}), '(inM[:, 0], vega[0], vega[1])\n', (6431, 6460), True, 'import numpy as np\n'), ((6493, 6529), 'numpy.interp', 'np.interp', (['M[:, 0]', 'vega[0]', 'vega[1]'], {}), '(M[:, 0], vega[0], vega[1])\n', (6502, 6529), True, 'import numpy as np\n'), ((6634, 6672), 'numpy.interp', 'np.interp', (['inM[:, 0]', 'star[0]', 'star[1]'], {}), '(inM[:, 0], star[0], star[1])\n', (6643, 6672), True, 'import numpy as np\n'), ((6705, 6741), 'numpy.interp', 'np.interp', (['M[:, 0]', 'star[0]', 'star[1]'], {}), '(M[:, 0], star[0], star[1])\n', (6714, 6741), True, 'import numpy as np\n'), ((6874, 6923), 'numpy.log10', 'np.log10', (['(Fvega_M / Fvega_I * (Fstar_I / Fstar_M))'], {}), '(Fvega_M / Fvega_I * (Fstar_I / Fstar_M))\n', (6882, 6923), True, 'import numpy as np\n'), ((13192, 13212), 'numpy.isfinite', 'np.isfinite', (['RpValRE'], {}), '(RpValRE)\n', (13203, 13212), True, 'import numpy as np\n'), ((14687, 14705), 'numpy.exp', 'np.exp', (['(c1 / wavSI)'], {}), '(c1 / wavSI)\n', (14693, 14705), True, 'import numpy as np\n'), ((14839, 14851), 'numpy.sqrt', 'np.sqrt', (['aRs'], {}), '(aRs)\n', (14846, 14851), True, 'import numpy as np\n'), ((18239, 18262), 'numpy.array', 'np.array', (['[178, 24, 43]'], {}), '([178, 24, 43])\n', (18247, 18262), True, 'import numpy as np\n'), ((21707, 21730), 'numpy.abs', 'np.abs', (['(value - box_avg)'], {}), '(value - box_avg)\n', (21713, 21730), True, 'import numpy as np\n'), ((21980, 21998), 'numpy.isfinite', 'np.isfinite', (['value'], {}), '(value)\n', (21991, 21998), True, 'import numpy as np\n'), ((11150, 11163), 'numpy.ndim', 'np.ndim', (['RpRE'], {}), '(RpRE)\n', (11157, 11163), True, 'import numpy as np\n'), ((11494, 11513), 'numpy.zeros_like', 'np.zeros_like', (['RpRE'], {}), '(RpRE)\n', (11507, 11513), True, 'import numpy as np\n'), ((18323, 18346), 'numpy.array', 
'np.array', (['[252, 78, 42]'], {}), '([252, 78, 42])\n', (18331, 18346), True, 'import numpy as np\n'), ((21797, 21815), 'numpy.isfinite', 'np.isfinite', (['value'], {}), '(value)\n', (21808, 21815), True, 'import numpy as np\n'), ((4583, 4594), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (4591, 4594), True, 'import numpy as np\n'), ((18406, 18430), 'numpy.array', 'np.array', (['[253, 141, 60]'], {}), '([253, 141, 60])\n', (18414, 18430), True, 'import numpy as np\n'), ((20142, 20153), 'math.sin', 'math.sin', (['i'], {}), '(i)\n', (20150, 20153), False, 'import math\n'), ((18491, 18515), 'numpy.array', 'np.array', (['[254, 178, 76]'], {}), '([254, 178, 76])\n', (18499, 18515), True, 'import numpy as np\n'), ((18575, 18600), 'numpy.array', 'np.array', (['[254, 217, 118]'], {}), '([254, 217, 118])\n', (18583, 18600), True, 'import numpy as np\n'), ((18661, 18686), 'numpy.array', 'np.array', (['[255, 237, 160]'], {}), '([255, 237, 160])\n', (18669, 18686), True, 'import numpy as np\n'), ((18746, 18771), 'numpy.array', 'np.array', (['[158, 202, 225]'], {}), '([158, 202, 225])\n', (18754, 18771), True, 'import numpy as np\n'), ((18832, 18857), 'numpy.array', 'np.array', (['[107, 174, 214]'], {}), '([107, 174, 214])\n', (18840, 18857), True, 'import numpy as np\n'), ((18891, 18913), 'numpy.array', 'np.array', (['[8, 81, 156]'], {}), '([8, 81, 156])\n', (18899, 18913), True, 'import numpy as np\n')] |
from typing import Tuple, List, Dict
from numpy.typing import NDArray
import numpy as np
import pandas as pd
from ..ray_tracer.obj_reader import ObjToTriangles
from ...utils import VecNorm, VecDistance, VecAngle, PosBetweenXZ, SortPointsFromPlaneY
from ...utils.constants import EPSILON, MIN_ROOF_EDGE_DISTANCE, ROOF_MIN_ANGLE, ROOF_MAX_SCAN, MAX_FLT
from .bvh import BVH
class Tracer:
    """Ray tracer over a triangle mesh loaded from an OBJ file.

    Wraps a BVH acceleration structure built from the mesh and provides
    line-of-sight checks, roof-edge tracing and reflection tracing
    between transmitter and receiver positions.
    """
    # Axis-aligned bounds of the loaded map (set from the BVH root in __init__).
    min_bound = None
    max_bound = None
    # Maximum reflection order to trace (set in __init__).
    ref_max = None
    def __init__(self, object_file_path, ref_max=2):
        """
        Initialize Map for Ray Tracer
        :param object_file_path: path of the OBJ file describing the scene geometry
        :param ref_max: maximum reflection order to trace (default 2)
        """
        self.triangles = ObjToTriangles(object_file_path)  # triangles parsed from the OBJ file
        self.map = BVH(self.triangles)  # bounding-volume hierarchy for intersection queries
        self.min_bound = self.map.root.min_bound
        self.max_bound = self.map.root.max_bound
        self.ref_max = ref_max
def trace_outdoor(self, tx_pos: List[float], rx_pos: List[float]):
"""
Trace the possible ray paths from tx_pos to rx_pos in outdoor scenario (open sky)
:param tx_pos: Transmitting Position
:param rx_pos: Receiving Position
:return: Outdoor Tracing Result
"""
tx_pos = np.array(tx_pos)
rx_pos = np.array(rx_pos)
result = {
"direct": True,
"reflections": [],
"roof_edges": [],
'tx_pos': tx_pos,
'rx_pos': rx_pos
}
if self.direct_path(tx_pos, rx_pos):
result["direct"] = True
else:
result["direct"] = False
sorted_edges = SortPointsFromPlaneY(tx_pos, self.trace_roof_edges(tx_pos, rx_pos))
result["roof_edges"] = sorted_edges
result['reflections'] = self.trace_reflections(tx_pos, rx_pos)
return result
@staticmethod
def make_ray(tx_pos: NDArray, rx_pos: NDArray) -> Tuple[NDArray, NDArray]:
tx_pos = np.array(tx_pos)
rx_pos = np.array(rx_pos)
ray_org: NDArray = tx_pos
ray_dir: NDArray = VecNorm(rx_pos - tx_pos)
ray = (ray_org, ray_dir)
return ray
def direct_path(self, tx_pos: NDArray, rx_pos: NDArray):
"""
Check Direct Path
:param tx_pos: Transmitting Position
:param rx_pos: Receiving Position
:return: true if tx_pos and rx_pos are in line-of-sight.
"""
ray = Tracer.make_ray(tx_pos, rx_pos)
point_distance = VecDistance(tx_pos, rx_pos)
nearest_hit = self.map.is_intersect(ray)
if nearest_hit < 0:
return True
return nearest_hit > point_distance
@staticmethod
def get_mirror_point(pos: NDArray, triangle: 'Triangle') -> NDArray:
normal = triangle.normal
b = np.dot(normal, triangle.pointB)
c = np.dot(pos, normal)
d = np.dot(normal, normal)
if d == 0:
d = EPSILON
t = (b - c) / d
return pos + normal * 2 * t
def trace_reflections(self, tx_pos: NDArray, rx_pos: NDArray) -> Dict:
"""
Trace Reflection Points
:param tx_pos: Transmitting Position
:param rx_pos: Receiving Position
:return: reflection points
"""
reflections = {
'single': self.trace_single_reflect(tx_pos, rx_pos),
'double': self.trace_double_reflect(tx_pos, rx_pos)
}
return reflections
def trace_single_reflect(self, tx_pos, rx_pos) -> List[NDArray]:
if self.ref_max < 1:
return []
single_reflections = []
for triangle in self.map.root.triangles:
mirror_point = Tracer.get_mirror_point(tx_pos, triangle)
dir_mirror_to_rx = VecNorm(rx_pos - mirror_point)
ray = (mirror_point, dir_mirror_to_rx)
mirror_to_rx_dist = triangle.is_intersect(ray)
if mirror_to_rx_dist < 0:
continue
point_on_triangle = mirror_point + dir_mirror_to_rx * (mirror_to_rx_dist + EPSILON)
if self.direct_path(tx_pos, point_on_triangle) and \
self.direct_path(rx_pos, point_on_triangle):
single_reflections.append(point_on_triangle)
return single_reflections
def trace_double_reflect(self, tx_pos, rx_pos):
if self.ref_max < 2:
return []
double_reflections = []
tx_mirror_points = []
rx_mirror_points = []
triangle_n = len(self.map.root.triangles)
for triangle in self.map.root.triangles:
tx_mirror_point = Tracer.get_mirror_point(tx_pos, triangle)
rx_mirror_point = Tracer.get_mirror_point(rx_pos, triangle)
tx_mirror_points.append(tx_mirror_point)
rx_mirror_points.append(rx_mirror_point)
for tx_mirror_i in range(triangle_n):
tx_mirror_point = tx_mirror_points[tx_mirror_i]
tx_triangle = self.map.root.triangles[tx_mirror_i]
for rx_mirror_i in range(tx_mirror_i + 1, triangle_n):
rx_mirror_point = rx_mirror_points[rx_mirror_i]
rx_triangle = self.map.root.triangles[rx_mirror_i]
tx_mirror_to_rx_mirror_dir = VecNorm(rx_mirror_point - tx_mirror_point)
rx_mirror_to_tx_mirror_dir = -tx_mirror_to_rx_mirror_dir
tx_mirror_ray = (tx_mirror_point, tx_mirror_to_rx_mirror_dir)
rx_mirror_ray = (rx_mirror_point, rx_mirror_to_tx_mirror_dir)
tx_mirror_to_rx_mirror_dist = tx_triangle.is_intersect(tx_mirror_ray)
rx_mirror_to_tx_mirror_dist = rx_triangle.is_intersect(rx_mirror_ray)
if tx_mirror_to_rx_mirror_dist < 0 or rx_mirror_to_tx_mirror_dist < 0:
continue
tx_point_on_triangle = tx_mirror_point + tx_mirror_to_rx_mirror_dir * (
tx_mirror_to_rx_mirror_dist + EPSILON)
rx_point_on_triangle = rx_mirror_point + rx_mirror_to_tx_mirror_dir * (
rx_mirror_to_tx_mirror_dist + EPSILON)
if self.direct_path(tx_pos, tx_point_on_triangle) and \
self.direct_path(tx_point_on_triangle, rx_point_on_triangle) and \
self.direct_path(rx_point_on_triangle, rx_pos):
double_reflections.append([tx_point_on_triangle, rx_point_on_triangle])
return double_reflections
def trace_roof_edges(self, tx_pos, rx_pos) -> List:
"""
Trace Knife Edges
:param tx_pos: Transmitting Position
:param rx_pos: Receiving Position
:return: Knife Edges
"""
edges = []
left_pos = tx_pos
right_pos = rx_pos
current_scan = 0
while not self.direct_path(left_pos, right_pos):
if current_scan > ROOF_MAX_SCAN:
return edges
edge_left = self.find_edge(left_pos, right_pos)
if edge_left is None:
return []
edge_right = self.find_edge(right_pos, left_pos)
if edge_right is None:
return []
if self.direct_path(edge_left, edge_right):
if VecDistance(edge_left, edge_right) < MIN_ROOF_EDGE_DISTANCE:
avg_edge = (edge_left + edge_right) / 2
edges.append(avg_edge)
return edges
edges.append(edge_left)
edges.append(edge_right)
return edges
edges.append(edge_left)
edges.append(edge_right)
left_pos = edge_left
right_pos = edge_right
return []
def find_edge(self, left_pos, right_pos):
min_x = min(left_pos[0], right_pos[0])
max_x = max(left_pos[0], right_pos[0])
min_z = min(left_pos[2], right_pos[2])
max_z = max(left_pos[2], right_pos[2])
top_direction = np.array([0, 1, 0])
upper_ray = (left_pos, top_direction)
if self.map.is_intersect(upper_ray) > 0:
return None
lower_ray = (left_pos, VecNorm(right_pos - left_pos))
current_scan = 0
while current_scan < ROOF_MAX_SCAN and \
VecAngle(upper_ray[1], lower_ray[1]) > ROOF_MIN_ANGLE:
current_scan += 1
new_dir = VecNorm((upper_ray[1] + lower_ray[1]) / 2)
check_ray = (left_pos, new_dir)
hit_nearest = self.map.is_intersect(check_ray)
if hit_nearest > 0 and PosBetweenXZ(min_x, max_x, min_z, max_z, left_pos + new_dir * hit_nearest):
lower_ray = check_ray
else:
upper_ray = check_ray
distance = self.map.is_intersect(lower_ray)
if distance < 0:
return None
left_pos_on_plane = np.array([left_pos[0], 0, left_pos[2]])
right_pos_on_plane = np.array([right_pos[0], 0, right_pos[2]])
plane_dir = VecNorm(right_pos_on_plane - left_pos_on_plane)
theta = VecAngle(plane_dir, lower_ray[1])
x_angle = VecAngle(lower_ray[1], upper_ray[1])
height = distance * np.cos(theta) * np.tan(theta + x_angle)
width = distance * np.cos(theta)
edge_distance = np.sqrt(height ** 2 + width ** 2)
edge_pos = upper_ray[0] + upper_ray[1] * edge_distance
return edge_pos
def is_outdoor(self, pos):
sky_pos = np.copy(pos)
sky_pos[1] = 1000
return self.direct_path(pos, sky_pos)
def terrain_height(self, x: float, z: float):
upper = np.array([x, 1000, z])
lower_ray = (upper, np.array([0, -1, 0]))
nearest_hit = self.map.is_intersect(lower_ray)
if nearest_hit == -1:
return 0
return 1000 - nearest_hit
def get_terrain_depth(self, x_n, z_n):
x_min, x_max = self.min_bound[0], self.max_bound[2]
z_min, z_max = self.min_bound[0], self.max_bound[2]
assert x_min < x_max
assert z_min < z_max
depth_map = []
for x in np.linspace(x_min, x_max, x_n):
for z in np.linspace(z_min, z_max, z_n):
height = self.terrain_height(x, z)
if height != -1:
depth_map.append({'x': x, 'z': z, 'height': height})
return pd.DataFrame(depth_map)
| [
"numpy.copy",
"numpy.sqrt",
"numpy.tan",
"numpy.array",
"numpy.dot",
"numpy.linspace",
"numpy.cos",
"pandas.DataFrame"
] | [((1164, 1180), 'numpy.array', 'np.array', (['tx_pos'], {}), '(tx_pos)\n', (1172, 1180), True, 'import numpy as np\n'), ((1198, 1214), 'numpy.array', 'np.array', (['rx_pos'], {}), '(rx_pos)\n', (1206, 1214), True, 'import numpy as np\n'), ((1877, 1893), 'numpy.array', 'np.array', (['tx_pos'], {}), '(tx_pos)\n', (1885, 1893), True, 'import numpy as np\n'), ((1911, 1927), 'numpy.array', 'np.array', (['rx_pos'], {}), '(rx_pos)\n', (1919, 1927), True, 'import numpy as np\n'), ((2711, 2742), 'numpy.dot', 'np.dot', (['normal', 'triangle.pointB'], {}), '(normal, triangle.pointB)\n', (2717, 2742), True, 'import numpy as np\n'), ((2755, 2774), 'numpy.dot', 'np.dot', (['pos', 'normal'], {}), '(pos, normal)\n', (2761, 2774), True, 'import numpy as np\n'), ((2787, 2809), 'numpy.dot', 'np.dot', (['normal', 'normal'], {}), '(normal, normal)\n', (2793, 2809), True, 'import numpy as np\n'), ((7867, 7886), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (7875, 7886), True, 'import numpy as np\n'), ((8750, 8789), 'numpy.array', 'np.array', (['[left_pos[0], 0, left_pos[2]]'], {}), '([left_pos[0], 0, left_pos[2]])\n', (8758, 8789), True, 'import numpy as np\n'), ((8819, 8860), 'numpy.array', 'np.array', (['[right_pos[0], 0, right_pos[2]]'], {}), '([right_pos[0], 0, right_pos[2]])\n', (8827, 8860), True, 'import numpy as np\n'), ((9169, 9202), 'numpy.sqrt', 'np.sqrt', (['(height ** 2 + width ** 2)'], {}), '(height ** 2 + width ** 2)\n', (9176, 9202), True, 'import numpy as np\n'), ((9340, 9352), 'numpy.copy', 'np.copy', (['pos'], {}), '(pos)\n', (9347, 9352), True, 'import numpy as np\n'), ((9492, 9514), 'numpy.array', 'np.array', (['[x, 1000, z]'], {}), '([x, 1000, z])\n', (9500, 9514), True, 'import numpy as np\n'), ((9967, 9997), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'x_n'], {}), '(x_min, x_max, x_n)\n', (9978, 9997), True, 'import numpy as np\n'), ((10224, 10247), 'pandas.DataFrame', 'pd.DataFrame', (['depth_map'], {}), '(depth_map)\n', (10236, 
10247), True, 'import pandas as pd\n'), ((9080, 9103), 'numpy.tan', 'np.tan', (['(theta + x_angle)'], {}), '(theta + x_angle)\n', (9086, 9103), True, 'import numpy as np\n'), ((9131, 9144), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9137, 9144), True, 'import numpy as np\n'), ((9543, 9563), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (9551, 9563), True, 'import numpy as np\n'), ((10020, 10050), 'numpy.linspace', 'np.linspace', (['z_min', 'z_max', 'z_n'], {}), '(z_min, z_max, z_n)\n', (10031, 10050), True, 'import numpy as np\n'), ((9064, 9077), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9070, 9077), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import multivariate_normal as normal
import matplotlib.pyplot as plt
from matplotlib import cm
from itertools import product
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
from reactions import GMM as tf_GMM
class GMM:
def __init__(self, n=6, ndim=3, cov=0.15, record=False):
self.n = n
self.ndim = ndim
self.record = record
self.cov = cov
self.refresh()
def refresh(self):
self.m = [np.random.rand(self.ndim) for _ in range(self.n)]
self.cov = [np.random.normal(self.cov, self.cov/5, size=self.ndim)
for _ in range(self.n)]
self.param = np.random.normal(loc=0, scale=0.2, size=self.n)
self.param /= np.sum(np.abs(self.param))
if self.record:
self.history = {'x':[], 'y':[]}
self.cst = (2 * 3.14159) ** (- self.ndim / 2)
modes = np.array([1/np.prod(cov) for cov in self.cov])
modes = modes * self.param
self.tops = np.max(modes)
self.bots = np.min(modes)
def __call__(self, x):
y = [normal.pdf(x, self.m[i], self.cov[i]) for i in range(self.n)]
fx = np.asscalar(
np.dot(
self.param.reshape((1, -1)),
np.array(y).reshape((-1, 1)))/self.n)
result = (fx / self.cst - self.bots) / (self.tops - self.bots)
if self.record:
self.history['x'].append(x)
self.history['y'].append(result)
return result
def test_1d():
gmm = GMM(ndim=1)
x = np.arange(0, 1, 0.01)
y = [gmm(i) for i in x]
plt.figure(1)
plt.plot(x, y)
plt.show()
def test_2d():
gmm = GMM(ndim=2)
xr = list(np.arange(0, 1, 0.02))
X = np.array(list(product(xr, repeat=2)))
Y = [gmm(i) for i in X]
fig = plt.figure(1)
ax = fig.gca(projection='3d')
ax.plot_trisurf(X[:, 0], X[:, 1], Y)
fig.show()
plt.show()
def test_tf():
xr = list(np.arange(0, 1, 0.02))
X = np.array(list(product(xr, repeat=2)))
Y = []
with tf.compat.v1.Session() as sess:
gmm = tf_GMM(batch_size=1, ncoef=6, num_dims=2, cov=0.5)
y = gmm(tf.placeholder(tf.float32, shape=[1, 2], name='x'))
sess.run(tf.compat.v1.global_variables_initializer())
for x in X:
Y.append(sess.run(y, feed_dict={'x:0':x.reshape((1, 2))}))
cmap = cm.get_cmap('rainbow')
fig = plt.figure(1)
ax = fig.gca(projection='3d')
ax.plot_trisurf(X[:, 0], X[:, 1], np.squeeze(Y),
linewidth=0.0, antialiased=True,
cmap=cmap)
fig.show()
plt.show()
if __name__ == '__main__':
test_tf()
| [
"numpy.prod",
"numpy.random.rand",
"numpy.array",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.arange",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"itertools.product",
"numpy.max",
"numpy.min",
"matplotlib.cm.get_cmap",
"numpy.random.no... | [((1571, 1592), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (1580, 1592), True, 'import numpy as np\n'), ((1625, 1638), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1635, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1657), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1651, 1657), True, 'import matplotlib.pyplot as plt\n'), ((1662, 1672), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1670, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1845), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1842, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1940, 1950), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1948, 1950), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2422), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (2411, 2422), False, 'from matplotlib import cm\n'), ((2433, 2446), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2443, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2637, 2647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2645, 2647), True, 'import matplotlib.pyplot as plt\n'), ((686, 733), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(0.2)', 'size': 'self.n'}), '(loc=0, scale=0.2, size=self.n)\n', (702, 733), True, 'import numpy as np\n'), ((1024, 1037), 'numpy.max', 'np.max', (['modes'], {}), '(modes)\n', (1030, 1037), True, 'import numpy as np\n'), ((1058, 1071), 'numpy.min', 'np.min', (['modes'], {}), '(modes)\n', (1064, 1071), True, 'import numpy as np\n'), ((1725, 1746), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.02)'], {}), '(0, 1, 0.02)\n', (1734, 1746), True, 'import numpy as np\n'), ((1981, 2002), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.02)'], {}), '(0, 1, 0.02)\n', (1990, 2002), True, 'import numpy as np\n'), ((2070, 2092), 
'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2090, 2092), True, 'import tensorflow as tf\n'), ((2116, 2166), 'reactions.GMM', 'tf_GMM', ([], {'batch_size': '(1)', 'ncoef': '(6)', 'num_dims': '(2)', 'cov': '(0.5)'}), '(batch_size=1, ncoef=6, num_dims=2, cov=0.5)\n', (2122, 2166), True, 'from reactions import GMM as tf_GMM\n'), ((2519, 2532), 'numpy.squeeze', 'np.squeeze', (['Y'], {}), '(Y)\n', (2529, 2532), True, 'import numpy as np\n'), ((496, 521), 'numpy.random.rand', 'np.random.rand', (['self.ndim'], {}), '(self.ndim)\n', (510, 521), True, 'import numpy as np\n'), ((566, 622), 'numpy.random.normal', 'np.random.normal', (['self.cov', '(self.cov / 5)'], {'size': 'self.ndim'}), '(self.cov, self.cov / 5, size=self.ndim)\n', (582, 622), True, 'import numpy as np\n'), ((763, 781), 'numpy.abs', 'np.abs', (['self.param'], {}), '(self.param)\n', (769, 781), True, 'import numpy as np\n'), ((1113, 1150), 'scipy.stats.multivariate_normal.pdf', 'normal.pdf', (['x', 'self.m[i]', 'self.cov[i]'], {}), '(x, self.m[i], self.cov[i])\n', (1123, 1150), True, 'from scipy.stats import multivariate_normal as normal\n'), ((1770, 1791), 'itertools.product', 'product', (['xr'], {'repeat': '(2)'}), '(xr, repeat=2)\n', (1777, 1791), False, 'from itertools import product\n'), ((2026, 2047), 'itertools.product', 'product', (['xr'], {'repeat': '(2)'}), '(xr, repeat=2)\n', (2033, 2047), False, 'from itertools import product\n'), ((2183, 2233), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1, 2]', 'name': '"""x"""'}), "(tf.float32, shape=[1, 2], name='x')\n", (2197, 2233), True, 'import tensorflow as tf\n'), ((2252, 2295), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2293, 2295), True, 'import tensorflow as tf\n'), ((934, 946), 'numpy.prod', 'np.prod', (['cov'], {}), '(cov)\n', (941, 946), True, 'import numpy as np\n'), ((1283, 1294), 'numpy.array', 'np.array', (['y'], 
{}), '(y)\n', (1291, 1294), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path="hair_segmentation_512x512_float32.tflite")
# interpreter = tf.lite.Interpreter(model_path="hair_segmentation_512x512_weight_quant.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print('input:', input_details)
print('')
print('output:', output_details)
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print('output_data.shape:', output_data.shape)
import cv2
| [
"tensorflow.lite.Interpreter",
"numpy.random.random_sample"
] | [((58, 132), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': '"""hair_segmentation_512x512_float32.tflite"""'}), "(model_path='hair_segmentation_512x512_float32.tflite')\n", (77, 132), True, 'import tensorflow as tf\n'), ((497, 533), 'numpy.random.random_sample', 'np.random.random_sample', (['input_shape'], {}), '(input_shape)\n', (520, 533), True, 'import numpy as np\n')] |
"""Script to evaluate a dataset fold under a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from magenta.models.coconet import lib_data
from magenta.models.coconet import lib_evaluation
from magenta.models.coconet import lib_graph
from magenta.models.coconet import lib_util
import numpy as np
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('data_dir', None,
'Path to the base directory for different datasets.')
flags.DEFINE_string('eval_logdir', None,
'Path to the base directory for saving evaluation '
'statistics.')
flags.DEFINE_string('fold', None,
'Data fold on which to evaluate (valid or test)')
flags.DEFINE_string('fold_index', None,
'Optionally, index of particular data point in fold to '
'evaluate.')
flags.DEFINE_string('unit', None, 'Note or frame or example.')
flags.DEFINE_integer('ensemble_size', 5,
'Number of ensemble members to average.')
flags.DEFINE_bool('chronological', False,
'Indicates evaluation should proceed in chronological order.')
flags.DEFINE_string('checkpoint', None, 'Path to checkpoint directory.')
flags.DEFINE_string('sample_npy_path', None, 'Path to samples to be evaluated.')
EVAL_SUBDIR = 'eval_stats'
def main(unused_argv):
checkpoint_dir = FLAGS.checkpoint
if not checkpoint_dir:
# If a checkpoint directory is not specified, see if there is only one
# subdir in this folder and use that.
possible_checkpoint_dirs = tf.gfile.ListDirectory(FLAGS.eval_logdir)
possible_checkpoint_dirs = [
i for i in possible_checkpoint_dirs if
tf.gfile.IsDirectory(os.path.join(FLAGS.eval_logdir, i))]
if EVAL_SUBDIR in possible_checkpoint_dirs:
possible_checkpoint_dirs.remove(EVAL_SUBDIR)
if len(possible_checkpoint_dirs) == 1:
checkpoint_dir = os.path.join(
FLAGS.eval_logdir, possible_checkpoint_dirs[0])
tf.logging.info('Using checkpoint dir: %s', checkpoint_dir)
else:
raise ValueError(
'Need to provide a path to checkpoint directory or use an '
'eval_logdir with only 1 checkpoint subdirectory.')
wmodel = lib_graph.load_checkpoint(checkpoint_dir)
if FLAGS.eval_logdir is None:
raise ValueError(
'Set flag eval_logdir to specify a path for saving eval statistics.')
else:
eval_logdir = os.path.join(FLAGS.eval_logdir, EVAL_SUBDIR)
tf.gfile.MakeDirs(eval_logdir)
evaluator = lib_evaluation.BaseEvaluator.make(
FLAGS.unit, wmodel=wmodel, chronological=FLAGS.chronological)
evaluator = lib_evaluation.EnsemblingEvaluator(evaluator, FLAGS.ensemble_size)
if not FLAGS.sample_npy_path and FLAGS.fold is None:
raise ValueError(
'Either --fold must be specified, or paths of npy files to load must '
'be given, but not both.')
if FLAGS.fold is not None:
evaluate_fold(
FLAGS.fold, evaluator, wmodel.hparams, eval_logdir, checkpoint_dir)
if FLAGS.sample_npy_path is not None:
evaluate_paths([FLAGS.sample_npy_path], evaluator, wmodel.hparams,
eval_logdir)
tf.logging.info('Done')
def evaluate_fold(fold, evaluator, hparams, eval_logdir, checkpoint_dir):
"""Writes to file the neg. loglikelihood of given fold (train/valid/test)."""
eval_run_name = 'eval_%s_%s%s_%s_ensemble%s_chrono%s' % (
lib_util.timestamp(), fold,
'' if FLAGS.fold_index is None else FLAGS.fold_index, FLAGS.unit,
FLAGS.ensemble_size, FLAGS.chronological)
log_fname = '%s__%s.npz' % (os.path.basename(checkpoint_dir), eval_run_name)
log_fpath = os.path.join(eval_logdir, log_fname)
pianorolls = get_fold_pianorolls(fold, hparams)
rval = lib_evaluation.evaluate(evaluator, pianorolls)
tf.logging.info('Writing to path: %s' % log_fpath)
with lib_util.atomic_file(log_fpath) as p:
np.savez_compressed(p, **rval)
def evaluate_paths(paths, evaluator, unused_hparams, eval_logdir):
"""Evaluates negative loglikelihood of pianorolls from given paths."""
for path in paths:
name = 'eval_samples_%s_%s_ensemble%s_chrono%s' % (lib_util.timestamp(),
FLAGS.unit,
FLAGS.ensemble_size,
FLAGS.chronological)
log_fname = '%s__%s.npz' % (os.path.splitext(os.path.basename(path))[0],
name)
log_fpath = os.path.join(eval_logdir, log_fname)
pianorolls = get_path_pianorolls(path)
rval = lib_evaluation.evaluate(evaluator, pianorolls)
tf.logging.info('Writing evaluation statistics to %s', log_fpath)
with lib_util.atomic_file(log_fpath) as p:
np.savez_compressed(p, **rval)
def get_fold_pianorolls(fold, hparams):
dataset = lib_data.get_dataset(FLAGS.data_dir, hparams, fold)
pianorolls = dataset.get_pianorolls()
tf.logging.info('Retrieving pianorolls from %s set of %s dataset.',
fold, hparams.dataset)
print_statistics(pianorolls)
if FLAGS.fold_index is not None:
pianorolls = [pianorolls[int(FLAGS.fold_index)]]
return pianorolls
def get_path_pianorolls(path):
pianoroll_fpath = os.path.join(tf.resource_loader.get_data_files_path(), path)
tf.logging.info('Retrieving pianorolls from %s', pianoroll_fpath)
with tf.gfile.Open(pianoroll_fpath, 'r') as p:
pianorolls = np.load(p)
if isinstance(pianorolls, np.ndarray):
tf.logging.info(pianorolls.shape)
print_statistics(pianorolls)
return pianorolls
def print_statistics(pianorolls):
"""Prints statistics of given pianorolls, such as max and unique length."""
if isinstance(pianorolls, np.ndarray):
tf.logging.info(pianorolls.shape)
tf.logging.info('# of total pieces in set: %d', len(pianorolls))
lengths = [len(roll) for roll in pianorolls]
if len(np.unique(lengths)) > 1:
tf.logging.info('lengths %s', np.sort(lengths))
tf.logging.info('max_len %d', max(lengths))
tf.logging.info(
'unique lengths %s',
np.unique(sorted(pianoroll.shape[0] for pianoroll in pianorolls)))
tf.logging.info('shape %s', pianorolls[0].shape)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| [
"magenta.models.coconet.lib_evaluation.BaseEvaluator.make",
"tensorflow.logging.set_verbosity",
"magenta.models.coconet.lib_graph.load_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.app.run",
"magenta.models.coconet.lib_evaluation.evaluate",
"numpy.sort",
"magenta.models.coconet.lib_data.get_da... | [((2340, 2381), 'magenta.models.coconet.lib_graph.load_checkpoint', 'lib_graph.load_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2365, 2381), False, 'from magenta.models.coconet import lib_graph\n'), ((2635, 2735), 'magenta.models.coconet.lib_evaluation.BaseEvaluator.make', 'lib_evaluation.BaseEvaluator.make', (['FLAGS.unit'], {'wmodel': 'wmodel', 'chronological': 'FLAGS.chronological'}), '(FLAGS.unit, wmodel=wmodel, chronological=\n FLAGS.chronological)\n', (2668, 2735), False, 'from magenta.models.coconet import lib_evaluation\n'), ((2752, 2818), 'magenta.models.coconet.lib_evaluation.EnsemblingEvaluator', 'lib_evaluation.EnsemblingEvaluator', (['evaluator', 'FLAGS.ensemble_size'], {}), '(evaluator, FLAGS.ensemble_size)\n', (2786, 2818), False, 'from magenta.models.coconet import lib_evaluation\n'), ((3280, 3303), 'tensorflow.logging.info', 'tf.logging.info', (['"""Done"""'], {}), "('Done')\n", (3295, 3303), True, 'import tensorflow as tf\n'), ((3767, 3803), 'os.path.join', 'os.path.join', (['eval_logdir', 'log_fname'], {}), '(eval_logdir, log_fname)\n', (3779, 3803), False, 'import os\n'), ((3865, 3911), 'magenta.models.coconet.lib_evaluation.evaluate', 'lib_evaluation.evaluate', (['evaluator', 'pianorolls'], {}), '(evaluator, pianorolls)\n', (3888, 3911), False, 'from magenta.models.coconet import lib_evaluation\n'), ((3914, 3964), 'tensorflow.logging.info', 'tf.logging.info', (["('Writing to path: %s' % log_fpath)"], {}), "('Writing to path: %s' % log_fpath)\n", (3929, 3964), True, 'import tensorflow as tf\n'), ((4982, 5033), 'magenta.models.coconet.lib_data.get_dataset', 'lib_data.get_dataset', (['FLAGS.data_dir', 'hparams', 'fold'], {}), '(FLAGS.data_dir, hparams, fold)\n', (5002, 5033), False, 'from magenta.models.coconet import lib_data\n'), ((5076, 5170), 'tensorflow.logging.info', 'tf.logging.info', (['"""Retrieving pianorolls from %s set of %s dataset."""', 'fold', 'hparams.dataset'], {}), 
"('Retrieving pianorolls from %s set of %s dataset.', fold,\n hparams.dataset)\n", (5091, 5170), True, 'import tensorflow as tf\n'), ((5440, 5505), 'tensorflow.logging.info', 'tf.logging.info', (['"""Retrieving pianorolls from %s"""', 'pianoroll_fpath'], {}), "('Retrieving pianorolls from %s', pianoroll_fpath)\n", (5455, 5505), True, 'import tensorflow as tf\n'), ((6273, 6321), 'tensorflow.logging.info', 'tf.logging.info', (['"""shape %s"""', 'pianorolls[0].shape'], {}), "('shape %s', pianorolls[0].shape)\n", (6288, 6321), True, 'import tensorflow as tf\n'), ((6353, 6394), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (6377, 6394), True, 'import tensorflow as tf\n'), ((6397, 6409), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (6407, 6409), True, 'import tensorflow as tf\n'), ((1672, 1713), 'tensorflow.gfile.ListDirectory', 'tf.gfile.ListDirectory', (['FLAGS.eval_logdir'], {}), '(FLAGS.eval_logdir)\n', (1694, 1713), True, 'import tensorflow as tf\n'), ((2540, 2584), 'os.path.join', 'os.path.join', (['FLAGS.eval_logdir', 'EVAL_SUBDIR'], {}), '(FLAGS.eval_logdir, EVAL_SUBDIR)\n', (2552, 2584), False, 'import os\n'), ((2589, 2619), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['eval_logdir'], {}), '(eval_logdir)\n', (2606, 2619), True, 'import tensorflow as tf\n'), ((3972, 4003), 'magenta.models.coconet.lib_util.atomic_file', 'lib_util.atomic_file', (['log_fpath'], {}), '(log_fpath)\n', (3992, 4003), False, 'from magenta.models.coconet import lib_util\n'), ((4014, 4044), 'numpy.savez_compressed', 'np.savez_compressed', (['p'], {}), '(p, **rval)\n', (4033, 4044), True, 'import numpy as np\n'), ((4635, 4671), 'os.path.join', 'os.path.join', (['eval_logdir', 'log_fname'], {}), '(eval_logdir, log_fname)\n', (4647, 4671), False, 'import os\n'), ((4727, 4773), 'magenta.models.coconet.lib_evaluation.evaluate', 'lib_evaluation.evaluate', (['evaluator', 'pianorolls'], {}), '(evaluator, 
pianorolls)\n', (4750, 4773), False, 'from magenta.models.coconet import lib_evaluation\n'), ((4778, 4843), 'tensorflow.logging.info', 'tf.logging.info', (['"""Writing evaluation statistics to %s"""', 'log_fpath'], {}), "('Writing evaluation statistics to %s', log_fpath)\n", (4793, 4843), True, 'import tensorflow as tf\n'), ((5390, 5430), 'tensorflow.resource_loader.get_data_files_path', 'tf.resource_loader.get_data_files_path', ([], {}), '()\n', (5428, 5430), True, 'import tensorflow as tf\n'), ((5513, 5548), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['pianoroll_fpath', '"""r"""'], {}), "(pianoroll_fpath, 'r')\n", (5526, 5548), True, 'import tensorflow as tf\n'), ((5572, 5582), 'numpy.load', 'np.load', (['p'], {}), '(p)\n', (5579, 5582), True, 'import numpy as np\n'), ((5628, 5661), 'tensorflow.logging.info', 'tf.logging.info', (['pianorolls.shape'], {}), '(pianorolls.shape)\n', (5643, 5661), True, 'import tensorflow as tf\n'), ((5872, 5905), 'tensorflow.logging.info', 'tf.logging.info', (['pianorolls.shape'], {}), '(pianorolls.shape)\n', (5887, 5905), True, 'import tensorflow as tf\n'), ((2025, 2085), 'os.path.join', 'os.path.join', (['FLAGS.eval_logdir', 'possible_checkpoint_dirs[0]'], {}), '(FLAGS.eval_logdir, possible_checkpoint_dirs[0])\n', (2037, 2085), False, 'import os\n'), ((2103, 2162), 'tensorflow.logging.info', 'tf.logging.info', (['"""Using checkpoint dir: %s"""', 'checkpoint_dir'], {}), "('Using checkpoint dir: %s', checkpoint_dir)\n", (2118, 2162), True, 'import tensorflow as tf\n'), ((3526, 3546), 'magenta.models.coconet.lib_util.timestamp', 'lib_util.timestamp', ([], {}), '()\n', (3544, 3546), False, 'from magenta.models.coconet import lib_util\n'), ((3704, 3736), 'os.path.basename', 'os.path.basename', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3720, 3736), False, 'import os\n'), ((4853, 4884), 'magenta.models.coconet.lib_util.atomic_file', 'lib_util.atomic_file', (['log_fpath'], {}), '(log_fpath)\n', (4873, 4884), False, 'from 
magenta.models.coconet import lib_util\n'), ((4897, 4927), 'numpy.savez_compressed', 'np.savez_compressed', (['p'], {}), '(p, **rval)\n', (4916, 4927), True, 'import numpy as np\n'), ((6029, 6047), 'numpy.unique', 'np.unique', (['lengths'], {}), '(lengths)\n', (6038, 6047), True, 'import numpy as np\n'), ((6088, 6104), 'numpy.sort', 'np.sort', (['lengths'], {}), '(lengths)\n', (6095, 6104), True, 'import numpy as np\n'), ((4263, 4283), 'magenta.models.coconet.lib_util.timestamp', 'lib_util.timestamp', ([], {}), '()\n', (4281, 4283), False, 'from magenta.models.coconet import lib_util\n'), ((1823, 1857), 'os.path.join', 'os.path.join', (['FLAGS.eval_logdir', 'i'], {}), '(FLAGS.eval_logdir, i)\n', (1835, 1857), False, 'import os\n'), ((4553, 4575), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4569, 4575), False, 'import os\n')] |
from pymesh import separate_mesh
from pymesh import merge_meshes
from pymesh import form_mesh
from pymesh import generate_box_mesh
from pymesh.TestCase import TestCase
import numpy as np
class SeparateMeshTest(TestCase):
def test_simple(self):
mesh_1 = generate_box_mesh(np.zeros(3), np.ones(3));
mesh_2 = generate_box_mesh(np.array([0.5, 0.5, 0.5]), np.ones(3));
out_mesh = merge_meshes([mesh_1, mesh_2]);
components = separate_mesh(out_mesh);
self.assertEqual(2, len(components));
for comp in components:
self.assertEqual(8, comp.num_vertices);
self.assertEqual(12, comp.num_faces);
def test_face_connectivity(self):
vertices = np.array([
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 1, 1],
], dtype=float);
faces = np.array([
[0, 1, 2],
[2, 3, 4],
]);
mesh = form_mesh(vertices, faces);
components = separate_mesh(mesh, "vertex");
self.assertEqual(1, len(components));
components = separate_mesh(mesh, "face");
self.assertEqual(2, len(components));
| [
"numpy.ones",
"pymesh.merge_meshes",
"numpy.array",
"numpy.zeros",
"pymesh.form_mesh",
"pymesh.separate_mesh"
] | [((405, 435), 'pymesh.merge_meshes', 'merge_meshes', (['[mesh_1, mesh_2]'], {}), '([mesh_1, mesh_2])\n', (417, 435), False, 'from pymesh import merge_meshes\n'), ((459, 482), 'pymesh.separate_mesh', 'separate_mesh', (['out_mesh'], {}), '(out_mesh)\n', (472, 482), False, 'from pymesh import separate_mesh\n'), ((723, 801), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]'], {'dtype': 'float'}), '([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]], dtype=float)\n', (731, 801), True, 'import numpy as np\n'), ((895, 927), 'numpy.array', 'np.array', (['[[0, 1, 2], [2, 3, 4]]'], {}), '([[0, 1, 2], [2, 3, 4]])\n', (903, 927), True, 'import numpy as np\n'), ((984, 1010), 'pymesh.form_mesh', 'form_mesh', (['vertices', 'faces'], {}), '(vertices, faces)\n', (993, 1010), False, 'from pymesh import form_mesh\n'), ((1033, 1062), 'pymesh.separate_mesh', 'separate_mesh', (['mesh', '"""vertex"""'], {}), "(mesh, 'vertex')\n", (1046, 1062), False, 'from pymesh import separate_mesh\n'), ((1132, 1159), 'pymesh.separate_mesh', 'separate_mesh', (['mesh', '"""face"""'], {}), "(mesh, 'face')\n", (1145, 1159), False, 'from pymesh import separate_mesh\n'), ((285, 296), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (293, 296), True, 'import numpy as np\n'), ((298, 308), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (305, 308), True, 'import numpy as np\n'), ((346, 371), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (354, 371), True, 'import numpy as np\n'), ((373, 383), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (380, 383), True, 'import numpy as np\n')] |
# 2nd order rotational pressure correction for Stokes equation
# Author: <NAME>, <EMAIL>
import numpy as np
from sympy import symbols, sin, cos, lambdify
from shenfun import *
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter, ScalarFormatter
from mpltools import annotation
# Keyword arguments for the convergence-order slope marker (polygon and text)
pa = {'fill': False, 'edgecolor': 'black'}
ta = {'fontsize': 10}
# Debug helper: print the arguments and abort (exit(0) raises SystemExit
# before the `+` is ever evaluated)
pex = lambda *args: print(*args) + exit(0)
# Symbolic space/time coordinates for the manufactured solution
x, y, t = symbols("x, y, t", real=True)
# Define the initial solution
# Exact velocity field (uex, uey); each component vanishes on the boundary
uex = (sin(np.pi*x)**2)*sin(2*np.pi*y)*sin(t)
uey = -sin(2*np.pi*x)*(sin(np.pi*y)**2)*sin(t)
# Exact pressure field
pe = cos(np.pi*x)*cos(np.pi*y)*sin(t)
# Forcing terms derived from the momentum equation: f = -lap(u) + grad(p) + u_t
fex = -uex.diff(x, 2) - uex.diff(y, 2) + pe.diff(x, 1) + uex.diff(t, 1)
fey = -uey.diff(x, 2) - uey.diff(y, 2) + pe.diff(y, 1) + uey.diff(t, 1)
# Divergence of the exact velocity (should be zero for a solenoidal field)
he = uex.diff(x, 1) + uey.diff(y, 1)
# Compile the symbolic expressions into fast numerical callables of (x, y, t)
uexf, ueyf, pef, fexf, feyf = map(lambda v: lambdify((x, y, t), v),
                                 (uex, uey, pe, fex, fey))
def main(n):
    """Solve the time-dependent Stokes problem with a 2nd-order rotational
    pressure-correction scheme and return the final-time L2 errors.

    The first step uses a backward-Euler splitting to bootstrap the
    two-level BDF-2 scheme used for all remaining steps.

    Parameters
    ----------
    n : int
        Time-step refinement factor; the step size is 5e-3 / n.

    Returns
    -------
    list of float
        L2 errors of [u_x, u_y, p] against the manufactured solution
        at the final time tf = 5e-2.
    """
    # number of modes in x and y direction
    N = (32, 32)
    # basis function for velocity components in x and y directions: P_{N}
    D0X = FunctionSpace(N[0], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
    D0Y = FunctionSpace(N[1], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
    # basis function for pressure: P_{N-2}
    PX = FunctionSpace(N[0], 'Legendre', quad='GL')
    PY = FunctionSpace(N[1], 'Legendre', quad='GL')
    PX.slice = lambda: slice(0, N[0]-2)
    PY.slice = lambda: slice(0, N[1]-2)
    # define a multi-dimensional tensor product basis
    Vs = TensorProductSpace(comm, (D0X, D0Y))
    Ps = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
    # Create vector space for velocity
    Ws = VectorSpace([Vs, Vs])
    # Create test and trial spaces for velocity and pressure
    u = TrialFunction(Ws); v = TestFunction(Ws)
    p = TrialFunction(Ps); q = TestFunction(Ps)
    X = Vs.local_mesh(True)
    # Define the initial solution on quadrature points at t=0
    U = Array(Ws, buffer=(uex.subs(t, 0), uey.subs(t, 0)))
    P = Array(Ps); P.fill(0)
    F = Array(Ws, buffer=(fex.subs(t, 0), fey.subs(t, 0)))
    # Define the coefficient vector
    U_hat = Function(Ws); U_hat = Ws.forward(U, U_hat);
    P_hat = Function(Ps); P_hat = Ps.forward(P, P_hat);
    F_hat = Function(Ws); F_hat = Ws.forward(F, F_hat);
    # Initial time, time step, final time
    ti, dt, tf = 0., 5e-3/n, 5e-2
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    nsteps = int(np.ceil((tf - ti)/dt))
    dt = (tf - ti)/nsteps
    X = Ws.local_mesh(True)
    # Define the implicit operator for BDF-2
    Lb1 = BlockMatrix(inner(v, u*(1.5/dt)) + inner(grad(v), grad(u)))
    Lb2 = BlockMatrix(inner(-grad(q), grad(p)))
    # Define the implicit operator for Euler
    Le1 = BlockMatrix(inner(v, u*(1./dt)) + inner(grad(v), grad(u)))
    Le2 = BlockMatrix(inner(-grad(q), grad(p)))
    # Define the implicit operator for updating
    Lu1 = BlockMatrix([inner(v, u)])
    Lu2 = BlockMatrix([inner(q, p)])
    # temporary storage
    rhsU, rhsP = Function(Ws), Function(Ps)
    U0_hat = Function(Ws); U0_hat = Ws.forward(U, U0_hat);
    Ut_hat = Function(Ws); Ut_hat = Ws.forward(U, Ut_hat);
    P0_hat = Function(Ps); P0_hat = Ps.forward(P, P0_hat);
    Phi_hat = Function(Ps); Phi_hat = Ps.forward(P, Phi_hat);
    # integrate in time
    time = ti
    # storage
    rhsU, rhsP = rhsU, rhsP
    u_hat, p_hat = U_hat, P_hat
    u0_hat, p0_hat = U0_hat, P0_hat
    ut_hat, phi_hat = Ut_hat, Phi_hat
    # Euler time-step (bootstraps the two-level BDF-2 scheme below)
    # evaluate the forcing function
    F[0] = fexf(X[0], X[1], time+dt)
    F[1] = feyf(X[0], X[1], time+dt)
    # Solve (9.102)
    rhsU.fill(0)
    rhsU += -inner(v, grad(p_hat))
    rhsU += inner(v, F)
    rhsU += inner(v, u_hat/dt)
    ut_hat = Le1.solve(rhsU, u=ut_hat)
    # Solve (9.107); the constraint pins the pressure nullspace (mean value)
    rhsP.fill(0)
    rhsP += (1/dt)*inner(q, div(ut_hat))
    phi_hat = Le2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
    # Update for next time step
    u0_hat[:] = u_hat; p0_hat[:] = p_hat;
    # Update (9.107)
    rhsU.fill(0)
    rhsU += inner(v, ut_hat) - inner(v, dt*grad(phi_hat))
    u_hat = Lu1.solve(rhsU, u=u_hat)
    # Update (9.105)
    rhsP.fill(0)
    rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
    p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
    time += dt
    # BDF time step
    for step in range(2, nsteps+1):
        # evaluate the forcing function
        F[0] = fexf(X[0], X[1], time+dt)
        F[1] = feyf(X[0], X[1], time+dt)
        # Solve (9.102)
        rhsU.fill(0)
        rhsU += -inner(v, grad(p_hat))
        rhsU += inner(v, F)
        rhsU += inner(v, u_hat*2/dt) - inner(v, u0_hat*0.5/dt)
        ut_hat = Lb1.solve(rhsU, u=ut_hat)
        # Solve (9.107)
        rhsP.fill(0)
        rhsP += 1.5/dt*inner(q, div(ut_hat))
        phi_hat = Lb2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
        # update for next time step
        u0_hat[:] = u_hat; p0_hat[:] = p_hat;
        # Update (9.107, 9.105)
        rhsU.fill(0)
        rhsU += inner(v, ut_hat) - inner(v, ((2.*dt/3))*grad(phi_hat))
        u_hat = Lu1.solve(rhsU, u=u_hat)
        rhsP.fill(0)
        rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
        p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
        # increment time
        time += dt
    # Transform the solution to physical space
    UP = [*U_hat.backward(U), P_hat.backward(P)]
    # compute error against the exact solution evaluated at the final time
    Ue = Array(Ws, buffer=(uex.subs(t, tf), uey.subs(t, tf)))
    Pe = Array(Ps, buffer=(pe.subs(t, tf)))
    UPe = [*Ue, Pe]
    l2_error = list(map(np.linalg.norm, [u-ue for u, ue in zip(UP, UPe)]))
    return l2_error
if __name__ == "__main__":
    # Time-step refinement factors: 1, 2, 4, 8
    N = 2**np.arange(0, 4)
    # E[i, j] holds the L2 error of component i at refinement N[j]
    E = np.zeros((3, len(N)))
    for (j, n) in enumerate(N):
        E[:, j] = main(n)
    # Plot the temporal convergence of u_x, u_y and p on log-log axes
    fig = plt.figure(figsize=(5.69, 4.27))
    ax = plt.gca()
    marks = ('or', '-g', '-ob')
    # FIX: renamed `vars` -> `labels`; the original shadowed the builtin vars()
    labels = (r'$u_x$', r'$u_y$', r'$p$')
    for i in range(3):
        plt.loglog(N, E[i, :], marks[i], label=labels[i])
        # Estimate the convergence order from the last two refinements
        slope, intercept = np.polyfit(np.log(N[-2:]), np.log(E[i, -2:]), 1)
        if(i != 1):
            annotation.slope_marker((N[-2], E[i, -2]), ("{0:.2f}".format(slope), 1),
                                    ax=ax, poly_kwargs=pa, text_kwargs=ta)
    plt.text(N[0], 2e-5, r"$\Delta t=5 \times 10^{-3},\; N=32^2$")
    plt.text(N[0], 1e-5, r"Final Time = $5 \times 10^{-2}$")
    plt.title(r"Stokes: $2^{nd}$-order Rotational Pressure-Correction")
    plt.legend(); plt.autoscale();
    plt.ylabel(r'$|Error|_{L^2}$')
    plt.xticks(N);
    ax.get_xaxis().set_minor_formatter(NullFormatter())
    # Label the ticks as fractions of the base time step
    fmt = lambda v: r"$\Delta t/{0}$".format(v) if v != 1 else r"$\Delta t$"
    plt.gca().set_xticklabels(list(map(fmt, N)))
    #plt.savefig("stokes.pdf", orientation='portrait')
    plt.show()
| [
"matplotlib.ticker.NullFormatter",
"sympy.sin",
"matplotlib.pyplot.text",
"sympy.cos",
"numpy.ceil",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gca",
"sympy.lambdify",
"numpy.log",
"sympy.symbols",
"matplotlib.pypl... | [((422, 451), 'sympy.symbols', 'symbols', (['"""x, y, t"""'], {'real': '(True)'}), "('x, y, t', real=True)\n", (429, 451), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((522, 528), 'sympy.sin', 'sin', (['t'], {}), '(t)\n', (525, 528), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((569, 575), 'sympy.sin', 'sin', (['t'], {}), '(t)\n', (572, 575), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((607, 613), 'sympy.sin', 'sin', (['t'], {}), '(t)\n', (610, 613), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((5501, 5533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.69, 4.27)'}), '(figsize=(5.69, 4.27))\n', (5511, 5533), True, 'import matplotlib.pyplot as plt\n'), ((5540, 5549), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5547, 5549), True, 'import matplotlib.pyplot as plt\n'), ((5908, 5973), 'matplotlib.pyplot.text', 'plt.text', (['N[0]', '(2e-05)', '"""$\\\\Delta t=5 \\\\times 10^{-3},\\\\; N=32^2$"""'], {}), "(N[0], 2e-05, '$\\\\Delta t=5 \\\\times 10^{-3},\\\\; N=32^2$')\n", (5916, 5973), True, 'import matplotlib.pyplot as plt\n'), ((5973, 6030), 'matplotlib.pyplot.text', 'plt.text', (['N[0]', '(1e-05)', '"""Final Time = $5 \\\\times 10^{-2}$"""'], {}), "(N[0], 1e-05, 'Final Time = $5 \\\\times 10^{-2}$')\n", (5981, 6030), True, 'import matplotlib.pyplot as plt\n'), ((6032, 6098), 'matplotlib.pyplot.title', 'plt.title', (['"""Stokes: $2^{nd}$-order Rotational Pressure-Correction"""'], {}), "('Stokes: $2^{nd}$-order Rotational Pressure-Correction')\n", (6041, 6098), True, 'import matplotlib.pyplot as plt\n'), ((6102, 6114), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6112, 6114), True, 'import matplotlib.pyplot as plt\n'), ((6116, 6131), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (6129, 6131), True, 'import matplotlib.pyplot as plt\n'), ((6136, 6165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|Error|_{L^2}$"""'], {}), 
"('$|Error|_{L^2}$')\n", (6146, 6165), True, 'import matplotlib.pyplot as plt\n'), ((6169, 6182), 'matplotlib.pyplot.xticks', 'plt.xticks', (['N'], {}), '(N)\n', (6179, 6182), True, 'import matplotlib.pyplot as plt\n'), ((6414, 6424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6422, 6424), True, 'import matplotlib.pyplot as plt\n'), ((507, 525), 'sympy.sin', 'sin', (['(2 * np.pi * y)'], {}), '(2 * np.pi * y)\n', (510, 525), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((581, 595), 'sympy.cos', 'cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (584, 595), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((594, 608), 'sympy.cos', 'cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (597, 608), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((840, 862), 'sympy.lambdify', 'lambdify', (['(x, y, t)', 'v'], {}), '((x, y, t), v)\n', (848, 862), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((2335, 2358), 'numpy.ceil', 'np.ceil', (['((tf - ti) / dt)'], {}), '((tf - ti) / dt)\n', (2342, 2358), True, 'import numpy as np\n'), ((5394, 5409), 'numpy.arange', 'np.arange', (['(0)', '(4)'], {}), '(0, 4)\n', (5403, 5409), True, 'import numpy as np\n'), ((5644, 5691), 'matplotlib.pyplot.loglog', 'plt.loglog', (['N', 'E[i, :]', 'marks[i]'], {'label': 'vars[i]'}), '(N, E[i, :], marks[i], label=vars[i])\n', (5654, 5691), True, 'import matplotlib.pyplot as plt\n'), ((6222, 6237), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (6235, 6237), False, 'from matplotlib.ticker import NullFormatter, ScalarFormatter\n'), ((490, 504), 'sympy.sin', 'sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (493, 504), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((536, 554), 'sympy.sin', 'sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (539, 554), False, 'from sympy import symbols, sin, cos, lambdify\n'), ((552, 566), 'sympy.sin', 'sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (555, 566), False, 'from sympy import 
symbols, sin, cos, lambdify\n'), ((5725, 5739), 'numpy.log', 'np.log', (['N[-2:]'], {}), '(N[-2:])\n', (5731, 5739), True, 'import numpy as np\n'), ((5741, 5758), 'numpy.log', 'np.log', (['E[i, -2:]'], {}), '(E[i, -2:])\n', (5747, 5758), True, 'import numpy as np\n'), ((6314, 6323), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6321, 6323), True, 'import matplotlib.pyplot as plt\n')] |
from DejaVu.colorMap import ColorMap
from numpy import array
cm = ColorMap('rwb256')
cfg = {'name': 'rwb256', 'ramp': array([[ 1. , 0. , 0. , 1. ],
[ 0.00798478, 0.006 , 1. , 1. ],
[ 0.01297748, 0.011 , 1. , 1. ],
[ 0.02495463, 0.023 , 1. , 1. ],
[ 0.03094184, 0.029 , 1. , 1. ],
[ 0.03593225, 0.034 , 1. , 1. ],
[ 0.04790861, 0.046 , 1. , 1. ],
[ 0.0528977 , 0.051 , 1. , 1. ],
[ 0.06487406, 0.063 , 1. , 1. ],
[ 0.07086179, 0.069 , 1. , 1. ],
[ 0.07585198, 0.074 , 1. , 1. ],
[ 0.0878282 , 0.086 , 1. , 1. ],
[ 0.09281833, 0.091 , 1. , 1. ],
[ 0.09880612, 0.097 , 1. , 1. ],
[ 0.11078227, 0.109 , 1. , 1. ],
[ 0.11577237, 0.114 , 1. , 1. ],
[ 0.12774806, 0.126 , 1. , 1. ],
[ 0.13273776, 0.131 , 1. , 1. ],
[ 0.13872601, 0.13699999, 1. , 1. ],
[ 0.15070179, 0.149 , 1. , 1. ],
[ 0.15569188, 0.154 , 1. , 1. ],
[ 0.16168009, 0.16 , 1. , 1. ],
[ 0.17265806, 0.171 , 1. , 1. ],
[ 0.17864597, 0.177 , 1. , 1. ],
[ 0.19062206, 0.189 , 1. , 1. ],
[ 0.1956121 , 0.19400001, 1. , 1. ],
[ 0.20160003, 0.2 , 1. , 1. ],
[ 0.212578 , 0.211 , 1. , 1. ],
[ 0.21856593, 0.21699999, 1. , 1. ],
[ 0.22455387, 0.223 , 1. , 1. ],
[ 0.23553205, 0.234 , 1. , 1. ],
[ 0.24151999, 0.23999999, 1. , 1. ],
[ 0.25249797, 0.25099999, 1. , 1. ],
[ 0.25848609, 0.257 , 1. , 1. ],
[ 0.26447403, 0.26300001, 1. , 1. ],
[ 0.27545202, 0.27399999, 1. , 1. ],
[ 0.28143996, 0.28 , 1. , 1. ],
[ 0.28742805, 0.28600001, 1. , 1. ],
[ 0.29840603, 0.29699999, 1. , 1. ],
[ 0.30439401, 0.303 , 1. , 1. ],
[ 0.31537199, 0.31400001, 1. , 1. ],
[ 0.32135993, 0.31999999, 1. , 1. ],
[ 0.3273479 , 0.32600001, 1. , 1. ],
[ 0.33832601, 0.33700001, 1. , 1. ],
[ 0.34431398, 0.34299999, 1. , 1. ],
[ 0.35529196, 0.354 , 1. , 1. ],
[ 0.36128005, 0.36000001, 1. , 1. ],
[ 0.367268 , 0.366 , 1. , 1. ],
[ 0.37824601, 0.377 , 1. , 1. ],
[ 0.38423407, 0.38299999, 1. , 1. ],
[ 0.39022204, 0.389 , 1. , 1. ],
[ 0.40119994, 0.40000001, 1. , 1. ],
[ 0.407188 , 0.40599999, 1. , 1. ],
[ 0.41816598, 0.417 , 1. , 1. ],
[ 0.42415395, 0.42300001, 1. , 1. ],
[ 0.43014202, 0.42899999, 1. , 1. ],
[ 0.44112 , 0.44 , 1. , 1. ],
[ 0.44710797, 0.44600001, 1. , 1. ],
[ 0.45209798, 0.45100001, 1. , 1. ],
[ 0.46407402, 0.463 , 1. , 1. ],
[ 0.47006199, 0.46900001, 1. , 1. ],
[ 0.48104 , 0.47999999, 1. , 1. ],
[ 0.48702797, 0.486 , 1. , 1. ],
[ 0.49201798, 0.491 , 1. , 1. ],
[ 0.50399399, 0.50300002, 1. , 1. ],
[ 0.50998199, 0.509 , 1. , 1. ],
[ 0.51497197, 0.514 , 1. , 1. ],
[ 0.52694798, 0.52600002, 1. , 1. ],
[ 0.53193796, 0.53100002, 1. , 1. ],
[ 0.54391402, 0.54299998, 1. , 1. ],
[ 0.54990202, 0.54900002, 1. , 1. ],
[ 0.554892 , 0.55400002, 1. , 1. ],
[ 0.56686801, 0.56599998, 1. , 1. ],
[ 0.57185799, 0.57099998, 1. , 1. ],
[ 0.57784599, 0.57700002, 1. , 1. ],
[ 0.58982199, 0.58899999, 1. , 1. ],
[ 0.59481204, 0.59399998, 1. , 1. ],
[ 0.60678798, 0.60600001, 1. , 1. ],
[ 0.61177802, 0.611 , 1. , 1. ],
[ 0.61776602, 0.61699998, 1. , 1. ],
[ 0.62974203, 0.62900001, 1. , 1. ],
[ 0.63473201, 0.634 , 1. , 1. ],
[ 0.64072001, 0.63999999, 1. , 1. ],
[ 0.65169799, 0.65100002, 1. , 1. ],
[ 0.657686 , 0.65700001, 1. , 1. ],
[ 0.669662 , 0.66900003, 1. , 1. ],
[ 0.67465198, 0.67400002, 1. , 1. ],
[ 0.68063998, 0.68000001, 1. , 1. ],
[ 0.69161803, 0.69099998, 1. , 1. ],
[ 0.69760603, 0.69700003, 1. , 1. ],
[ 0.70958197, 0.70899999, 1. , 1. ],
[ 0.71457201, 0.71399999, 1. , 1. ],
[ 0.72056001, 0.72000003, 1. , 1. ],
[ 0.731538 , 0.73100001, 1. , 1. ],
[ 0.737526 , 0.73699999, 1. , 1. ],
[ 0.743514 , 0.74299997, 1. , 1. ],
[ 0.75449198, 0.75400001, 1. , 1. ],
[ 0.76047999, 0.75999999, 1. , 1. ],
[ 0.77145803, 0.77100003, 1. , 1. ],
[ 0.77744597, 0.77700001, 1. , 1. ],
[ 0.78343397, 0.78299999, 1. , 1. ],
[ 0.79441202, 0.79400003, 1. , 1. ],
[ 0.80040002, 0.80000001, 1. , 1. ],
[ 0.80638802, 0.80599999, 1. , 1. ],
[ 0.817366 , 0.81699997, 1. , 1. ],
[ 0.82335401, 0.82300001, 1. , 1. ],
[ 0.83433199, 0.83399999, 1. , 1. ],
[ 0.84031999, 0.83999997, 1. , 1. ],
[ 0.84630799, 0.84600002, 1. , 1. ],
[ 0.85728598, 0.85699999, 1. , 1. ],
[ 0.86327398, 0.86299998, 1. , 1. ],
[ 0.86926198, 0.86900002, 1. , 1. ],
[ 0.88024002, 0.88 , 1. , 1. ],
[ 0.88622802, 0.88599998, 1. , 1. ],
[ 0.89720601, 0.89700001, 1. , 1. ],
[ 0.90319401, 0.903 , 1. , 1. ],
[ 0.90918201, 0.90899998, 1. , 1. ],
[ 0.92016 , 0.92000002, 1. , 1. ],
[ 0.926148 , 0.926 , 1. , 1. ],
[ 0.93113798, 0.93099999, 1. , 1. ],
[ 0.94311398, 0.94300002, 1. , 1. ],
[ 0.94910198, 0.949 , 1. , 1. ],
[ 0.96008003, 0.95999998, 1. , 1. ],
[ 0.96606803, 0.96600002, 1. , 1. ],
[ 0.97105801, 0.97100002, 1. , 1. ],
[ 0.98303401, 0.98299998, 1. , 1. ],
[ 0.98902202, 0.98900002, 1. , 1. ],
[ 1. , 1. , 1. , 1. ],
[ 1. , 1. , 1. , 1. ],
[ 1. , 0.99400002, 0.99400002, 1. ],
[ 1. , 0.98900002, 0.98900002, 1. ],
[ 1. , 0.977 , 0.977 , 1. ],
[ 1. , 0.97100002, 0.97100002, 1. ],
[ 1. , 0.96600002, 0.96600002, 1. ],
[ 1. , 0.954 , 0.954 , 1. ],
[ 1. , 0.949 , 0.949 , 1. ],
[ 1. , 0.93699998, 0.93699998, 1. ],
[ 1. , 0.93099999, 0.93099999, 1. ],
[ 1. , 0.926 , 0.926 , 1. ],
[ 1. , 0.91399997, 0.91399997, 1. ],
[ 1. , 0.90899998, 0.90899998, 1. ],
[ 1. , 0.903 , 0.903 , 1. ],
[ 1. , 0.89099997, 0.89099997, 1. ],
[ 1. , 0.88599998, 0.88599998, 1. ],
[ 1. , 0.87400001, 0.87400001, 1. ],
[ 1. , 0.86900002, 0.86900002, 1. ],
[ 1. , 0.86299998, 0.86299998, 1. ],
[ 1. , 0.85100001, 0.85100001, 1. ],
[ 1. , 0.84600002, 0.84600002, 1. ],
[ 1. , 0.83999997, 0.83999997, 1. ],
[ 1. , 0.829 , 0.829 , 1. ],
[ 1. , 0.82300001, 0.82300001, 1. ],
[ 1. , 0.81099999, 0.81099999, 1. ],
[ 1. , 0.80599999, 0.80599999, 1. ],
[ 1. , 0.80000001, 0.80000001, 1. ],
[ 1. , 0.78899997, 0.78899997, 1. ],
[ 1. , 0.78299999, 0.78299999, 1. ],
[ 1. , 0.77700001, 0.77700001, 1. ],
[ 1. , 0.76599997, 0.76599997, 1. ],
[ 1. , 0.75999999, 0.75999999, 1. ],
[ 1. , 0.74900001, 0.74900001, 1. ],
[ 1. , 0.74299997, 0.74299997, 1. ],
[ 1. , 0.73699999, 0.73699999, 1. ],
[ 1. , 0.72600001, 0.72600001, 1. ],
[ 1. , 0.72000003, 0.72000003, 1. ],
[ 1. , 0.71399999, 0.71399999, 1. ],
[ 1. , 0.70300001, 0.70300001, 1. ],
[ 1. , 0.69700003, 0.69700003, 1. ],
[ 1. , 0.68599999, 0.68599999, 1. ],
[ 1. , 0.68000001, 0.68000001, 1. ],
[ 1. , 0.67400002, 0.67400002, 1. ],
[ 1. , 0.66299999, 0.66299999, 1. ],
[ 1. , 0.65700001, 0.65700001, 1. ],
[ 1. , 0.64600003, 0.64600003, 1. ],
[ 1. , 0.63999999, 0.63999999, 1. ],
[ 1. , 0.634 , 0.634 , 1. ],
[ 1. , 0.62300003, 0.62300003, 1. ],
[ 1. , 0.61699998, 0.61699998, 1. ],
[ 1. , 0.611 , 0.611 , 1. ],
[ 1. , 0.60000002, 0.60000002, 1. ],
[ 1. , 0.59399998, 0.59399998, 1. ],
[ 1. , 0.583 , 0.583 , 1. ],
[ 1. , 0.57700002, 0.57700002, 1. ],
[ 1. , 0.57099998, 0.57099998, 1. ],
[ 1. , 0.56 , 0.56 , 1. ],
[ 1. , 0.55400002, 0.55400002, 1. ],
[ 1. , 0.54900002, 0.54900002, 1. ],
[ 1. , 0.537 , 0.537 , 1. ],
[ 1. , 0.53100002, 0.53100002, 1. ],
[ 1. , 0.51999998, 0.51999998, 1. ],
[ 1. , 0.514 , 0.514 , 1. ],
[ 1. , 0.509 , 0.509 , 1. ],
[ 1. , 0.49700001, 0.49700001, 1. ],
[ 1. , 0.491 , 0.491 , 1. ],
[ 1. , 0.486 , 0.486 , 1. ],
[ 1. , 0.47400001, 0.47400001, 1. ],
[ 1. , 0.46900001, 0.46900001, 1. ],
[ 1. , 0.45699999, 0.45699999, 1. ],
[ 1. , 0.45100001, 0.45100001, 1. ],
[ 1. , 0.44600001, 0.44600001, 1. ],
[ 1. , 0.43399999, 0.43399999, 1. ],
[ 1. , 0.42899999, 0.42899999, 1. ],
[ 1. , 0.42300001, 0.42300001, 1. ],
[ 1. , 0.41100001, 0.41100001, 1. ],
[ 1. , 0.40599999, 0.40599999, 1. ],
[ 1. , 0.39399999, 0.39399999, 1. ],
[ 1. , 0.389 , 0.389 , 1. ],
[ 1. , 0.38299999, 0.38299999, 1. ],
[ 1. , 0.37099999, 0.37099999, 1. ],
[ 1. , 0.366 , 0.366 , 1. ],
[ 1. , 0.36000001, 0.36000001, 1. ],
[ 1. , 0.34900001, 0.34900001, 1. ],
[ 1. , 0.34299999, 0.34299999, 1. ],
[ 1. , 0.331 , 0.331 , 1. ],
[ 1. , 0.32600001, 0.32600001, 1. ],
[ 1. , 0.31999999, 0.31999999, 1. ],
[ 1. , 0.30899999, 0.30899999, 1. ],
[ 1. , 0.303 , 0.303 , 1. ],
[ 1. , 0.29100001, 0.29100001, 1. ],
[ 1. , 0.28600001, 0.28600001, 1. ],
[ 1. , 0.28 , 0.28 , 1. ],
[ 1. , 0.26899999, 0.26899999, 1. ],
[ 1. , 0.26300001, 0.26300001, 1. ],
[ 1. , 0.257 , 0.257 , 1. ],
[ 1. , 0.24600001, 0.24600001, 1. ],
[ 1. , 0.23999999, 0.23999999, 1. ],
[ 1. , 0.229 , 0.229 , 1. ],
[ 1. , 0.223 , 0.223 , 1. ],
[ 1. , 0.21699999, 0.21699999, 1. ],
[ 1. , 0.206 , 0.206 , 1. ],
[ 1. , 0.2 , 0.2 , 1. ],
[ 1. , 0.19400001, 0.19400001, 1. ],
[ 1. , 0.183 , 0.183 , 1. ],
[ 1. , 0.177 , 0.177 , 1. ],
[ 1. , 0.16599999, 0.16599999, 1. ],
[ 1. , 0.16 , 0.16 , 1. ],
[ 1. , 0.154 , 0.154 , 1. ],
[ 1. , 0.14300001, 0.14300001, 1. ],
[ 1. , 0.13699999, 0.13699999, 1. ],
[ 1. , 0.131 , 0.131 , 1. ],
[ 1. , 0.12 , 0.12 , 1. ],
[ 1. , 0.114 , 0.114 , 1. ],
[ 1. , 0.103 , 0.103 , 1. ],
[ 1. , 0.097 , 0.097 , 1. ],
[ 1. , 0.091 , 0.091 , 1. ],
[ 1. , 0.08 , 0.08 , 1. ],
[ 1. , 0.074 , 0.074 , 1. ],
[ 1. , 0.069 , 0.069 , 1. ],
[ 1. , 0.057 , 0.057 , 1. ],
[ 1. , 0.051 , 0.051 , 1. ],
[ 1. , 0.04 , 0.04 , 1. ],
[ 1. , 0.034 , 0.034 , 1. ],
[ 1. , 0.029 , 0.029 , 1. ],
[ 1. , 0.017 , 0.017 , 1. ],
[ 1. , 0.011 , 0.011 , 1. ],
[ 1. , 0. , 0. , 1. ]],'f'), 'maxi': 10.0, 'mini': 0.0}
cm.configure(*(), **cfg)
| [
"numpy.array",
"DejaVu.colorMap.ColorMap"
] | [((66, 84), 'DejaVu.colorMap.ColorMap', 'ColorMap', (['"""rwb256"""'], {}), "('rwb256')\n", (74, 84), False, 'from DejaVu.colorMap import ColorMap\n'), ((118, 9097), 'numpy.array', 'array', (['[[1.0, 0.0, 0.0, 1.0], [0.00798478, 0.006, 1.0, 1.0], [0.01297748, 0.011, \n 1.0, 1.0], [0.02495463, 0.023, 1.0, 1.0], [0.03094184, 0.029, 1.0, 1.0],\n [0.03593225, 0.034, 1.0, 1.0], [0.04790861, 0.046, 1.0, 1.0], [\n 0.0528977, 0.051, 1.0, 1.0], [0.06487406, 0.063, 1.0, 1.0], [0.07086179,\n 0.069, 1.0, 1.0], [0.07585198, 0.074, 1.0, 1.0], [0.0878282, 0.086, 1.0,\n 1.0], [0.09281833, 0.091, 1.0, 1.0], [0.09880612, 0.097, 1.0, 1.0], [\n 0.11078227, 0.109, 1.0, 1.0], [0.11577237, 0.114, 1.0, 1.0], [\n 0.12774806, 0.126, 1.0, 1.0], [0.13273776, 0.131, 1.0, 1.0], [\n 0.13872601, 0.13699999, 1.0, 1.0], [0.15070179, 0.149, 1.0, 1.0], [\n 0.15569188, 0.154, 1.0, 1.0], [0.16168009, 0.16, 1.0, 1.0], [0.17265806,\n 0.171, 1.0, 1.0], [0.17864597, 0.177, 1.0, 1.0], [0.19062206, 0.189, \n 1.0, 1.0], [0.1956121, 0.19400001, 1.0, 1.0], [0.20160003, 0.2, 1.0, \n 1.0], [0.212578, 0.211, 1.0, 1.0], [0.21856593, 0.21699999, 1.0, 1.0],\n [0.22455387, 0.223, 1.0, 1.0], [0.23553205, 0.234, 1.0, 1.0], [\n 0.24151999, 0.23999999, 1.0, 1.0], [0.25249797, 0.25099999, 1.0, 1.0],\n [0.25848609, 0.257, 1.0, 1.0], [0.26447403, 0.26300001, 1.0, 1.0], [\n 0.27545202, 0.27399999, 1.0, 1.0], [0.28143996, 0.28, 1.0, 1.0], [\n 0.28742805, 0.28600001, 1.0, 1.0], [0.29840603, 0.29699999, 1.0, 1.0],\n [0.30439401, 0.303, 1.0, 1.0], [0.31537199, 0.31400001, 1.0, 1.0], [\n 0.32135993, 0.31999999, 1.0, 1.0], [0.3273479, 0.32600001, 1.0, 1.0], [\n 0.33832601, 0.33700001, 1.0, 1.0], [0.34431398, 0.34299999, 1.0, 1.0],\n [0.35529196, 0.354, 1.0, 1.0], [0.36128005, 0.36000001, 1.0, 1.0], [\n 0.367268, 0.366, 1.0, 1.0], [0.37824601, 0.377, 1.0, 1.0], [0.38423407,\n 0.38299999, 1.0, 1.0], [0.39022204, 0.389, 1.0, 1.0], [0.40119994, \n 0.40000001, 1.0, 1.0], [0.407188, 0.40599999, 1.0, 1.0], [0.41816598, \n 0.417, 1.0, 
1.0], [0.42415395, 0.42300001, 1.0, 1.0], [0.43014202, \n 0.42899999, 1.0, 1.0], [0.44112, 0.44, 1.0, 1.0], [0.44710797, \n 0.44600001, 1.0, 1.0], [0.45209798, 0.45100001, 1.0, 1.0], [0.46407402,\n 0.463, 1.0, 1.0], [0.47006199, 0.46900001, 1.0, 1.0], [0.48104, \n 0.47999999, 1.0, 1.0], [0.48702797, 0.486, 1.0, 1.0], [0.49201798, \n 0.491, 1.0, 1.0], [0.50399399, 0.50300002, 1.0, 1.0], [0.50998199, \n 0.509, 1.0, 1.0], [0.51497197, 0.514, 1.0, 1.0], [0.52694798, \n 0.52600002, 1.0, 1.0], [0.53193796, 0.53100002, 1.0, 1.0], [0.54391402,\n 0.54299998, 1.0, 1.0], [0.54990202, 0.54900002, 1.0, 1.0], [0.554892, \n 0.55400002, 1.0, 1.0], [0.56686801, 0.56599998, 1.0, 1.0], [0.57185799,\n 0.57099998, 1.0, 1.0], [0.57784599, 0.57700002, 1.0, 1.0], [0.58982199,\n 0.58899999, 1.0, 1.0], [0.59481204, 0.59399998, 1.0, 1.0], [0.60678798,\n 0.60600001, 1.0, 1.0], [0.61177802, 0.611, 1.0, 1.0], [0.61776602, \n 0.61699998, 1.0, 1.0], [0.62974203, 0.62900001, 1.0, 1.0], [0.63473201,\n 0.634, 1.0, 1.0], [0.64072001, 0.63999999, 1.0, 1.0], [0.65169799, \n 0.65100002, 1.0, 1.0], [0.657686, 0.65700001, 1.0, 1.0], [0.669662, \n 0.66900003, 1.0, 1.0], [0.67465198, 0.67400002, 1.0, 1.0], [0.68063998,\n 0.68000001, 1.0, 1.0], [0.69161803, 0.69099998, 1.0, 1.0], [0.69760603,\n 0.69700003, 1.0, 1.0], [0.70958197, 0.70899999, 1.0, 1.0], [0.71457201,\n 0.71399999, 1.0, 1.0], [0.72056001, 0.72000003, 1.0, 1.0], [0.731538, \n 0.73100001, 1.0, 1.0], [0.737526, 0.73699999, 1.0, 1.0], [0.743514, \n 0.74299997, 1.0, 1.0], [0.75449198, 0.75400001, 1.0, 1.0], [0.76047999,\n 0.75999999, 1.0, 1.0], [0.77145803, 0.77100003, 1.0, 1.0], [0.77744597,\n 0.77700001, 1.0, 1.0], [0.78343397, 0.78299999, 1.0, 1.0], [0.79441202,\n 0.79400003, 1.0, 1.0], [0.80040002, 0.80000001, 1.0, 1.0], [0.80638802,\n 0.80599999, 1.0, 1.0], [0.817366, 0.81699997, 1.0, 1.0], [0.82335401, \n 0.82300001, 1.0, 1.0], [0.83433199, 0.83399999, 1.0, 1.0], [0.84031999,\n 0.83999997, 1.0, 1.0], [0.84630799, 0.84600002, 1.0, 1.0], 
[0.85728598,\n 0.85699999, 1.0, 1.0], [0.86327398, 0.86299998, 1.0, 1.0], [0.86926198,\n 0.86900002, 1.0, 1.0], [0.88024002, 0.88, 1.0, 1.0], [0.88622802, \n 0.88599998, 1.0, 1.0], [0.89720601, 0.89700001, 1.0, 1.0], [0.90319401,\n 0.903, 1.0, 1.0], [0.90918201, 0.90899998, 1.0, 1.0], [0.92016, \n 0.92000002, 1.0, 1.0], [0.926148, 0.926, 1.0, 1.0], [0.93113798, \n 0.93099999, 1.0, 1.0], [0.94311398, 0.94300002, 1.0, 1.0], [0.94910198,\n 0.949, 1.0, 1.0], [0.96008003, 0.95999998, 1.0, 1.0], [0.96606803, \n 0.96600002, 1.0, 1.0], [0.97105801, 0.97100002, 1.0, 1.0], [0.98303401,\n 0.98299998, 1.0, 1.0], [0.98902202, 0.98900002, 1.0, 1.0], [1.0, 1.0, \n 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 0.99400002, 0.99400002, 1.0], [\n 1.0, 0.98900002, 0.98900002, 1.0], [1.0, 0.977, 0.977, 1.0], [1.0, \n 0.97100002, 0.97100002, 1.0], [1.0, 0.96600002, 0.96600002, 1.0], [1.0,\n 0.954, 0.954, 1.0], [1.0, 0.949, 0.949, 1.0], [1.0, 0.93699998, \n 0.93699998, 1.0], [1.0, 0.93099999, 0.93099999, 1.0], [1.0, 0.926, \n 0.926, 1.0], [1.0, 0.91399997, 0.91399997, 1.0], [1.0, 0.90899998, \n 0.90899998, 1.0], [1.0, 0.903, 0.903, 1.0], [1.0, 0.89099997, \n 0.89099997, 1.0], [1.0, 0.88599998, 0.88599998, 1.0], [1.0, 0.87400001,\n 0.87400001, 1.0], [1.0, 0.86900002, 0.86900002, 1.0], [1.0, 0.86299998,\n 0.86299998, 1.0], [1.0, 0.85100001, 0.85100001, 1.0], [1.0, 0.84600002,\n 0.84600002, 1.0], [1.0, 0.83999997, 0.83999997, 1.0], [1.0, 0.829, \n 0.829, 1.0], [1.0, 0.82300001, 0.82300001, 1.0], [1.0, 0.81099999, \n 0.81099999, 1.0], [1.0, 0.80599999, 0.80599999, 1.0], [1.0, 0.80000001,\n 0.80000001, 1.0], [1.0, 0.78899997, 0.78899997, 1.0], [1.0, 0.78299999,\n 0.78299999, 1.0], [1.0, 0.77700001, 0.77700001, 1.0], [1.0, 0.76599997,\n 0.76599997, 1.0], [1.0, 0.75999999, 0.75999999, 1.0], [1.0, 0.74900001,\n 0.74900001, 1.0], [1.0, 0.74299997, 0.74299997, 1.0], [1.0, 0.73699999,\n 0.73699999, 1.0], [1.0, 0.72600001, 0.72600001, 1.0], [1.0, 0.72000003,\n 0.72000003, 1.0], [1.0, 0.71399999, 
0.71399999, 1.0], [1.0, 0.70300001,\n 0.70300001, 1.0], [1.0, 0.69700003, 0.69700003, 1.0], [1.0, 0.68599999,\n 0.68599999, 1.0], [1.0, 0.68000001, 0.68000001, 1.0], [1.0, 0.67400002,\n 0.67400002, 1.0], [1.0, 0.66299999, 0.66299999, 1.0], [1.0, 0.65700001,\n 0.65700001, 1.0], [1.0, 0.64600003, 0.64600003, 1.0], [1.0, 0.63999999,\n 0.63999999, 1.0], [1.0, 0.634, 0.634, 1.0], [1.0, 0.62300003, \n 0.62300003, 1.0], [1.0, 0.61699998, 0.61699998, 1.0], [1.0, 0.611, \n 0.611, 1.0], [1.0, 0.60000002, 0.60000002, 1.0], [1.0, 0.59399998, \n 0.59399998, 1.0], [1.0, 0.583, 0.583, 1.0], [1.0, 0.57700002, \n 0.57700002, 1.0], [1.0, 0.57099998, 0.57099998, 1.0], [1.0, 0.56, 0.56,\n 1.0], [1.0, 0.55400002, 0.55400002, 1.0], [1.0, 0.54900002, 0.54900002,\n 1.0], [1.0, 0.537, 0.537, 1.0], [1.0, 0.53100002, 0.53100002, 1.0], [\n 1.0, 0.51999998, 0.51999998, 1.0], [1.0, 0.514, 0.514, 1.0], [1.0, \n 0.509, 0.509, 1.0], [1.0, 0.49700001, 0.49700001, 1.0], [1.0, 0.491, \n 0.491, 1.0], [1.0, 0.486, 0.486, 1.0], [1.0, 0.47400001, 0.47400001, \n 1.0], [1.0, 0.46900001, 0.46900001, 1.0], [1.0, 0.45699999, 0.45699999,\n 1.0], [1.0, 0.45100001, 0.45100001, 1.0], [1.0, 0.44600001, 0.44600001,\n 1.0], [1.0, 0.43399999, 0.43399999, 1.0], [1.0, 0.42899999, 0.42899999,\n 1.0], [1.0, 0.42300001, 0.42300001, 1.0], [1.0, 0.41100001, 0.41100001,\n 1.0], [1.0, 0.40599999, 0.40599999, 1.0], [1.0, 0.39399999, 0.39399999,\n 1.0], [1.0, 0.389, 0.389, 1.0], [1.0, 0.38299999, 0.38299999, 1.0], [\n 1.0, 0.37099999, 0.37099999, 1.0], [1.0, 0.366, 0.366, 1.0], [1.0, \n 0.36000001, 0.36000001, 1.0], [1.0, 0.34900001, 0.34900001, 1.0], [1.0,\n 0.34299999, 0.34299999, 1.0], [1.0, 0.331, 0.331, 1.0], [1.0, \n 0.32600001, 0.32600001, 1.0], [1.0, 0.31999999, 0.31999999, 1.0], [1.0,\n 0.30899999, 0.30899999, 1.0], [1.0, 0.303, 0.303, 1.0], [1.0, \n 0.29100001, 0.29100001, 1.0], [1.0, 0.28600001, 0.28600001, 1.0], [1.0,\n 0.28, 0.28, 1.0], [1.0, 0.26899999, 0.26899999, 1.0], [1.0, 0.26300001,\n 0.26300001, 1.0], [1.0, 
0.257, 0.257, 1.0], [1.0, 0.24600001, \n 0.24600001, 1.0], [1.0, 0.23999999, 0.23999999, 1.0], [1.0, 0.229, \n 0.229, 1.0], [1.0, 0.223, 0.223, 1.0], [1.0, 0.21699999, 0.21699999, \n 1.0], [1.0, 0.206, 0.206, 1.0], [1.0, 0.2, 0.2, 1.0], [1.0, 0.19400001,\n 0.19400001, 1.0], [1.0, 0.183, 0.183, 1.0], [1.0, 0.177, 0.177, 1.0], [\n 1.0, 0.16599999, 0.16599999, 1.0], [1.0, 0.16, 0.16, 1.0], [1.0, 0.154,\n 0.154, 1.0], [1.0, 0.14300001, 0.14300001, 1.0], [1.0, 0.13699999, \n 0.13699999, 1.0], [1.0, 0.131, 0.131, 1.0], [1.0, 0.12, 0.12, 1.0], [\n 1.0, 0.114, 0.114, 1.0], [1.0, 0.103, 0.103, 1.0], [1.0, 0.097, 0.097, \n 1.0], [1.0, 0.091, 0.091, 1.0], [1.0, 0.08, 0.08, 1.0], [1.0, 0.074, \n 0.074, 1.0], [1.0, 0.069, 0.069, 1.0], [1.0, 0.057, 0.057, 1.0], [1.0, \n 0.051, 0.051, 1.0], [1.0, 0.04, 0.04, 1.0], [1.0, 0.034, 0.034, 1.0], [\n 1.0, 0.029, 0.029, 1.0], [1.0, 0.017, 0.017, 1.0], [1.0, 0.011, 0.011, \n 1.0], [1.0, 0.0, 0.0, 1.0]]', '"""f"""'], {}), "([[1.0, 0.0, 0.0, 1.0], [0.00798478, 0.006, 1.0, 1.0], [0.01297748, \n 0.011, 1.0, 1.0], [0.02495463, 0.023, 1.0, 1.0], [0.03094184, 0.029, \n 1.0, 1.0], [0.03593225, 0.034, 1.0, 1.0], [0.04790861, 0.046, 1.0, 1.0],\n [0.0528977, 0.051, 1.0, 1.0], [0.06487406, 0.063, 1.0, 1.0], [\n 0.07086179, 0.069, 1.0, 1.0], [0.07585198, 0.074, 1.0, 1.0], [0.0878282,\n 0.086, 1.0, 1.0], [0.09281833, 0.091, 1.0, 1.0], [0.09880612, 0.097, \n 1.0, 1.0], [0.11078227, 0.109, 1.0, 1.0], [0.11577237, 0.114, 1.0, 1.0],\n [0.12774806, 0.126, 1.0, 1.0], [0.13273776, 0.131, 1.0, 1.0], [\n 0.13872601, 0.13699999, 1.0, 1.0], [0.15070179, 0.149, 1.0, 1.0], [\n 0.15569188, 0.154, 1.0, 1.0], [0.16168009, 0.16, 1.0, 1.0], [0.17265806,\n 0.171, 1.0, 1.0], [0.17864597, 0.177, 1.0, 1.0], [0.19062206, 0.189, \n 1.0, 1.0], [0.1956121, 0.19400001, 1.0, 1.0], [0.20160003, 0.2, 1.0, \n 1.0], [0.212578, 0.211, 1.0, 1.0], [0.21856593, 0.21699999, 1.0, 1.0],\n [0.22455387, 0.223, 1.0, 1.0], [0.23553205, 0.234, 1.0, 1.0], [\n 0.24151999, 0.23999999, 1.0, 1.0], 
[0.25249797, 0.25099999, 1.0, 1.0],\n [0.25848609, 0.257, 1.0, 1.0], [0.26447403, 0.26300001, 1.0, 1.0], [\n 0.27545202, 0.27399999, 1.0, 1.0], [0.28143996, 0.28, 1.0, 1.0], [\n 0.28742805, 0.28600001, 1.0, 1.0], [0.29840603, 0.29699999, 1.0, 1.0],\n [0.30439401, 0.303, 1.0, 1.0], [0.31537199, 0.31400001, 1.0, 1.0], [\n 0.32135993, 0.31999999, 1.0, 1.0], [0.3273479, 0.32600001, 1.0, 1.0], [\n 0.33832601, 0.33700001, 1.0, 1.0], [0.34431398, 0.34299999, 1.0, 1.0],\n [0.35529196, 0.354, 1.0, 1.0], [0.36128005, 0.36000001, 1.0, 1.0], [\n 0.367268, 0.366, 1.0, 1.0], [0.37824601, 0.377, 1.0, 1.0], [0.38423407,\n 0.38299999, 1.0, 1.0], [0.39022204, 0.389, 1.0, 1.0], [0.40119994, \n 0.40000001, 1.0, 1.0], [0.407188, 0.40599999, 1.0, 1.0], [0.41816598, \n 0.417, 1.0, 1.0], [0.42415395, 0.42300001, 1.0, 1.0], [0.43014202, \n 0.42899999, 1.0, 1.0], [0.44112, 0.44, 1.0, 1.0], [0.44710797, \n 0.44600001, 1.0, 1.0], [0.45209798, 0.45100001, 1.0, 1.0], [0.46407402,\n 0.463, 1.0, 1.0], [0.47006199, 0.46900001, 1.0, 1.0], [0.48104, \n 0.47999999, 1.0, 1.0], [0.48702797, 0.486, 1.0, 1.0], [0.49201798, \n 0.491, 1.0, 1.0], [0.50399399, 0.50300002, 1.0, 1.0], [0.50998199, \n 0.509, 1.0, 1.0], [0.51497197, 0.514, 1.0, 1.0], [0.52694798, \n 0.52600002, 1.0, 1.0], [0.53193796, 0.53100002, 1.0, 1.0], [0.54391402,\n 0.54299998, 1.0, 1.0], [0.54990202, 0.54900002, 1.0, 1.0], [0.554892, \n 0.55400002, 1.0, 1.0], [0.56686801, 0.56599998, 1.0, 1.0], [0.57185799,\n 0.57099998, 1.0, 1.0], [0.57784599, 0.57700002, 1.0, 1.0], [0.58982199,\n 0.58899999, 1.0, 1.0], [0.59481204, 0.59399998, 1.0, 1.0], [0.60678798,\n 0.60600001, 1.0, 1.0], [0.61177802, 0.611, 1.0, 1.0], [0.61776602, \n 0.61699998, 1.0, 1.0], [0.62974203, 0.62900001, 1.0, 1.0], [0.63473201,\n 0.634, 1.0, 1.0], [0.64072001, 0.63999999, 1.0, 1.0], [0.65169799, \n 0.65100002, 1.0, 1.0], [0.657686, 0.65700001, 1.0, 1.0], [0.669662, \n 0.66900003, 1.0, 1.0], [0.67465198, 0.67400002, 1.0, 1.0], [0.68063998,\n 0.68000001, 1.0, 1.0], 
[0.69161803, 0.69099998, 1.0, 1.0], [0.69760603,\n 0.69700003, 1.0, 1.0], [0.70958197, 0.70899999, 1.0, 1.0], [0.71457201,\n 0.71399999, 1.0, 1.0], [0.72056001, 0.72000003, 1.0, 1.0], [0.731538, \n 0.73100001, 1.0, 1.0], [0.737526, 0.73699999, 1.0, 1.0], [0.743514, \n 0.74299997, 1.0, 1.0], [0.75449198, 0.75400001, 1.0, 1.0], [0.76047999,\n 0.75999999, 1.0, 1.0], [0.77145803, 0.77100003, 1.0, 1.0], [0.77744597,\n 0.77700001, 1.0, 1.0], [0.78343397, 0.78299999, 1.0, 1.0], [0.79441202,\n 0.79400003, 1.0, 1.0], [0.80040002, 0.80000001, 1.0, 1.0], [0.80638802,\n 0.80599999, 1.0, 1.0], [0.817366, 0.81699997, 1.0, 1.0], [0.82335401, \n 0.82300001, 1.0, 1.0], [0.83433199, 0.83399999, 1.0, 1.0], [0.84031999,\n 0.83999997, 1.0, 1.0], [0.84630799, 0.84600002, 1.0, 1.0], [0.85728598,\n 0.85699999, 1.0, 1.0], [0.86327398, 0.86299998, 1.0, 1.0], [0.86926198,\n 0.86900002, 1.0, 1.0], [0.88024002, 0.88, 1.0, 1.0], [0.88622802, \n 0.88599998, 1.0, 1.0], [0.89720601, 0.89700001, 1.0, 1.0], [0.90319401,\n 0.903, 1.0, 1.0], [0.90918201, 0.90899998, 1.0, 1.0], [0.92016, \n 0.92000002, 1.0, 1.0], [0.926148, 0.926, 1.0, 1.0], [0.93113798, \n 0.93099999, 1.0, 1.0], [0.94311398, 0.94300002, 1.0, 1.0], [0.94910198,\n 0.949, 1.0, 1.0], [0.96008003, 0.95999998, 1.0, 1.0], [0.96606803, \n 0.96600002, 1.0, 1.0], [0.97105801, 0.97100002, 1.0, 1.0], [0.98303401,\n 0.98299998, 1.0, 1.0], [0.98902202, 0.98900002, 1.0, 1.0], [1.0, 1.0, \n 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 0.99400002, 0.99400002, 1.0], [\n 1.0, 0.98900002, 0.98900002, 1.0], [1.0, 0.977, 0.977, 1.0], [1.0, \n 0.97100002, 0.97100002, 1.0], [1.0, 0.96600002, 0.96600002, 1.0], [1.0,\n 0.954, 0.954, 1.0], [1.0, 0.949, 0.949, 1.0], [1.0, 0.93699998, \n 0.93699998, 1.0], [1.0, 0.93099999, 0.93099999, 1.0], [1.0, 0.926, \n 0.926, 1.0], [1.0, 0.91399997, 0.91399997, 1.0], [1.0, 0.90899998, \n 0.90899998, 1.0], [1.0, 0.903, 0.903, 1.0], [1.0, 0.89099997, \n 0.89099997, 1.0], [1.0, 0.88599998, 0.88599998, 1.0], [1.0, 0.87400001,\n 
0.87400001, 1.0], [1.0, 0.86900002, 0.86900002, 1.0], [1.0, 0.86299998,\n 0.86299998, 1.0], [1.0, 0.85100001, 0.85100001, 1.0], [1.0, 0.84600002,\n 0.84600002, 1.0], [1.0, 0.83999997, 0.83999997, 1.0], [1.0, 0.829, \n 0.829, 1.0], [1.0, 0.82300001, 0.82300001, 1.0], [1.0, 0.81099999, \n 0.81099999, 1.0], [1.0, 0.80599999, 0.80599999, 1.0], [1.0, 0.80000001,\n 0.80000001, 1.0], [1.0, 0.78899997, 0.78899997, 1.0], [1.0, 0.78299999,\n 0.78299999, 1.0], [1.0, 0.77700001, 0.77700001, 1.0], [1.0, 0.76599997,\n 0.76599997, 1.0], [1.0, 0.75999999, 0.75999999, 1.0], [1.0, 0.74900001,\n 0.74900001, 1.0], [1.0, 0.74299997, 0.74299997, 1.0], [1.0, 0.73699999,\n 0.73699999, 1.0], [1.0, 0.72600001, 0.72600001, 1.0], [1.0, 0.72000003,\n 0.72000003, 1.0], [1.0, 0.71399999, 0.71399999, 1.0], [1.0, 0.70300001,\n 0.70300001, 1.0], [1.0, 0.69700003, 0.69700003, 1.0], [1.0, 0.68599999,\n 0.68599999, 1.0], [1.0, 0.68000001, 0.68000001, 1.0], [1.0, 0.67400002,\n 0.67400002, 1.0], [1.0, 0.66299999, 0.66299999, 1.0], [1.0, 0.65700001,\n 0.65700001, 1.0], [1.0, 0.64600003, 0.64600003, 1.0], [1.0, 0.63999999,\n 0.63999999, 1.0], [1.0, 0.634, 0.634, 1.0], [1.0, 0.62300003, \n 0.62300003, 1.0], [1.0, 0.61699998, 0.61699998, 1.0], [1.0, 0.611, \n 0.611, 1.0], [1.0, 0.60000002, 0.60000002, 1.0], [1.0, 0.59399998, \n 0.59399998, 1.0], [1.0, 0.583, 0.583, 1.0], [1.0, 0.57700002, \n 0.57700002, 1.0], [1.0, 0.57099998, 0.57099998, 1.0], [1.0, 0.56, 0.56,\n 1.0], [1.0, 0.55400002, 0.55400002, 1.0], [1.0, 0.54900002, 0.54900002,\n 1.0], [1.0, 0.537, 0.537, 1.0], [1.0, 0.53100002, 0.53100002, 1.0], [\n 1.0, 0.51999998, 0.51999998, 1.0], [1.0, 0.514, 0.514, 1.0], [1.0, \n 0.509, 0.509, 1.0], [1.0, 0.49700001, 0.49700001, 1.0], [1.0, 0.491, \n 0.491, 1.0], [1.0, 0.486, 0.486, 1.0], [1.0, 0.47400001, 0.47400001, \n 1.0], [1.0, 0.46900001, 0.46900001, 1.0], [1.0, 0.45699999, 0.45699999,\n 1.0], [1.0, 0.45100001, 0.45100001, 1.0], [1.0, 0.44600001, 0.44600001,\n 1.0], [1.0, 0.43399999, 0.43399999, 1.0], 
[1.0, 0.42899999, 0.42899999,\n 1.0], [1.0, 0.42300001, 0.42300001, 1.0], [1.0, 0.41100001, 0.41100001,\n 1.0], [1.0, 0.40599999, 0.40599999, 1.0], [1.0, 0.39399999, 0.39399999,\n 1.0], [1.0, 0.389, 0.389, 1.0], [1.0, 0.38299999, 0.38299999, 1.0], [\n 1.0, 0.37099999, 0.37099999, 1.0], [1.0, 0.366, 0.366, 1.0], [1.0, \n 0.36000001, 0.36000001, 1.0], [1.0, 0.34900001, 0.34900001, 1.0], [1.0,\n 0.34299999, 0.34299999, 1.0], [1.0, 0.331, 0.331, 1.0], [1.0, \n 0.32600001, 0.32600001, 1.0], [1.0, 0.31999999, 0.31999999, 1.0], [1.0,\n 0.30899999, 0.30899999, 1.0], [1.0, 0.303, 0.303, 1.0], [1.0, \n 0.29100001, 0.29100001, 1.0], [1.0, 0.28600001, 0.28600001, 1.0], [1.0,\n 0.28, 0.28, 1.0], [1.0, 0.26899999, 0.26899999, 1.0], [1.0, 0.26300001,\n 0.26300001, 1.0], [1.0, 0.257, 0.257, 1.0], [1.0, 0.24600001, \n 0.24600001, 1.0], [1.0, 0.23999999, 0.23999999, 1.0], [1.0, 0.229, \n 0.229, 1.0], [1.0, 0.223, 0.223, 1.0], [1.0, 0.21699999, 0.21699999, \n 1.0], [1.0, 0.206, 0.206, 1.0], [1.0, 0.2, 0.2, 1.0], [1.0, 0.19400001,\n 0.19400001, 1.0], [1.0, 0.183, 0.183, 1.0], [1.0, 0.177, 0.177, 1.0], [\n 1.0, 0.16599999, 0.16599999, 1.0], [1.0, 0.16, 0.16, 1.0], [1.0, 0.154,\n 0.154, 1.0], [1.0, 0.14300001, 0.14300001, 1.0], [1.0, 0.13699999, \n 0.13699999, 1.0], [1.0, 0.131, 0.131, 1.0], [1.0, 0.12, 0.12, 1.0], [\n 1.0, 0.114, 0.114, 1.0], [1.0, 0.103, 0.103, 1.0], [1.0, 0.097, 0.097, \n 1.0], [1.0, 0.091, 0.091, 1.0], [1.0, 0.08, 0.08, 1.0], [1.0, 0.074, \n 0.074, 1.0], [1.0, 0.069, 0.069, 1.0], [1.0, 0.057, 0.057, 1.0], [1.0, \n 0.051, 0.051, 1.0], [1.0, 0.04, 0.04, 1.0], [1.0, 0.034, 0.034, 1.0], [\n 1.0, 0.029, 0.029, 1.0], [1.0, 0.017, 0.017, 1.0], [1.0, 0.011, 0.011, \n 1.0], [1.0, 0.0, 0.0, 1.0]], 'f')\n", (123, 9097), False, 'from numpy import array\n')] |
# -*- coding: utf-8 -*-
"""
Created on 17-8-1
@author: hy_qiu
"""
import base64
import random
import time
import cv2
import numpy
import requests
# --- UI state shared (via module-level globals) with the trackbar callbacks ---
MAIN_WINDOW_NAME = 'verify'
value1 = 4  # template-matching method index (trackbar 'match', see get_bgimg)
max_value2 = 18 # maximum rotation angle, in multiples of 10 degrees
value2 = max_value2 // 2 # starting angle step (x10 degrees), i.e. 90
value3 = 2  # thresholding mode selector (trackbar 'threshold', see get_bgimg)
curidx = 0  # index of the captcha image currently shown
# RGB Format
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 255, 0)]
class Align:
    """Translate a two-character alignment spec into pixel coordinates.

    The first character selects the horizontal alignment ('l'/'c'/'r'),
    the second the vertical one ('t'/'c'/'b'); anything unrecognised
    falls back to centred.
    """

    def __init__(self, align='cc'):
        h = align[0].lower()
        v = align[1].lower()
        self.halign = h if h in 'lcr' else 'c'
        self.valign = v if v in 'tcb' else 'c'

    def get_topleft(self, box, size):
        """Return the (x, y) top-left corner of an item of ``size``
        aligned inside ``box`` = (x, y, width, height)."""
        bx, by, bw, bh = box
        sw, sh = size
        x = {'l': bx,
             'c': bx + int((bw - sw) / 2 + 0.5),
             'r': bx + bw - sw}.get(self.halign, 0)
        y = {'t': by,
             'c': by + int((bh - sh) / 2 + 0.5),
             'b': by + bh - sh}.get(self.valign, 0)
        return x, y

    def get_bottomleft(self, box, size):
        """Return the (x, y) bottom-left corner of the aligned item."""
        left, top = self.get_topleft(box, size)
        return left, top + size[1]
def get_feature(img):
    """Overlay Harris corner responses on ``img`` (in place, in red) and return it."""
    grey = numpy.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    response = cv2.cornerHarris(grey, 2, 3, 0.04)
    # mark pixels whose corner response exceeds 1% of the maximum
    img[response > 0.01 * response.max()] = [0, 0, 255]
    return img
# dst = cv2.goodFeaturesToTrack(gray, 200, 0.01, 2)
# for n in dst:
# pos = n[0]
# img[int(pos[1]), int(pos[0])] = [0, 0, 255]
# dst = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2. THRESH_BINARY_INV, 5, 0)
# # img[dst > 0.01 * dst.max()] = [0, 0, 255]
# img = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
# return img
def get_tgimg(img):
    """
    Process the hint (target) image and extract the individual hint characters.

    :param img: hint image (BGR)
    :type img:
    :return: the original image with character contours outlined, a strip image
        with each character framed in a distinct colour (in extraction order),
        and the list of per-character template images
    :rtype: img original image, out character strip image, templets templates
        after angle correction
    """
    imgBW = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    h, w = imgBW.shape
    _, imgBW = cv2.threshold(imgBW, 0, 255,
                             cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    img2 = cv2.erode(imgBW, None, iterations=3)
    img2 = cv2.dilate(img2, None, iterations=3)
    # pad by 10px on every side so contours touching the border close properly
    out = numpy.full((20 + h, 20 + w), 255, numpy.uint8)
    copy_image(out, 10, 10, img2)
    out, cnts, hierarchy = cv2.findContours(out, cv2.RETR_LIST,
                                            cv2.CHAIN_APPROX_NONE)
    rects = []
    # cnts[-1] is the outer border contour; skip it
    for cnt in cnts[:-1]:
        cnt -= 10  # undo the padding offset
        x1 = cnt[:, :, 0].min()
        y1 = cnt[:, :, 1].min()
        x2 = cnt[:, :, 0].max()
        y2 = cnt[:, :, 1].max()
        # clamp the bounding box to the original image extent
        x1 = 0 if x1 < 0 else x1
        y1 = 0 if y1 < 0 else y1
        x2 = w - 1 if x2 > w - 1 else x2
        y2 = h - 1 if y2 > h - 1 else y2
        rects.append((x1, y1, x2, y2))
        cv2.drawContours(img, cnt, -1, [0, 0, 255])
        # cv2.rectangle(img, (x1, y1), (x2, y2), [0, 0, 255])
    rects.sort()  # sorts by x1 first => left-to-right character order
    out = numpy.full(imgBW.shape, 255, numpy.uint8)
    x0 = spacing = 3
    templets = []
    for x1, y1, x2, y2 in rects:
        imgchar = numpy.full((30, 30), 255, numpy.uint8)
        tmpl = imgBW[y1:y2 + 1, x1:x2 + 1]
        if value2 != (max_value2 // 2):
            # apply the user-selected angle correction (trackbar 'angle')
            tmpl = rotate_image(tmpl, (max_value2 // 2 - value2) * 10)
        templets.append(tmpl)
        # vertically centre the character inside a 30x30 cell
        copy_image(imgchar, 0, (30 - y2 + y1 - 1) // 2, tmpl)
        copy_image(out, x0, 0, imgchar)
        x0 += x2 - x1 + 1 + spacing
    out = cv2.cvtColor(out, cv2.COLOR_GRAY2BGR)
    # second pass: frame each character in its own colour
    i = 0
    x0 = spacing
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(out, (x0, 0), (x0 + x2 - x1 + 1, 29), COLORS[i])
        x0 += x2 - x1 + 1 + spacing
        i += 1
    return img, out, templets
def get_bgimg(img, templets):
    """
    Process the background image.

    :param img: background image
    :type img: ndarray
    :param templets: list of character template images
    :type templets: list
    :return: processed background image with each match region framed in the
        corresponding colour; the four match variants are labelled at four
        corners of their rectangle:
        1 minLoc  2 maxLoc  -- match results on the thresholded image
        3 minLoc  4 maxLoc  -- match results after inverting the template
    :rtype:
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # value3 (trackbar 'threshold') selects the binarisation mode
    if value3 == 0:
        ret, dst = cv2.threshold(gray, 0, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_TRIANGLE)
    elif value3 == 1:
        ret, dst = cv2.threshold(gray, 0, 255,
                                 cv2.THRESH_BINARY_INV + cv2.THRESH_TRIANGLE)
    elif value3 == 2:
        ret, dst = cv2.threshold(gray, 0, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    else:
        ret, dst = cv2.threshold(gray, 0, 255,
                                 cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    methods = (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED, cv2.TM_CCORR,
               cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED)
    dst2 = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
    matchs = []
    for t in templets:
        # value1 (trackbar 'match') selects the matching method
        method = methods[value1 % len(methods)]
        match = []
        result = cv2.matchTemplate(dst, t, method)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
        match.append((minLoc, t.shape))
        match.append((maxLoc, t.shape))
        t ^= 255  # invert the template in place and match again
        result = cv2.matchTemplate(dst, t, method)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
        match.append((minLoc, t.shape))
        match.append((maxLoc, t.shape))
        matchs.append(match)
    i = 0
    for m in matchs:
        no = 0
        # NOTE(review): t.shape is (rows, cols), so w/h below look swapped;
        # rectangles come out transposed for non-square templates -- confirm.
        for (x, y), (w, h) in m:
            no += 1
            cv2.rectangle(dst2, (x, y), (x + w, y + h), COLORS[i])
            # label each of the four candidate matches at a distinct corner
            if no == 1:
                align = 'lt'
            elif no == 2:
                align = 'rt'
            elif no == 3:
                align = 'lb'
            elif no == 4:
                align = 'rb'
            else:
                align = 'cc'
            # 1 minLoc  2 maxLoc
            # 3 minLoc  4 maxLoc -- results after template inversion
            put_text(dst2, 'x', (x, y, w, h), COLORS[i], align)
        i += 1
    return dst2
def rotate_image(img, angle):
    """Rotate ``img`` about its centre and return the rotated copy.

    :param img: source image (ndarray; grayscale templates in this script)
    :param angle: rotation angle in degrees; positive is counter-clockwise
    :return: image of the same size; corners uncovered by the rotation are
        filled with white
    """
    # img.shape is (rows, cols[, channels]) while OpenCV expects the centre
    # as an (x, y) point and the output size as (width, height), so the axes
    # must be swapped.  The old code passed img.shape straight through as
    # both, which was wrong for any non-square image.
    h, w = img.shape[:2]
    center = (w / 2, h / 2)
    m = cv2.getRotationMatrix2D(center, angle, scale=1)
    dst = cv2.warpAffine(img, m, (w, h), borderValue=(255, 255, 255))
    return dst
def put_text(img, text, box, color, align='cc'):
    """Draw ``text`` on ``img`` inside ``box``, placed by a 2-char alignment spec."""
    face = cv2.FONT_HERSHEY_PLAIN
    scale = 1
    thick = 1
    size, baseline = cv2.getTextSize(text, face, scale, thick)
    x, y = Align(align).get_bottomleft(box, size)
    # y -= baseline
    y -= thick  # nudge the text baseline up by the stroke thickness
    cv2.putText(img, text, (x, y), face, scale, color, thick)
def get_edges(img):
    """Overlay Canny edges on ``img`` (in place, in red) and return it.

    The hysteresis thresholds 100/200 are the common Canny defaults.
    """
    # Removed the unused local `threshold = 1` from the original.
    edges = cv2.Canny(img, 100, 200)
    img[edges > 0] = [0, 0, 255]
    return img
def get_grbcut(img):
    """Segment the foreground of ``img`` with GrabCut and return it on black."""
    # scratch model arrays required by the GrabCut API (contents are opaque)
    bgdmodel = numpy.zeros((1, 65), numpy.float64)
    fgdmodel = numpy.zeros((1, 65), numpy.float64)
    mask = numpy.zeros(img.shape[:2], dtype=numpy.uint8)
    # NOTE(review): cv2 rects are (x, y, w, h); using shape[0] (rows) as the
    # width looks swapped for non-square images -- confirm.
    rect = (0, 0, img.shape[0], img.shape[1])
    cv2.grabCut(img, mask, rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_RECT)
    # cv2.grabCut(img, mask, rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_MASK)
    # keep pixels labelled definite (1) or probable (3) foreground
    mask2 = numpy.where((mask == 1) | (mask == 3), 255, 0).astype('uint8')
    output = cv2.bitwise_and(img, img, mask=mask2)
    return output
def get_sobel(img):
    """Return a BGR visualisation of the equally blended Sobel gradients of ``img``."""
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    grad_a = cv2.Sobel(grey, -1, 0, 1, 3)
    grad_b = cv2.Sobel(grey, -1, 1, 0, 3)
    # average the two gradient images with equal weight
    blended = cv2.addWeighted(grad_a, 0.5, grad_b, 0.5, 0)
    return cv2.cvtColor(blended, cv2.COLOR_GRAY2BGR)
def get_watershed(img):
    """Segment ``img`` with the watershed algorithm; boundaries are drawn in red."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, dst = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # noise removal
    kernel = numpy.ones((3, 3), numpy.uint8)
    opening = cv2.morphologyEx(dst, cv2.MORPH_OPEN, kernel, iterations=2)
    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)
    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),
                              255, 0)
    # Finding unknown region
    sure_fg = numpy.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0
    markers = cv2.watershed(img, markers)
    # watershed marks boundary pixels with -1; paint them red (BGR) in place
    img[markers == -1] = [0, 0, 255]
    return img
def get_threshold(img):
    """Return ``img`` binarised with Otsu's threshold, converted back to BGR."""
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grey, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)
# dst = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)
# dst = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, 0)
# return cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
def get_img(idx, fromlocal=True):
    """Fetch a captcha pair (background image, target/hint image).

    :param idx: index of the locally cached pair to load (used when fromlocal)
    :param fromlocal: read from disk when True, otherwise download a fresh
        captcha from the tianyancha endpoint
    :return: (bgimg, tgimg) as cv2-decoded images
    """
    if fromlocal:
        imgpath = 'e:/tyc2/verify'
        bgimg = cv2.imread(imgpath + '/bg{:04d}.png'.format(idx),
                           cv2.IMREAD_ANYCOLOR)
        tgimg = cv2.imread(imgpath + '/tg{:04d}.png'.format(idx),
                           cv2.IMREAD_ANYCOLOR)
    else:
        # cache-busting timestamp in milliseconds
        url = 'http://antirobot.tianyancha.com/captcha/getCaptcha.json?t={}'.format(
            int(time.time() * 1000))
        resp = requests.get(url).json()
        data = resp['data']
        # the images arrive base64-encoded inside the JSON payload
        bg = base64.standard_b64decode(data['bgImage'])
        tg = base64.standard_b64decode(data['targetImage'])
        nparr = numpy.frombuffer(bg, numpy.uint8)
        bgimg = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
        nparr = numpy.frombuffer(tg, numpy.uint8)
        tgimg = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
    return bgimg, tgimg
def copy_image(dst, x, y, src):
    """Paste ``src`` into ``dst`` (in place) with its top-left corner at (x, y)."""
    rows, cols = src.shape[:2]
    numpy.copyto(dst[y:y + rows, x:x + cols], src)
def on_change1(pos, userdata=None):
    """Trackbar callback for 'match': store the new method index and redraw."""
    global value1
    value1 = pos
    on_draw()
def on_change2(pos, userdata=None):
    """Trackbar callback for 'angle': store the new angle step and redraw."""
    global value2
    value2 = pos
    on_draw()
def on_change3(pos, userdata=None):
    """Trackbar callback for 'threshold': store the new mode and redraw."""
    global value3
    value3 = pos
    on_draw()
def on_draw():
    """Compose the debug canvas from the current captcha pair and display it."""
    canvas = numpy.full((270, 340, 3), 255, numpy.uint8)
    bgimg, tgimg = get_img(curidx)
    tgimg, tgstrip, templets = get_tgimg(tgimg)
    matched = get_bgimg(bgimg, templets)
    # layout: raw background, match visualisation, outlined hint, char strip
    copy_image(canvas, 10, 10, bgimg)
    copy_image(canvas, 10, 120, matched)
    copy_image(canvas, 10, 230, tgimg)
    copy_image(canvas, 140, 230, tgstrip)
    show_image(canvas, MAIN_WINDOW_NAME)
def show_image(img, title='debug'):
    """Display ``img`` in a resizable window named ``title`` that keeps its aspect ratio."""
    cv2.namedWindow(title, cv2.WINDOW_KEEPRATIO)
    cv2.imshow(title, img)
def cv2test():
    """Interactive debug loop: show captcha analyses and react to key presses."""
    global curidx, value1
    cv2.namedWindow(MAIN_WINDOW_NAME, cv2.WINDOW_KEEPRATIO)
    cv2.resizeWindow(MAIN_WINDOW_NAME, 340, 370)
    cv2.setWindowTitle(MAIN_WINDOW_NAME, 'verify')
    # the trackbars mutate the module-level value1/value2/value3 via callbacks
    cv2.createTrackbar('match', MAIN_WINDOW_NAME, value1, 5, on_change1)
    cv2.createTrackbar('angle', MAIN_WINDOW_NAME, value2, max_value2,
                       on_change2)
    cv2.createTrackbar('threshold', MAIN_WINDOW_NAME, value3, 3, on_change3)
    history = []  # previously shown image indices (capped at 100 entries)
    curidx = 118
    history.append(curidx)
    while True:
        cv2.setWindowTitle(MAIN_WINDOW_NAME, 'verify {}'.format(curidx))
        on_draw()
        key = cv2.waitKeyEx()
        if key in (0x20, ):  # Space: cycle through the matching methods
            value1 += 1
            value1 %= 6
            cv2.setTrackbarPos('match', MAIN_WINDOW_NAME, value1)
        elif key in (0x270000, 0x0d):  # Right, Enter: jump to a random image
            curidx = random.randint(1, 338)
            history.append(curidx)
            if len(history) > 100:
                history.pop(0)
        elif key in (0x250000, 8):  # Left, Backspace: step back in history
            if len(history):
                curidx = history.pop()
        elif key in (27, -1):  # Esc, or all windows closed: quit
            cv2.destroyAllWindows()
            break
        else:
            print(hex(key))  # log unhandled key codes for debugging
# Launch the interactive captcha-debugging window when run as a script.
if __name__ == '__main__':
    cv2test()
| [
"numpy.uint8",
"numpy.copyto",
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.imdecode",
"base64.standard_b64decode",
"cv2.resizeWindow",
"cv2.threshold",
"cv2.erode",
"numpy.where",
"cv2.minMaxLoc",
"cv2.addWeighted",
"cv2.distanceTransform",
"cv2.connectedComponents",
"... | [((1368, 1405), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1380, 1405), False, 'import cv2\n'), ((1418, 1437), 'numpy.float32', 'numpy.float32', (['gray'], {}), '(gray)\n', (1431, 1437), False, 'import numpy\n'), ((1449, 1483), 'cv2.cornerHarris', 'cv2.cornerHarris', (['gray', '(2)', '(3)', '(0.04)'], {}), '(gray, 2, 3, 0.04)\n', (1465, 1483), False, 'import cv2\n'), ((2142, 2179), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2154, 2179), False, 'import cv2\n'), ((2220, 2285), 'cv2.threshold', 'cv2.threshold', (['imgBW', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(imgBW, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2233, 2285), False, 'import cv2\n'), ((2330, 2366), 'cv2.erode', 'cv2.erode', (['imgBW', 'None'], {'iterations': '(3)'}), '(imgBW, None, iterations=3)\n', (2339, 2366), False, 'import cv2\n'), ((2379, 2415), 'cv2.dilate', 'cv2.dilate', (['img2', 'None'], {'iterations': '(3)'}), '(img2, None, iterations=3)\n', (2389, 2415), False, 'import cv2\n'), ((2427, 2473), 'numpy.full', 'numpy.full', (['(20 + h, 20 + w)', '(255)', 'numpy.uint8'], {}), '((20 + h, 20 + w), 255, numpy.uint8)\n', (2437, 2473), False, 'import numpy\n'), ((2537, 2596), 'cv2.findContours', 'cv2.findContours', (['out', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_NONE'], {}), '(out, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n', (2553, 2596), False, 'import cv2\n'), ((3197, 3238), 'numpy.full', 'numpy.full', (['imgBW.shape', '(255)', 'numpy.uint8'], {}), '(imgBW.shape, 255, numpy.uint8)\n', (3207, 3238), False, 'import numpy\n'), ((3714, 3751), 'cv2.cvtColor', 'cv2.cvtColor', (['out', 'cv2.COLOR_GRAY2BGR'], {}), '(out, cv2.COLOR_GRAY2BGR)\n', (3726, 3751), False, 'import cv2\n'), ((4279, 4316), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4291, 4316), False, 'import cv2\n'), ((5044, 5081), 'cv2.cvtColor', 
'cv2.cvtColor', (['dst', 'cv2.COLOR_GRAY2BGR'], {}), '(dst, cv2.COLOR_GRAY2BGR)\n', (5056, 5081), False, 'import cv2\n'), ((6570, 6617), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle'], {'scale': '(1)'}), '(center, angle, scale=1)\n', (6593, 6617), False, 'import cv2\n'), ((6629, 6687), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'm', 'ssize'], {'borderValue': '(255, 255, 255)'}), '(img, m, ssize, borderValue=(255, 255, 255))\n', (6643, 6687), False, 'import cv2\n'), ((6861, 6916), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font_face', 'font_scale', 'thickness'], {}), '(text, font_face, font_scale, thickness)\n', (6876, 6916), False, 'import cv2\n'), ((7016, 7087), 'cv2.putText', 'cv2.putText', (['img', 'text', '(x, y)', 'font_face', 'font_scale', 'color', 'thickness'], {}), '(img, text, (x, y), font_face, font_scale, color, thickness)\n', (7027, 7087), False, 'import cv2\n'), ((7145, 7169), 'cv2.Canny', 'cv2.Canny', (['img', '(100)', '(200)'], {}), '(img, 100, 200)\n', (7154, 7169), False, 'import cv2\n'), ((7262, 7297), 'numpy.zeros', 'numpy.zeros', (['(1, 65)', 'numpy.float64'], {}), '((1, 65), numpy.float64)\n', (7273, 7297), False, 'import numpy\n'), ((7314, 7349), 'numpy.zeros', 'numpy.zeros', (['(1, 65)', 'numpy.float64'], {}), '((1, 65), numpy.float64)\n', (7325, 7349), False, 'import numpy\n'), ((7362, 7407), 'numpy.zeros', 'numpy.zeros', (['img.shape[:2]'], {'dtype': 'numpy.uint8'}), '(img.shape[:2], dtype=numpy.uint8)\n', (7373, 7407), False, 'import numpy\n'), ((7460, 7534), 'cv2.grabCut', 'cv2.grabCut', (['img', 'mask', 'rect', 'bgdmodel', 'fgdmodel', '(1)', 'cv2.GC_INIT_WITH_RECT'], {}), '(img, mask, rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_RECT)\n', (7471, 7534), False, 'import cv2\n'), ((7707, 7744), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask2'}), '(img, img, mask=mask2)\n', (7722, 7744), False, 'import cv2\n'), ((7801, 7838), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 
'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (7813, 7838), False, 'import cv2\n'), ((7848, 7876), 'cv2.Sobel', 'cv2.Sobel', (['gray', '(-1)', '(0)', '(1)', '(3)'], {}), '(gray, -1, 0, 1, 3)\n', (7857, 7876), False, 'import cv2\n'), ((7886, 7914), 'cv2.Sobel', 'cv2.Sobel', (['gray', '(-1)', '(1)', '(0)', '(3)'], {}), '(gray, -1, 1, 0, 3)\n', (7895, 7914), False, 'import cv2\n'), ((7929, 7963), 'cv2.addWeighted', 'cv2.addWeighted', (['x', '(0.5)', 'y', '(0.5)', '(0)'], {}), '(x, 0.5, y, 0.5, 0)\n', (7944, 7963), False, 'import cv2\n'), ((7976, 8016), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_GRAY2BGR'], {}), '(output, cv2.COLOR_GRAY2BGR)\n', (7988, 8016), False, 'import cv2\n'), ((8058, 8095), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (8070, 8095), False, 'import cv2\n'), ((8112, 8176), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (8125, 8176), False, 'import cv2\n'), ((8214, 8245), 'numpy.ones', 'numpy.ones', (['(3, 3)', 'numpy.uint8'], {}), '((3, 3), numpy.uint8)\n', (8224, 8245), False, 'import numpy\n'), ((8261, 8320), 'cv2.morphologyEx', 'cv2.morphologyEx', (['dst', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(2)'}), '(dst, cv2.MORPH_OPEN, kernel, iterations=2)\n', (8277, 8320), False, 'import cv2\n'), ((8364, 8405), 'cv2.dilate', 'cv2.dilate', (['opening', 'kernel'], {'iterations': '(3)'}), '(opening, kernel, iterations=3)\n', (8374, 8405), False, 'import cv2\n'), ((8464, 8510), 'cv2.distanceTransform', 'cv2.distanceTransform', (['opening', 'cv2.DIST_L2', '(5)'], {}), '(opening, cv2.DIST_L2, 5)\n', (8485, 8510), False, 'import cv2\n'), ((8676, 8696), 'numpy.uint8', 'numpy.uint8', (['sure_fg'], {}), '(sure_fg)\n', (8687, 8696), False, 'import numpy\n'), ((8712, 8742), 'cv2.subtract', 'cv2.subtract', (['sure_bg', 'sure_fg'], {}), '(sure_bg, sure_fg)\n', (8724, 
8742), False, 'import cv2\n'), ((8787, 8819), 'cv2.connectedComponents', 'cv2.connectedComponents', (['sure_fg'], {}), '(sure_fg)\n', (8810, 8819), False, 'import cv2\n'), ((9013, 9040), 'cv2.watershed', 'cv2.watershed', (['img', 'markers'], {}), '(img, markers)\n', (9026, 9040), False, 'import cv2\n'), ((9136, 9173), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (9148, 9173), False, 'import cv2\n'), ((9190, 9254), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (9203, 9254), False, 'import cv2\n'), ((9269, 9306), 'cv2.cvtColor', 'cv2.cvtColor', (['dst', 'cv2.COLOR_GRAY2BGR'], {}), '(dst, cv2.COLOR_GRAY2BGR)\n', (9281, 9306), False, 'import cv2\n'), ((10483, 10523), 'numpy.copyto', 'numpy.copyto', (['dst[y:y + h, x:x + w]', 'src'], {}), '(dst[y:y + h, x:x + w], src)\n', (10495, 10523), False, 'import numpy\n'), ((10866, 10909), 'numpy.full', 'numpy.full', (['(270, 340, 3)', '(255)', 'numpy.uint8'], {}), '((270, 340, 3), 255, numpy.uint8)\n', (10876, 10909), False, 'import numpy\n'), ((11280, 11324), 'cv2.namedWindow', 'cv2.namedWindow', (['title', 'cv2.WINDOW_KEEPRATIO'], {}), '(title, cv2.WINDOW_KEEPRATIO)\n', (11295, 11324), False, 'import cv2\n'), ((11330, 11352), 'cv2.imshow', 'cv2.imshow', (['title', 'img'], {}), '(title, img)\n', (11340, 11352), False, 'import cv2\n'), ((11405, 11460), 'cv2.namedWindow', 'cv2.namedWindow', (['MAIN_WINDOW_NAME', 'cv2.WINDOW_KEEPRATIO'], {}), '(MAIN_WINDOW_NAME, cv2.WINDOW_KEEPRATIO)\n', (11420, 11460), False, 'import cv2\n'), ((11466, 11510), 'cv2.resizeWindow', 'cv2.resizeWindow', (['MAIN_WINDOW_NAME', '(340)', '(370)'], {}), '(MAIN_WINDOW_NAME, 340, 370)\n', (11482, 11510), False, 'import cv2\n'), ((11516, 11562), 'cv2.setWindowTitle', 'cv2.setWindowTitle', (['MAIN_WINDOW_NAME', '"""verify"""'], {}), "(MAIN_WINDOW_NAME, 'verify')\n", (11534, 11562), False, 
'import cv2\n'), ((11568, 11636), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""match"""', 'MAIN_WINDOW_NAME', 'value1', '(5)', 'on_change1'], {}), "('match', MAIN_WINDOW_NAME, value1, 5, on_change1)\n", (11586, 11636), False, 'import cv2\n'), ((11642, 11719), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""angle"""', 'MAIN_WINDOW_NAME', 'value2', 'max_value2', 'on_change2'], {}), "('angle', MAIN_WINDOW_NAME, value2, max_value2, on_change2)\n", (11660, 11719), False, 'import cv2\n'), ((11749, 11821), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""threshold"""', 'MAIN_WINDOW_NAME', 'value3', '(3)', 'on_change3'], {}), "('threshold', MAIN_WINDOW_NAME, value3, 3, on_change3)\n", (11767, 11821), False, 'import cv2\n'), ((3059, 3102), 'cv2.drawContours', 'cv2.drawContours', (['img', 'cnt', '(-1)', '[0, 0, 255]'], {}), '(img, cnt, -1, [0, 0, 255])\n', (3075, 3102), False, 'import cv2\n'), ((3333, 3371), 'numpy.full', 'numpy.full', (['(30, 30)', '(255)', 'numpy.uint8'], {}), '((30, 30), 255, numpy.uint8)\n', (3343, 3371), False, 'import numpy\n'), ((3824, 3886), 'cv2.rectangle', 'cv2.rectangle', (['out', '(x0, 0)', '(x0 + x2 - x1 + 1, 29)', 'COLORS[i]'], {}), '(out, (x0, 0), (x0 + x2 - x1 + 1, 29), COLORS[i])\n', (3837, 3886), False, 'import cv2\n'), ((4358, 4426), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_TRIANGLE)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_TRIANGLE)\n', (4371, 4426), False, 'import cv2\n'), ((5212, 5245), 'cv2.matchTemplate', 'cv2.matchTemplate', (['dst', 't', 'method'], {}), '(dst, t, method)\n', (5229, 5245), False, 'import cv2\n'), ((5288, 5309), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['result'], {}), '(result)\n', (5301, 5309), False, 'import cv2\n'), ((5430, 5463), 'cv2.matchTemplate', 'cv2.matchTemplate', (['dst', 't', 'method'], {}), '(dst, t, method)\n', (5447, 5463), False, 'import cv2\n'), ((5506, 5527), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['result'], {}), '(result)\n', (5519, 
5527), False, 'import cv2\n'), ((10066, 10108), 'base64.standard_b64decode', 'base64.standard_b64decode', (["data['bgImage']"], {}), "(data['bgImage'])\n", (10091, 10108), False, 'import base64\n'), ((10123, 10169), 'base64.standard_b64decode', 'base64.standard_b64decode', (["data['targetImage']"], {}), "(data['targetImage'])\n", (10148, 10169), False, 'import base64\n'), ((10187, 10220), 'numpy.frombuffer', 'numpy.frombuffer', (['bg', 'numpy.uint8'], {}), '(bg, numpy.uint8)\n', (10203, 10220), False, 'import numpy\n'), ((10238, 10278), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_ANYCOLOR'], {}), '(nparr, cv2.IMREAD_ANYCOLOR)\n', (10250, 10278), False, 'import cv2\n'), ((10298, 10331), 'numpy.frombuffer', 'numpy.frombuffer', (['tg', 'numpy.uint8'], {}), '(tg, numpy.uint8)\n', (10314, 10331), False, 'import numpy\n'), ((10349, 10389), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_ANYCOLOR'], {}), '(nparr, cv2.IMREAD_ANYCOLOR)\n', (10361, 10389), False, 'import cv2\n'), ((12011, 12026), 'cv2.waitKeyEx', 'cv2.waitKeyEx', ([], {}), '()\n', (12024, 12026), False, 'import cv2\n'), ((4504, 4576), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_TRIANGLE)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_TRIANGLE)\n', (4517, 4576), False, 'import cv2\n'), ((5761, 5815), 'cv2.rectangle', 'cv2.rectangle', (['dst2', '(x, y)', '(x + w, y + h)', 'COLORS[i]'], {}), '(dst2, (x, y), (x + w, y + h), COLORS[i])\n', (5774, 5815), False, 'import cv2\n'), ((7630, 7676), 'numpy.where', 'numpy.where', (['((mask == 1) | (mask == 3))', '(255)', '(0)'], {}), '((mask == 1) | (mask == 3), 255, 0)\n', (7641, 7676), False, 'import numpy\n'), ((12129, 12182), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""match"""', 'MAIN_WINDOW_NAME', 'value1'], {}), "('match', MAIN_WINDOW_NAME, value1)\n", (12147, 12182), False, 'import cv2\n'), ((4654, 4718), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', 
'(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (4667, 4718), False, 'import cv2\n'), ((4784, 4852), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (4797, 4852), False, 'import cv2\n'), ((9998, 10015), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (10010, 10015), False, 'import requests\n'), ((12260, 12282), 'random.randint', 'random.randint', (['(1)', '(338)'], {}), '(1, 338)\n', (12274, 12282), False, 'import random\n'), ((9961, 9972), 'time.time', 'time.time', ([], {}), '()\n', (9970, 9972), False, 'import time\n'), ((12577, 12600), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12598, 12600), False, 'import cv2\n')] |
# %% [markdown]
# # 📃 Solution for Exercise M5.01
#
# In the previous notebook, we showed how a tree with a depth of 1 level was
# working. The aim of this exercise is to repeat part of the previous
# experiment for a depth with 2 levels to show how the process of partitioning
# is repeated over time.
#
# Before we start, we will:
#
# * load the dataset;
# * split the dataset into training and testing dataset;
# * define the function to show the classification decision function.
# %%
import pandas as pd
# Two culmen measurements are the features; the penguin species is the target.
penguins = pd.read_csv("../datasets/penguins_classification.csv")
culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_column = "Species"
# %% [markdown]
# ```{note}
# If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.
# ```
# %%
from sklearn.model_selection import train_test_split
data, target = penguins[culmen_columns], penguins[target_column]
# Hold out a test split; the fixed random_state keeps the notebook reproducible.
data_train, data_test, target_train, target_test = train_test_split(
    data, target, random_state=0
)
# Plotting range per feature: the observed min/max padded by one unit.
range_features = {
    feature_name: (data[feature_name].min() - 1, data[feature_name].max() + 1)
    for feature_name in data.columns
}
# %%
import numpy as np
import matplotlib.pyplot as plt
def plot_decision_function(fitted_classifier, range_features, ax=None):
    """Plot the boundary of the decision function of a classifier.

    Parameters
    ----------
    fitted_classifier : estimator exposing ``predict``
        Already-fitted classifier whose decision regions are drawn.
    range_features : dict
        Mapping of the two feature names to their (min, max) plotting range.
    ax : matplotlib axis, optional
        Axis to draw on; a new figure/axis is created when omitted.

    Returns
    -------
    The axis containing the filled decision-region contour.
    """
    from sklearn.preprocessing import LabelEncoder
    feature_names = list(range_features.keys())
    # create a grid to evaluate all possible samples
    plot_step = 0.02
    xx, yy = np.meshgrid(
        np.arange(*range_features[feature_names[0]], plot_step),
        np.arange(*range_features[feature_names[1]], plot_step),
    )
    # compute the associated prediction
    Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()])
    # encode string class labels as integers so contourf can colour regions
    Z = LabelEncoder().fit_transform(Z)
    Z = Z.reshape(xx.shape)
    # make the plot of the boundary and the data samples
    if ax is None:
        _, ax = plt.subplots()
    ax.contourf(xx, yy, Z, alpha=0.4, cmap="RdBu")
    return ax
# %% [markdown]
# Create a decision tree classifier with a maximum depth of 2 levels and fit
# the training data. Once this classifier trained, plot the data and the
# decision boundary to see the benefit of increasing the depth.
# %%
from sklearn.tree import DecisionTreeClassifier

# Fit a shallow tree (2 levels) so each partitioning step stays interpretable.
tree = DecisionTreeClassifier(max_depth=2)
tree.fit(data_train, target_train)

# %%
import seaborn as sns

# One colour per penguin species.
palette = ["tab:red", "tab:blue", "black"]
ax = sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],
                     hue=target_column, palette=palette)
plot_decision_function(tree, range_features, ax=ax)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# Fixed copy-paste defect: this figure shows a decision tree, not a logistic
# regression.
_ = plt.title("Decision boundary using a decision tree")
# %% [markdown]
# Did we make use of the feature "Culmen Length"?
# Plot the tree using the function `sklearn.tree.plot_tree` to find out!
# %%
from sklearn.tree import plot_tree
# A large figure keeps the node annotations in the rendered tree legible.
_, ax = plt.subplots(figsize=(16, 12))
_ = plot_tree(tree, feature_names=culmen_columns,
              class_names=tree.classes_, impurity=False, ax=ax)
# %% [markdown]
# We see that the second tree level used the "Culmen Length" to make
# two new decisions. Qualitatively, we saw that such a simple tree was enough
# to classify the penguins' species.
#
# Compute the accuracy of the decision tree on the testing data.
# %%
# Refit on the training split and report accuracy on the held-out test split.
test_score = tree.fit(data_train, target_train).score(data_test, target_test)
print(f"Accuracy of the DecisionTreeClassifier: {test_score:.2f}")
# %% [markdown]
# At this stage, we have the intuition that a decision tree is built by
# successively partitioning the feature space, considering one feature at a
# time.
#
# We predict an Adelie penguin if the feature value is below the threshold,
# which is not surprising since this partition was almost pure. If the feature
# value is above the threshold, we predict the Gentoo penguin, the class that
# is most probable.
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"numpy.arange",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"seaborn.scatterplot",
"sklearn.tree.plot_tree",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((522, 576), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/penguins_classification.csv"""'], {}), "('../datasets/penguins_classification.csv')\n", (533, 576), True, 'import pandas as pd\n'), ((1019, 1065), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'random_state': '(0)'}), '(data, target, random_state=0)\n', (1035, 1065), False, 'from sklearn.model_selection import train_test_split\n'), ((2388, 2423), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(2)'}), '(max_depth=2)\n', (2410, 2423), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2536, 2648), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'penguins', 'x': 'culmen_columns[0]', 'y': 'culmen_columns[1]', 'hue': 'target_column', 'palette': 'palette'}), '(data=penguins, x=culmen_columns[0], y=culmen_columns[1],\n hue=target_column, palette=palette)\n', (2551, 2648), True, 'import seaborn as sns\n'), ((2718, 2772), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.05, 1), loc='upper left')\n", (2728, 2772), True, 'import matplotlib.pyplot as plt\n'), ((2777, 2835), 'matplotlib.pyplot.title', 'plt.title', (['"""Decision boundary using a logistic regression"""'], {}), "('Decision boundary using a logistic regression')\n", (2786, 2835), True, 'import matplotlib.pyplot as plt\n'), ((3026, 3056), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (3038, 3056), True, 'import matplotlib.pyplot as plt\n'), ((3061, 3160), 'sklearn.tree.plot_tree', 'plot_tree', (['tree'], {'feature_names': 'culmen_columns', 'class_names': 'tree.classes_', 'impurity': '(False)', 'ax': 'ax'}), '(tree, feature_names=culmen_columns, class_names=tree.classes_,\n impurity=False, ax=ax)\n', (3070, 3160), False, 'from sklearn.tree import plot_tree\n'), ((1618, 1673), 'numpy.arange', 'np.arange', 
(['*range_features[feature_names[0]]', 'plot_step'], {}), '(*range_features[feature_names[0]], plot_step)\n', (1627, 1673), True, 'import numpy as np\n'), ((1683, 1738), 'numpy.arange', 'np.arange', (['*range_features[feature_names[1]]', 'plot_step'], {}), '(*range_features[feature_names[1]], plot_step)\n', (1692, 1738), True, 'import numpy as np\n'), ((2013, 2027), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2025, 2027), True, 'import matplotlib.pyplot as plt\n'), ((1860, 1874), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1872, 1874), False, 'from sklearn.preprocessing import LabelEncoder\n')] |
from trafpy.generator.src.dists import val_dists, node_dists
from trafpy.generator.src import tools
import numpy as np
import time
import copy
import random
from collections import defaultdict # use for initialising arbitrary length nested dict
def create_flow_centric_demand_data(num_demands,
                                    eps,
                                    node_dist,
                                    flow_size_dist,
                                    interarrival_time_dist,
                                    duration_time_dist=None,
                                    print_data=False):
    '''Generate flow-centric demand data.

    Args:
        num_demands (int): Number of flows to generate.
        eps (list): Network endpoint labels.
        node_dist: Node pair probability distribution.
        flow_size_dist (dict): Maps flow size random variable -> probability.
        interarrival_time_dist (dict): Maps interarrival time random
            variable -> probability.
        duration_time_dist (dict): If given, flows have finite durations;
            each flow gets both an establish and a take-down event, so all
            demands are duplicated.
        print_data (bool): Whether to print progress information.

    Returns:
        dict: demand_data with keys 'flow_id', 'sn', 'dn', 'flow_size',
            'event_time', 'establish' and 'index', ordered by event time.
    '''
    if print_data:
        print('Generating {} flow demands...'.format(num_demands))
    started = time.time()

    # initialise flow ids; if flows have durations, each flow produces an
    # establish event and a take-down event, so duplicate the ids
    f_ids = ['flow_'+str(i) for i in range(num_demands)]
    if duration_time_dist is not None:
        # duplicate
        f_ids2 = ['flow_'+str(i) for i in range(num_demands)]
        flow_ids = f_ids + f_ids2 # duplicate
        establish = np.concatenate((np.ones((int(len(flow_ids)))),
                                    np.zeros((int(len(flow_ids))))))
    else:
        flow_ids = f_ids
        establish = np.concatenate((np.ones((int(len(flow_ids)))),
                                    np.zeros((int(len(flow_ids))))))
    flow_sizes = np.zeros((int(len(flow_ids))))

    if duration_time_dist is not None:
        duplicate = True
    else:
        duplicate = False
    sn, dn = node_dists.gen_node_demands(eps=eps,
                                         node_dist=node_dist,
                                         num_demands=num_demands,
                                         duplicate=duplicate)

    # create demand flow sizes by sampling from the discretised flow size dist
    flow_sizes[:num_demands] = val_dists.gen_rand_vars_from_discretised_dist(unique_vars=list(flow_size_dist.keys()),
                                                                             probabilities=list(flow_size_dist.values()),
                                                                             num_demands=num_demands)
    if duration_time_dist is not None:
        # take-down events have the same size as their establish events
        flow_sizes[num_demands:] = flow_sizes[:num_demands]

    # create event time array
    interarrival_times = val_dists.gen_rand_vars_from_discretised_dist(unique_vars=list(interarrival_time_dist.keys()),
                                                                       probabilities=list(interarrival_time_dist.values()),
                                                                       num_demands=num_demands)
    if duration_time_dist is not None:
        # BUG FIX: this branch previously re-generated interarrival_times from
        # interarrival_time_dist and never defined duration_times, which made
        # tools.gen_event_times() below raise NameError. Sample the flow
        # duration times from duration_time_dist instead.
        duration_times = val_dists.gen_rand_vars_from_discretised_dist(unique_vars=list(duration_time_dist.keys()),
                                                                       probabilities=list(duration_time_dist.values()),
                                                                       num_demands=num_demands)
    else:
        duration_times = None
    event_times = tools.gen_event_times(interarrival_times, duration_times)
    index, event_times_sorted = np.argsort(event_times), np.sort(event_times)

    # compile data into demand data dict, ordered by event time
    demand_data = {'flow_id': np.array(flow_ids)[index],
                   'sn': sn[index],
                   'dn': dn[index],
                   'flow_size': flow_sizes[index],
                   'event_time': event_times_sorted,
                   'establish': establish[index].astype(int),
                   'index': index}

    ended = time.time()
    if print_data:
        print('Generated {} flow demands in {} seconds.'.format(num_demands,ended-started))

    return demand_data
def duplicate_demands_in_demand_data_dict(demand_data, method='all_eps', **kwargs):
    '''Duplicate every demand in demand_data, doubling the trace length.

    If method == 'all_eps', will duplicate all demands by adding final event time
    over all endpoints to each event time

    if method == 'per_ep', will duplicate all demands by adding final even time
    for each endpoint's final event time

    Args:
        demand_data (dict): Flow-centric demand data to duplicate. The input
            dict is not mutated; a deep copy is extended and returned.
        method (str): 'all_eps' or 'per_ep' (see above).
        **kwargs: For method 'per_ep', must contain 'eps' (list of endpoint
            labels).

    Returns:
        dict: Copy of demand_data with all demands duplicated; every value
            is converted to a plain Python list.
    '''
    copy_demand_data = copy.deepcopy(demand_data)

    # ensure values of dict are lists
    for key, value in copy_demand_data.items():
        copy_demand_data[key] = list(value)

    if method == 'all_eps':
        # shift every duplicated event by the total trace duration so the
        # duplicated demands arrive after all original demands
        # final_event_time = max(demand_data['event_time'])
        num_demands = len(demand_data['flow_id'])
        final_event_time = max(demand_data['event_time'])
        first_event_time = min(demand_data['event_time'])
        duration = final_event_time - first_event_time
        for idx in range(len(demand_data['flow_id'])):
            # new flow ids continue the numbering after the original demands
            copy_demand_data['flow_id'].append('flow_{}'.format(int(idx+num_demands)))
            copy_demand_data['sn'].append(demand_data['sn'][idx])
            copy_demand_data['dn'].append(demand_data['dn'][idx])
            copy_demand_data['flow_size'].append(demand_data['flow_size'][idx])
            # copy_demand_data['event_time'].append(final_event_time + demand_data['event_time'][idx])
            copy_demand_data['event_time'].append(duration + demand_data['event_time'][idx])
            copy_demand_data['establish'].append(demand_data['establish'][idx])
            copy_demand_data['index'].append(demand_data['index'][idx] + idx)

    elif method == 'per_ep':
        # shift each endpoint's duplicated events by that endpoint's own
        # trace duration; duplicated_flows guards against duplicating a flow
        # twice (each flow appears under both its source and destination ep)
        original_ep_info = group_demand_data_into_ep_info(copy_demand_data, eps=kwargs['eps'])
        # idx_iterator = iter(range(num_demands))
        duplicated_flows = {flow_id: False for flow_id in copy_demand_data['flow_id']}

        print('Flows before duplication: {}'.format(len(copy_demand_data['flow_id'])))

        # shared iterator so each duplicated flow gets a unique new id suffix
        idx_iterator = iter(range(len(copy_demand_data['flow_id'])))
        for ep in kwargs['eps']:
            # ep_info = group_demand_data_into_ep_info(copy_demand_data, eps=kwargs['eps'])
            num_demands = len(original_ep_info[ep]['flow_id'])
            first_event_time = min(original_ep_info[ep]['event_time'])
            final_event_time = max(original_ep_info[ep]['event_time'])
            duration = final_event_time - first_event_time

            # DEBUG
            total_info = sum(original_ep_info[ep]['flow_size'])
            num_flows = len(original_ep_info[ep]['flow_id'])
            load = total_info / (final_event_time - first_event_time)
            print('Init {} duration: {} total info: {} | load: {} | flows: {}'.format(ep, duration, total_info, load, num_flows))

            for ep_flow_idx in range(len(original_ep_info[ep]['flow_id'])):
                flow_id = original_ep_info[ep]['flow_id'][ep_flow_idx]
                if not duplicated_flows[flow_id]:
                    duplicated_flows[flow_id] = True
                    # not yet duplicated this flow
                    # i = find_index_of_int_in_str(flow_id)
                    # idx = int(flow_id[i:])
                    idx = next(idx_iterator)
                    # copy_demand_data['flow_id'].append('flow_{}'.format(int(idx+num_demands)))
                    copy_demand_data['flow_id'].append('flow_{}'.format(int(idx+len(demand_data['flow_id']))))
                    copy_demand_data['sn'].append(original_ep_info[ep]['sn'][ep_flow_idx])
                    copy_demand_data['dn'].append(original_ep_info[ep]['dn'][ep_flow_idx])
                    copy_demand_data['flow_size'].append(original_ep_info[ep]['flow_size'][ep_flow_idx])
                    copy_demand_data['event_time'].append(duration + original_ep_info[ep]['event_time'][ep_flow_idx])
                    copy_demand_data['establish'].append(original_ep_info[ep]['establish'][ep_flow_idx])
                    copy_demand_data['index'].append(original_ep_info[ep]['index'][ep_flow_idx] + idx)
                else:
                    # already duplicated this flow in copy_demand_data
                    pass

            # DEBUG
            _ep_info = group_demand_data_into_ep_info(copy_demand_data, eps=kwargs['eps'])
            final_event_time = max(_ep_info[ep]['event_time'])
            first_event_time = min(_ep_info[ep]['event_time'])
            total_info = sum(_ep_info[ep]['flow_size'])
            load = total_info / (final_event_time - first_event_time)
            num_flows = len(_ep_info[ep]['flow_id'])
            print('Adjusted {} duration: {} total info: {} | load: {} | flows: {}'.format(ep, duration, total_info, load, num_flows))

        print('Flows after duplication: {}'.format(len(copy_demand_data['flow_id'])))

    # ensure values of dict are lists
    for key, value in copy_demand_data.items():
        copy_demand_data[key] = list(value)

    return copy_demand_data
def find_index_of_int_in_str(string):
    '''Return the index of the first decimal digit character in ``string``.

    Raises:
        Exception: If the string contains no digit character.
    '''
    for position, character in enumerate(string):
        if character.isdecimal():
            return position
    raise Exception('Could not find an integer in the string {}'.format(string))
def adjust_demand_load(demand_data,
                       network_load_config,
                       num_demands,
                       eps,
                       node_dist,
                       flow_size_dist,
                       interarrival_time_dist,
                       duration_time_dist,
                       print_data=False):
    '''Adjust demand_data so its offered load matches the target load fraction.

    Three-stage pipeline: (1) increase the load until it exceeds the target,
    (2) decrease it back down to just below the target, (3) redistribute load
    so no endpoint link exceeds its margin while still meeting the target.
    Finally re-sorts all demands by event time.

    Note: duration_time_dist is accepted but not used in this body.

    Returns:
        tuple: (adjusted demand_data, interarrival_time_dist).
            NOTE(review): this returns the *original* interarrival_time_dist
            argument, not the new_interarrival_time_dist produced by the
            adjustment steps — looks like it may be unintended; confirm
            against callers before changing.
    '''
    # adjust to get load fraction > target load fraction
    demand_data, new_interarrival_time_dist, new_num_demands = increase_demand_load_to_target(demand_data,
                                                                                              num_demands,
                                                                                              interarrival_time_dist,
                                                                                              eps,
                                                                                              node_dist,
                                                                                              flow_size_dist,
                                                                                              network_load_config,
                                                                                              print_data=print_data)

    # adjust back down to get load fraction <= target load fraction
    demand_data, new_interarrival_time_dist = decrease_demand_load_to_target(demand_data,
                                                                             new_num_demands,
                                                                             interarrival_time_dist=new_interarrival_time_dist,
                                                                             eps=eps,
                                                                             node_dist=node_dist,
                                                                             flow_size_dist=flow_size_dist,
                                                                             network_load_config=network_load_config,
                                                                             print_data=True)

    # adjust further to ensure no endpoint link has load fraction >= endpoint link margin of e.g. 0.95 while ensuring still meet target load fraction
    demand_data = adjust_demand_load_to_ep_link_margin(demand_data,
                                                       new_num_demands,
                                                       interarrival_time_dist=new_interarrival_time_dist,
                                                       eps=eps,
                                                       node_dist=node_dist,
                                                       flow_size_dist=flow_size_dist,
                                                       network_load_config=network_load_config,
                                                       print_data=True)

    # organise data in demand_data in order of events arriving
    index, event_times_sorted = np.argsort(demand_data['event_time']), np.sort(demand_data['event_time'])
    demand_data = {'flow_id': np.array(demand_data['flow_id'])[index],
                   'sn': demand_data['sn'][index],
                   'dn': demand_data['dn'][index],
                   'flow_size': demand_data['flow_size'][index],
                   'event_time': event_times_sorted,
                   'establish': demand_data['establish'][index].astype(int),
                   'index': index}

    return demand_data, interarrival_time_dist
def group_demand_data_into_ep_info(demand_data, eps):
    '''Group flow-centric demand data into per-endpoint info.

    Every flow is recorded against both its source and destination endpoint,
    so a flow between two distinct endpoints appears under two entries of
    the returned mapping (a flow with sn == dn is recorded twice under that
    one endpoint, matching the previous behaviour).

    Args:
        demand_data (dict): Flow-centric demand data with keys 'flow_id',
            'sn', 'dn', 'flow_size', 'event_time', 'establish' and 'index'.
        eps (list): Network endpoint labels.

    Returns:
        defaultdict: Maps endpoint -> dict of per-endpoint lists
            ('flow_size', 'event_time', 'demand_data_idx', 'flow_id',
            'establish', 'index', 'sn', 'dn').
    '''
    nested_dict = lambda: defaultdict(nested_dict)
    ep_info = nested_dict()
    # track which flow ids have already been grouped so duplicated flow ids
    # (e.g. establish/take-down event pairs) are only counted once
    added_flow = {flow_id: False for flow_id in demand_data['flow_id']}
    for ep in eps:
        ep_info[ep]['flow_size'] = []
        ep_info[ep]['event_time'] = []
        ep_info[ep]['demand_data_idx'] = []
        ep_info[ep]['flow_id'] = []
        ep_info[ep]['establish'] = []
        ep_info[ep]['index'] = []
        ep_info[ep]['sn'] = []
        ep_info[ep]['dn'] = []
    # group demand data by ep
    for idx in range(len(demand_data['flow_id'])):
        flow_id = demand_data['flow_id'][idx]
        if not added_flow[flow_id]:
            # BUG FIX: added_flow was previously never updated, so this
            # duplicate check was dead code and duplicated flow ids were
            # double counted (contradicting the 'already added' branch
            # below). Mark the flow as added.
            added_flow[flow_id] = True
            sn, dn = demand_data['sn'][idx], demand_data['dn'][idx]
            # record the flow against both of its endpoints
            for ep in (sn, dn):
                ep_info[ep]['flow_size'].append(demand_data['flow_size'][idx])
                ep_info[ep]['event_time'].append(demand_data['event_time'][idx])
                ep_info[ep]['demand_data_idx'].append(idx)
                ep_info[ep]['flow_id'].append(flow_id)
                ep_info[ep]['establish'].append(demand_data['establish'][idx])
                ep_info[ep]['index'].append(demand_data['index'][idx])
                ep_info[ep]['sn'].append(sn)
                ep_info[ep]['dn'].append(dn)
        else:
            # already added this flow
            pass
    return ep_info
def adjust_demand_load_to_ep_link_margin(demand_data,
                                        new_num_demands,
                                        interarrival_time_dist,
                                        eps,
                                        node_dist,
                                        flow_size_dist,
                                        network_load_config,
                                        ep_link_margin=0.95,
                                        increment_factor=1.001,
                                        print_data=False):
    '''
    Decrease ep link loads of each ep link until <= ep link margin load (e.g. 0.95).
    If after this decrease the overall load is below target load, increase lowest
    ep link loads until get to target load. Therefore as target load tends to 1,
    node load distribution tends towards uniform distribution (since load on all
    eps will tend to 1) (however, flow size and node pair probability distributions
    remain unchanged).

    Note: new_num_demands, interarrival_time_dist, node_dist and
    flow_size_dist are accepted but not used in this body (kept for
    interface consistency with the other adjustment functions).

    Returns:
        dict: demand_data, mutated in place so that no endpoint link exceeds
            ep_link_margin while the overall load meets the target.

    Raises:
        Exception: If all endpoints reach the margin before the overall
            target load can be met.
    '''
    # load_rate = get_flow_centric_demand_data_load_rate(demand_data, method='mean_per_ep', eps=eps)
    load_rate = get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True)
    load_fraction = load_rate / network_load_config['network_rate_capacity']
    target_load_fraction = network_load_config['target_load_fraction']
    network_rate_capacity = network_load_config['network_rate_capacity']

    assert target_load_fraction <= 1, \
        'Must have target load fraction <= 1 for compatability with network rate capacity.'
    target_load_rate = network_rate_capacity*target_load_fraction

    init_load_rate = copy.deepcopy(load_rate)
    init_load_fraction = init_load_rate / network_rate_capacity

    # group ep info from demand data
    ep_info = group_demand_data_into_ep_info(demand_data, eps)

    # decrease ep link load to below ep link margin for all ep links which exceed it
    # (spreading out that endpoint's arrival times lowers its load rate)
    adjusted_eps = []
    for ep in eps:
        # total_info = sum(ep_info[ep]['flow_size'])
        # time_first_flow_arrived = min(ep_info[ep]['event_time'])
        # time_last_flow_arrived = max(ep_info[ep]['event_time'])
        # ep_load_frac = (total_info / (time_last_flow_arrived-time_first_flow_arrived)) / network_load_config['ep_link_capacity']
        ep_load_rate = get_flow_centric_demand_data_ep_load_rate(demand_data, ep, eps)
        ep_load_frac = ep_load_rate / network_load_config['ep_link_capacity']
        if ep_load_frac > ep_link_margin:
            adjusted_eps.append(ep)
            while ep_load_frac > ep_link_margin:
                # must decrease load by spreading out arrival times
                for i in range(len(ep_info[ep]['demand_data_idx'])):
                    demand_data['event_time'][ep_info[ep]['demand_data_idx'][i]] *= increment_factor
                    # keep the cached ep_info view in sync with demand_data
                    ep_info[ep]['event_time'][i] = demand_data['event_time'][ep_info[ep]['demand_data_idx'][i]]
                # time_first_flow_arrived = min(ep_info[ep]['event_time'])
                # time_last_flow_arrived = max(ep_info[ep]['event_time'])
                # ep_load_frac = (total_info / (time_last_flow_arrived-time_first_flow_arrived)) / network_load_config['ep_link_capacity']
                ep_load_rate = get_flow_centric_demand_data_ep_load_rate(demand_data, ep, eps)
                ep_load_frac = ep_load_rate / network_load_config['ep_link_capacity']

    # if overall load now below target load, increase loads of other ep links until get to target load
    # load_rate = get_flow_centric_demand_data_load_rate(demand_data, method='mean_per_ep', eps=eps)
    # load_frac = load_rate / network_rate_capacity
    load_rate = get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True)
    load_frac = load_rate / network_rate_capacity
    while load_frac < 0.99 * target_load_fraction:
        if len(adjusted_eps) == len(eps):
            raise Exception('All eps have been adjusted to be <= {} load margin, but load frac is still {} (target {}). Change target load, or change distributions and/or topology to be valid for your desired target load (e.g. node distribution might be too heavily skewed).'.format(ep_link_margin, load_frac, target_load_fraction))

        # DEBUG
        ep_loads = {ep: None for ep in eps}
        ep_last_event_times = {ep: None for ep in eps}
        ep_first_event_times = {ep: None for ep in eps}
        # print('Adjusted eps: {}'.format(adjusted_eps))

        # NEW
        # find the endpoint whose last event defines the overall trace duration
        for ep in ep_info.keys():
            ep_last_event_time = max(ep_info[ep]['event_time'])
            ep_last_event_times[ep] = ep_last_event_time
        ep_with_last_event = max(ep_last_event_times, key=ep_last_event_times.get)

        for ep in ep_info.keys():
            # DEBUG
            ep_load_rate = get_flow_centric_demand_data_ep_load_rate(demand_data, ep, eps)
            ep_load_frac = ep_load_rate / network_load_config['ep_link_capacity']
            first_event_time = min(ep_info[ep]['event_time'])
            ep_loads[ep] = ep_load_frac
            ep_first_event_times[ep] = first_event_time

            # update which endpoint currently holds the last event, since
            # event times are mutated inside this loop
            last_event_time = max(ep_info[ep]['event_time'])
            ep_last_event_times[ep] = last_event_time
            ep_with_last_event = max(ep_last_event_times, key=ep_last_event_times.get)

            if ep in adjusted_eps:
                # already adjusted this ep to <= ep link margin
                pass
            else:
                # check that not about to exceed ep link rate
                # time_last_flow_arrived = max(ep_info[ep]['event_time'])
                # time_first_flow_arrived = min(ep_info[ep]['event_time'])
                # total_info = sum(ep_info[ep]['flow_size'])
                # ep_load_frac = (total_info / (time_last_flow_arrived-time_first_flow_arrived)) / network_load_config['ep_link_capacity']
                ep_load_rate = get_flow_centric_demand_data_ep_load_rate(demand_data, ep, eps)
                ep_load_frac = ep_load_rate / network_load_config['ep_link_capacity']
                if ep_load_frac >= 0.99*ep_link_margin:
                    # can no longer increase load on this ep
                    adjusted_eps.append(ep)
                else:
                    # can try increase load on this ep to reach overall target load
                    found_flow_to_adjust = False
                    for i in range(len(ep_info[ep]['demand_data_idx'])):
                        sn, dn = ep_info[ep]['sn'][i], ep_info[ep]['dn'][i]
                        if sn not in adjusted_eps and dn not in adjusted_eps:
                            found_flow_to_adjust = True
                            # # New
                            # demand_data['event_time'][ep_info[ep]['demand_data_idx'][i]] *= (1-(increment_factor-1))
                            # ep_info[ep]['event_time'][i] = demand_data['event_time'][ep_info[ep]['demand_data_idx'][i]]
                            # Old
                            if ep_with_last_event in adjusted_eps:
                                # cannot change overall load by adjusting total duration since load-limiting ep already at max load, must instead change total info
                                demand_data['flow_size'][ep_info[ep]['demand_data_idx'][i]] *= increment_factor
                                # ensure is integer
                                demand_data['flow_size'][ep_info[ep]['demand_data_idx'][i]] = int(demand_data['flow_size'][ep_info[ep]['demand_data_idx'][i]])
                                ep_info[ep]['flow_size'][i] = demand_data['flow_size'][ep_info[ep]['demand_data_idx'][i]]
                            else:
                                # can change overall load by adjusting total duration since load limiting ep not already at max load
                                demand_data['event_time'][ep_info[ep]['demand_data_idx'][i]] *= (1-(increment_factor-1))
                                ep_info[ep]['event_time'][i] = demand_data['event_time'][ep_info[ep]['demand_data_idx'][i]]
                    if not found_flow_to_adjust:
                        raise Exception('Adjusted ep loads as much as possible, but could only reach overall load {} (target {}). Either increase ep link margin (currently {}), decrease target load, or change distributions and/or topology to be valid for your requested overall load (e.g. node distributions might be too heavily skewed).'.format(load_frac, target_load_fraction, ep_link_margin))

        # load_rate = get_flow_centric_demand_data_load_rate(demand_data, method='mean_per_ep', eps=eps)
        # load_frac = load_rate / network_rate_capacity
        load_rate = get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True)
        load_frac = load_rate / network_rate_capacity
        print('Overall load frac: {} | Target: {}'.format(load_frac, target_load_fraction))
        # print('ep loads: {}\nep first event times: {}\nep last event times: {}\nep with last event:{}'.format(ep_loads, ep_first_event_times, ep_last_event_times, max(ep_last_event_times, key=ep_last_event_times.get)))

    if print_data:
        print('Final load rate | frac after adjusting ep loads to <= {}: {} | {}'.format(ep_link_margin, load_rate, load_frac))

    return demand_data
def increase_demand_load_to_target(demand_data,
                                   num_demands,
                                   interarrival_time_dist,
                                   eps,
                                   node_dist,
                                   flow_size_dist,
                                   network_load_config,
                                   increment_factor=0.5,
                                   print_data=False):
    '''Increase the load of demand_data until it reaches the target load fraction.

    Repeatedly scales all interarrival time random variables by
    increment_factor (< 1 shrinks them, packing flows closer together and
    raising the offered load) and regenerates the demand data, until the
    overall load fraction is >= network_load_config['target_load_fraction'].

    Returns:
        tuple: (regenerated demand_data, final interarrival_time_dist,
            num_demands).

    Raises:
        Exception: If the target load is not reached within 15 loops and
            network_load_config['disable_timeouts'] is falsy.
    '''
    # load_rate = get_flow_centric_demand_data_load_rate(demand_data, method='mean_per_ep', eps=eps)
    # load_fraction = load_rate / network_load_config['network_rate_capacity']
    load_rate = get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True)
    load_fraction = load_rate / network_load_config['network_rate_capacity']

    num_loops = 1
    # adjust to get load fraction >= target load fraction
    while load_fraction < network_load_config['target_load_fraction']:
        # # increase number of demands by 1% to try increase loads
        # num_demands = int(1.01 * num_demands)

        # decrease interarrival times to try increase load
        new_interarrival_time_dist = {}
        for rand_var, prob in interarrival_time_dist.items():
            new_rand_var = rand_var * increment_factor
            new_interarrival_time_dist[new_rand_var] = prob

        # update interarrival time dist
        interarrival_time_dist = new_interarrival_time_dist

        # regenerate the demand data with the tighter interarrival times
        demand_data = create_flow_centric_demand_data(num_demands=num_demands,
                                                      eps=eps,
                                                      node_dist=node_dist,
                                                      flow_size_dist=flow_size_dist,
                                                      interarrival_time_dist=interarrival_time_dist,
                                                      print_data=print_data)
        # load_rate = get_flow_centric_demand_data_load_rate(demand_data, method='mean_per_ep', eps=eps)
        # load_fraction = load_rate / network_load_config['network_rate_capacity']
        load_rate = get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True)
        load_fraction = load_rate / network_load_config['network_rate_capacity']
        num_loops += 1
        if print_data:
            print('Reached load of {} (target load {}) after {} loops.'.format(load_fraction, network_load_config['target_load_fraction'], num_loops))
        if network_load_config['disable_timeouts']:
            # keep running loop to infinity
            if num_loops % 10 == 0:
                if print_data:
                    print('Warning: Have disabled timeouts. Ran {} loops to try to reach {} network load (reached {} load so far). Set network_load_config[\'disable_timeouts\']=True if desired. Disable this warning by setting print_data=False when calling create_demand_data.'.format(num_loops, network_load_config['target_load_fraction'], load_fraction))
        else:
            if num_loops > 15:
                raise Exception('Time out trying to reach requested network load fraction (reached {} but requested {}). Consider adjusting demand data parameters (e.g. increase flow size, decrease interarrival time, etc.), decreasing target_load_fraction, or decreasing network_rate_capacity. Alternatively, to disable timeouts, set network_load_config[\'disable_timeouts\'] = True.'.format(load_fraction, network_load_config['target_load_fraction']))

    return demand_data, interarrival_time_dist, num_demands
def decrease_demand_load_to_target(demand_data,
                                   num_demands,
                                   interarrival_time_dist,
                                   eps,
                                   node_dist,
                                   flow_size_dist,
                                   network_load_config,
                                   increment_factor=1.001,
                                   print_data=False):
    '''Decrease the load of demand_data until it is <= the target load fraction.

    Repeatedly stretches all event times by increment_factor (spreading out
    flow arrivals and therefore lowering the offered load rate) until the
    overall load rate is no greater than the target load rate.

    Note: num_demands, eps, node_dist and flow_size_dist are accepted but not
    used in this body (kept for interface consistency with
    increase_demand_load_to_target).

    Returns:
        tuple: (adjusted demand_data, interarrival time distribution).

    Raises:
        Exception: If the initial load is already below the target (this
            function can only decrease load), or if the target is not reached
            within 15 loops and timeouts are enabled.
    '''
    # load_rate = get_flow_centric_demand_data_load_rate(demand_data, method='mean_per_ep', eps=eps)
    load_rate = get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True)
    load_fraction = load_rate / network_load_config['network_rate_capacity']
    target_load_fraction = network_load_config['target_load_fraction']
    network_rate_capacity = network_load_config['network_rate_capacity']

    assert target_load_fraction <= 1, \
        'Must have target load fraction <= 1 for compatability with network rate capacity.'
    target_load_rate = network_rate_capacity*target_load_fraction

    init_load_rate = copy.deepcopy(load_rate)
    init_load_fraction = init_load_rate / network_rate_capacity

    # BUG FIX: new_interarrival_time_dist was previously only bound inside the
    # adjustment loop, so the return below raised NameError whenever no
    # adjustment was needed (load exactly at target), and an empty dict was
    # returned whenever the loop did run. Initialise it to the input
    # distribution instead (the event times themselves are what get adjusted).
    new_interarrival_time_dist = interarrival_time_dist

    if load_rate > target_load_rate:
        # increase interarrival time dist until get target load rate
        num_loops = 1
        while load_rate > target_load_rate:
            # adjust interarrival dist by adjusting event times
            demand_data['event_time'] *= increment_factor
            # load_rate = get_flow_centric_demand_data_load_rate(demand_data, method='mean_per_ep', eps=eps)
            load_rate = get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True)
            num_loops += 1
            if network_load_config['disable_timeouts']:
                # keep running loop to infinity
                if num_loops % 10 == 0:
                    if print_data:
                        print('Warning: Have disabled timeouts. Ran {} loops to try to reach {} network load (reached {} load so far). Set network_load_config[\'disable_timeouts\']=True if desired. Disable this warning by setting print_data=False when calling create_demand_data.'.format(num_loops, network_load_config['target_load_fraction'], load_fraction))
            else:
                if num_loops > 15:
                    raise Exception('Time out trying to reach requested network load fraction (reached {} but requested {}). Consider adjusting demand data parameters (e.g. increase flow size, decrease interarrival time, etc.), decreasing target_load_fraction, or decreasing network_rate_capacity. Alternatively, to disable timeouts, set network_load_config[\'disable_timeouts\'] = True.'.format(load_fraction, network_load_config['target_load_fraction']))

    elif load_rate < target_load_rate:
        load_fraction = load_rate / network_rate_capacity
        raise Exception('Load is {}, but requested target load is {}. Either decrease target load or increase load in demand_data. To increase load in demand_data, increase number of demands generated or adjust e.g. event size, interarrival time, etc., then re-construct demand_data and pass back into this function.'.format(load_fraction, target_load_fraction))

    else:
        # load already exactly at target; nothing to adjust
        pass

    load_fraction = load_rate / network_rate_capacity

    if print_data:
        print('Network rate capacity: {} Gbps'.format(network_rate_capacity))
        print('Initial load rate: {} Gbps'.format(init_load_rate))
        print('Initial load fraction: {}'.format(init_load_fraction))
        print('Target load rate: {} Gbps'.format(target_load_rate))
        print('Target load fraction: {}'.format(target_load_fraction))
        print('Final load rate: {} Gbps'.format(load_rate))
        print('Final load fraction: {}'.format(load_fraction))
        print('Final number of demands: {}'.format(len(demand_data['flow_id'])))

    return demand_data, new_interarrival_time_dist
def drop_random_flow_from_demand_data(demand_data):
    '''Remove one randomly chosen flow with positive size from demand_data.

    Picks a random event index; if that flow has size <= 0, walks forward
    (wrapping around) until a droppable flow is found. The chosen entry is
    removed from every value in demand_data (in place for lists, via
    np.delete for arrays).

    Raises:
        Exception: If no flow with positive size exists in demand_data.
    '''
    num_events = len(demand_data['event_time'])
    drop_idx = random.choice([i for i in range(num_events)])
    attempts = 0
    # walk forward (with wrap-around) until a flow of positive size is found
    while demand_data['flow_size'][drop_idx] <= 0:
        drop_idx += 1
        if drop_idx > num_events - 1:
            # start from beginning
            drop_idx = 0
        attempts += 1
        if attempts > num_events:
            raise Exception('Cannot find event in demand_data to drop.')
    for key in list(demand_data.keys()):
        values = demand_data[key]
        if type(values) == list:
            del values[drop_idx]
        else:
            # is numpy array
            values = np.delete(values, drop_idx)
        demand_data[key] = values
    return demand_data
def get_flow_centric_demand_data_ep_load_rate(demand_data, ep, eps, method='all_eps'):
    '''Return the load rate (total info / duration) for a single endpoint.

    If method=='all_eps', duration is time_last_flow_arrived-time_first_flow_arrived
    across all endpoints. If method=='per_ep', duration is time_last_flow_arrived-time_first_flow_arrived
    for this specific ep.

    Args:
        demand_data (dict): Flow-centric demand data.
        ep: Endpoint whose load rate to compute.
        eps (list): All network endpoint labels.
        method (str): 'all_eps' or 'per_ep' (see above).

    Returns:
        float: Load rate for ep.
    '''
    # FIX: an unrecognised method previously fell through silently and raised
    # an opaque UnboundLocalError below; fail fast with a clear message.
    if method not in ('per_ep', 'all_eps'):
        raise Exception('Unrecognised method {}. Must be \'per_ep\' or \'all_eps\'.'.format(method))

    ep_info = group_demand_data_into_ep_info(demand_data, eps)
    total_info = sum(ep_info[ep]['flow_size'])

    if method == 'per_ep':
        time_first_flow_arrived = min(ep_info[ep]['event_time'])
        time_last_flow_arrived = max(ep_info[ep]['event_time'])
    elif method == 'all_eps':
        time_first_flow_arrived = min(demand_data['event_time'])
        time_last_flow_arrived = max(demand_data['event_time'])

    duration = time_last_flow_arrived - time_first_flow_arrived
    load_rate = total_info / duration

    return load_rate
def get_flow_centric_demand_data_overall_load_rate(demand_data, bidirectional_links=True):
    '''Return the overall offered load rate of demand_data.

    The load rate is the total information arrived divided by the duration
    between the first and last valid flow arrivals. When links are
    bidirectional, each flow occupies both its source and destination
    endpoint links, so it contributes twice its size to the occupied
    bandwidth; otherwise it contributes its size once.
    '''
    total_info = get_flow_centric_demand_data_total_info_arrived(demand_data)
    first_event_time, last_event_time = get_first_last_flow_arrival_times(demand_data)
    duration = last_event_time - first_event_time

    # 1 flow occupies 2 endpoint links when bidirectional -> double the load
    multiplier = 2 if bidirectional_links else 1
    load_rate = multiplier * total_info / duration

    return load_rate
# def get_flow_centric_demand_data_load_rate(demand_data, method='mean_all_eps', bidirectional_links=True, **kwargs):
# '''
# If flow connections are bidirectional_links, 1 flow takes up 2 endpoint links (the
# source link and the destination link), therefore effecitvely takes up load rate
# 2*flow_size*duration bandwidth. If not bidriectional, only takes up
# 1*flow_size*duration since only occupies bandwidth for 1 of these links.
# If method == 'mean_per_ep', will calculate the total network load as being the mean
# average load on each endpoint link (i.e. sum info requests for each link ->
# find load of each link -> find mean of ep link loads)
# If method == 'mean_all_eps', will calculate the total network load as being
# the average load over all endpoint links (i.e. sum info requests for all links
# -> find overall load of network)
# '''
# info_arrived = get_flow_centric_demand_data_total_info_arrived(demand_data)
# first_event_time, last_event_time = get_first_last_flow_arrival_times(demand_data)
# duration = last_event_time - first_event_time
# if method == 'mean_per_ep':
# ep_loads = {ep: 0 for ep in kwargs['eps']}
# ep_info = group_demand_data_into_ep_info(demand_data, kwargs['eps'])
# for ep in kwargs['eps']:
# total_info = sum(ep_info[ep]['flow_size'])
# # time_first_flow_arrived = min(ep_info[ep]['event_time'])
# # time_last_flow_arrived = max(ep_info[ep]['event_time'])
# # duration = time_last_flow_arrived - time_first_flow_arrived
# ep_loads[ep] = total_info / duration
# load_rate = np.mean(list(ep_loads.values())) * len(kwargs['eps'])
# elif method == 'mean_all_eps':
# if bidirectional_links:
# # 1 flow occupies 2 endpoint links therefore has 2*flow_size load
# load_rate = 2*info_arrived/duration
# else:
# load_rate = info_arrived/duration
# else:
# raise Exception('Unrecognised load rate calculation method {}'.format(method))
# return load_rate
def get_flow_centric_demand_data_total_info_arrived(demand_data):
    '''Return the summed size of all flows with positive size in demand_data.'''
    # flows with size <= 0 carry no information and are excluded
    return sum(size for size in demand_data['flow_size'] if size > 0)
def get_first_last_flow_arrival_times(demand_data):
    '''Return the (first, last) arrival times of valid flows in demand_data.

    A flow is valid if it has positive size and distinct source and
    destination endpoints.

    Raises:
        Exception: If demand_data contains no valid flows.
    '''
    arrival_times = [
        demand_data['event_time'][idx]
        for idx in range(len(demand_data['event_time']))
        if demand_data['flow_size'][idx] > 0
        and demand_data['sn'][idx] != demand_data['dn'][idx]
    ]
    if not arrival_times:
        raise Exception('Could not find first event establish request with size > 0.. This occurs because either demand_data given does not contain any events, or because all events have had to be dropped to try get below your specified target load. Try increasing the target load or increasing the granularity of load per demand (by e.g. decreasing demand sizes, increasing total number of demands, etc.) when you generate your demand data so that this function can more easily hit your desired load target.')
    return min(arrival_times), max(arrival_times)
| [
"trafpy.generator.src.dists.node_dists.gen_node_demands",
"random.choice",
"numpy.delete",
"numpy.sort",
"trafpy.generator.src.tools.gen_event_times",
"numpy.argsort",
"numpy.array",
"collections.defaultdict",
"copy.deepcopy",
"time.time"
] | [((714, 725), 'time.time', 'time.time', ([], {}), '()\n', (723, 725), False, 'import time\n'), ((1442, 1550), 'trafpy.generator.src.dists.node_dists.gen_node_demands', 'node_dists.gen_node_demands', ([], {'eps': 'eps', 'node_dist': 'node_dist', 'num_demands': 'num_demands', 'duplicate': 'duplicate'}), '(eps=eps, node_dist=node_dist, num_demands=\n num_demands, duplicate=duplicate)\n', (1469, 1550), False, 'from trafpy.generator.src.dists import val_dists, node_dists\n'), ((3506, 3563), 'trafpy.generator.src.tools.gen_event_times', 'tools.gen_event_times', (['interarrival_times', 'duration_times'], {}), '(interarrival_times, duration_times)\n', (3527, 3563), False, 'from trafpy.generator.src import tools\n'), ((4030, 4041), 'time.time', 'time.time', ([], {}), '()\n', (4039, 4041), False, 'import time\n'), ((4550, 4576), 'copy.deepcopy', 'copy.deepcopy', (['demand_data'], {}), '(demand_data)\n', (4563, 4576), False, 'import copy\n'), ((16820, 16844), 'copy.deepcopy', 'copy.deepcopy', (['load_rate'], {}), '(load_rate)\n', (16833, 16844), False, 'import copy\n'), ((29289, 29313), 'copy.deepcopy', 'copy.deepcopy', (['load_rate'], {}), '(load_rate)\n', (29302, 29313), False, 'import copy\n'), ((33253, 33281), 'random.choice', 'random.choice', (['event_indices'], {}), '(event_indices)\n', (33266, 33281), False, 'import random\n'), ((3596, 3619), 'numpy.argsort', 'np.argsort', (['event_times'], {}), '(event_times)\n', (3606, 3619), True, 'import numpy as np\n'), ((3621, 3641), 'numpy.sort', 'np.sort', (['event_times'], {}), '(event_times)\n', (3628, 3641), True, 'import numpy as np\n'), ((12401, 12438), 'numpy.argsort', 'np.argsort', (["demand_data['event_time']"], {}), "(demand_data['event_time'])\n", (12411, 12438), True, 'import numpy as np\n'), ((12440, 12474), 'numpy.sort', 'np.sort', (["demand_data['event_time']"], {}), "(demand_data['event_time'])\n", (12447, 12474), True, 'import numpy as np\n'), ((13009, 13033), 'collections.defaultdict', 'defaultdict', 
(['nested_dict'], {}), '(nested_dict)\n', (13020, 13033), False, 'from collections import defaultdict\n'), ((3714, 3732), 'numpy.array', 'np.array', (['flow_ids'], {}), '(flow_ids)\n', (3722, 3732), True, 'import numpy as np\n'), ((12505, 12537), 'numpy.array', 'np.array', (["demand_data['flow_id']"], {}), "(demand_data['flow_id'])\n", (12513, 12537), True, 'import numpy as np\n'), ((33997, 34034), 'numpy.delete', 'np.delete', (['new_data', 'flow_idx_to_drop'], {}), '(new_data, flow_idx_to_drop)\n', (34006, 34034), True, 'import numpy as np\n')] |
import logging
import numpy as np
import tensorflow as tf
from tf_agents.specs import TensorSpec
from envs.utils import Epsilon, TFUniformReplayBufferWrapper
class BaseEnvWrapper:
    """Couples a training and an evaluation environment with an
    epsilon-greedy exploration schedule and a uniform replay buffer.

    The wrapper pre-fills the replay buffer with random-action transitions
    at construction time and exposes ``_step_driver`` to advance the
    training environment one step while recording the transition.

    NOTE(review): ``env``/``eval_env`` are assumed to be TF-Agents
    ``TFEnvironment``-style objects (they expose ``action_spec``,
    ``current_time_step``, ``step``, ``reset`` and ``batch_size``) —
    inferred from usage here; confirm against the callers.
    """

    def __init__(
        self,
        env,
        eval_env,
        name,
        in_interactor,
        out_interactor,
        replay_buffer_size=10_000,
        replay_buffer_initialization_size=1000,
        epsilon_initial_value=1.0,
        epsilon_end_value=0.1,
        epsilon_decay_steps=10_000,
        epsilon_power=1,
    ):
        self.name = name
        self.env = env
        self.eval_env = eval_env
        # Action spec maxima are inclusive, hence the +1.
        self.number_of_actions = self.env.action_spec().maximum + 1
        self._epislon = Epsilon(
            initial_value=epsilon_initial_value,
            end_value=epsilon_end_value,
            decay_steps=epsilon_decay_steps,
            power=epsilon_power,
            identifier=f"Epsilon ({self.name})",
        )
        self._replay_buffer_size = replay_buffer_size
        self._replay_buffer_batch_size = self.env.batch_size
        # Each buffer entry is (obs, action, reward, next_obs, done).
        self._replay_buffer = TFUniformReplayBufferWrapper(
            self._replay_buffer_size,
            self._replay_buffer_batch_size,
            (
                self.env.observation_spec(),
                TensorSpec(shape=(), dtype=tf.int32),
                TensorSpec(shape=(), dtype=tf.float32),
                self.env.observation_spec(),
                TensorSpec(shape=(), dtype=tf.bool),
            ),
            self.name,
        )
        self.in_interactor = in_interactor
        self.out_interactor = out_interactor
        logging.debug(
            f"{self.__class__.__name__}, {self.name}: Object created."
        )
        self.init_replay_buffer(replay_buffer_initialization_size)

    def _record_transition(self, cur_obs, action, next_obs):
        """Store one (s, a, r, s', done) transition and reset the env if the
        episode just ended."""
        self._replay_buffer.add_batch(
            (
                cur_obs.observation,
                tf.expand_dims(action, axis=0),
                next_obs.reward,
                next_obs.observation,
                next_obs.is_last(),
            )
        )
        if next_obs.is_last():
            self.env.reset()

    def init_replay_buffer(self, size):
        """Pre-fill the replay buffer with ``size`` uniformly random-action
        transitions so that learning can start from a non-empty buffer."""
        logging.info(
            f"Initializing replay buffer of {self.name} with size {size}."
        )
        for _ in range(size):
            cur_obs = self.env.current_time_step()
            action = np.random.randint(0, self.number_of_actions)
            action = tf.convert_to_tensor(action, dtype=tf.int32)
            next_obs = self.env.step(action)
            self._record_transition(cur_obs, action, next_obs)

    def get_batch_from_replay_buffer(self, batch_size):
        """Sample a uniform batch of stored transitions."""
        return self._replay_buffer.sample_batch(batch_size)

    def _step_driver(self, action):
        """Advance the environment one step with epsilon-greedy exploration:
        with probability epsilon a random action replaces ``action``."""
        cur_obs = self.env.current_time_step()
        if np.random.rand() < self._epislon.epsilon:
            action = tf.convert_to_tensor(
                np.random.randint(0, self.number_of_actions),
                dtype=tf.int32,
            )
        else:
            action = tf.convert_to_tensor(action, dtype=tf.int32)
        next_obs = self.env.step(action)
        self._record_transition(cur_obs, action, next_obs)
| [
"logging.debug",
"numpy.random.rand",
"envs.utils.Epsilon",
"numpy.random.randint",
"tf_agents.specs.TensorSpec",
"tensorflow.convert_to_tensor",
"tensorflow.expand_dims",
"logging.info"
] | [((700, 873), 'envs.utils.Epsilon', 'Epsilon', ([], {'initial_value': 'epsilon_initial_value', 'end_value': 'epsilon_end_value', 'decay_steps': 'epsilon_decay_steps', 'power': 'epsilon_power', 'identifier': 'f"""Epsilon ({self.name})"""'}), "(initial_value=epsilon_initial_value, end_value=epsilon_end_value,\n decay_steps=epsilon_decay_steps, power=epsilon_power, identifier=\n f'Epsilon ({self.name})')\n", (707, 873), False, 'from envs.utils import Epsilon, TFUniformReplayBufferWrapper\n'), ((1607, 1680), 'logging.debug', 'logging.debug', (['f"""{self.__class__.__name__}, {self.name}: Object created."""'], {}), "(f'{self.__class__.__name__}, {self.name}: Object created.')\n", (1620, 1680), False, 'import logging\n'), ((1820, 1896), 'logging.info', 'logging.info', (['f"""Initializing replay buffer of {self.name} with size {size}."""'], {}), "(f'Initializing replay buffer of {self.name} with size {size}.')\n", (1832, 1896), False, 'import logging\n'), ((2021, 2065), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.number_of_actions'], {}), '(0, self.number_of_actions)\n', (2038, 2065), True, 'import numpy as np\n'), ((2087, 2131), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['action'], {'dtype': 'tf.int32'}), '(action, dtype=tf.int32)\n', (2107, 2131), True, 'import tensorflow as tf\n'), ((2762, 2778), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2776, 2778), True, 'import numpy as np\n'), ((2990, 3034), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['action'], {'dtype': 'tf.int32'}), '(action, dtype=tf.int32)\n', (3010, 3034), True, 'import tensorflow as tf\n'), ((1269, 1305), 'tf_agents.specs.TensorSpec', 'TensorSpec', ([], {'shape': '()', 'dtype': 'tf.int32'}), '(shape=(), dtype=tf.int32)\n', (1279, 1305), False, 'from tf_agents.specs import TensorSpec\n'), ((1323, 1361), 'tf_agents.specs.TensorSpec', 'TensorSpec', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (1333, 1361), False, 
'from tf_agents.specs import TensorSpec\n'), ((1424, 1459), 'tf_agents.specs.TensorSpec', 'TensorSpec', ([], {'shape': '()', 'dtype': 'tf.bool'}), '(shape=(), dtype=tf.bool)\n', (1434, 1459), False, 'from tf_agents.specs import TensorSpec\n'), ((2863, 2907), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.number_of_actions'], {}), '(0, self.number_of_actions)\n', (2880, 2907), True, 'import numpy as np\n'), ((3182, 3212), 'tensorflow.expand_dims', 'tf.expand_dims', (['action'], {'axis': '(0)'}), '(action, axis=0)\n', (3196, 3212), True, 'import tensorflow as tf\n'), ((2299, 2329), 'tensorflow.expand_dims', 'tf.expand_dims', (['action'], {'axis': '(0)'}), '(action, axis=0)\n', (2313, 2329), True, 'import tensorflow as tf\n')] |
# third party
import numpy as np
import pyarrow as pa
import torch
# relative
from ...core.common.serde.serializable import serializable
from ...experimental_flags import ApacheArrowCompression
from ...experimental_flags import flags
from ...proto.lib.numpy.array_pb2 import NumpyProto
from ..torch.tensor_util import tensor_deserializer
from ..torch.tensor_util import tensor_serializer
# numpy scalar dtypes the serializers below accept.
SUPPORTED_BOOL_TYPES = [np.bool_]
SUPPORTED_INT_TYPES = [
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
]
SUPPORTED_FLOAT_TYPES = [
    np.float16,
    np.float32,
    np.float64,
]
SUPPORTED_DTYPES = SUPPORTED_BOOL_TYPES + SUPPORTED_INT_TYPES + SUPPORTED_FLOAT_TYPES
# Wide unsigned dtypes are recast to the same-width signed dtype before the
# torch round-trip; the wrapped-around negative values map back to the original
# unsigned values on the other side (see protobuf_serialize).
DTYPE_REFACTOR = {
    np.dtype("uint16"): np.int16,
    np.dtype("uint32"): np.int32,
    np.dtype("uint64"): np.int64,
}
def arrow_serialize(obj: np.ndarray) -> NumpyProto:
    """Serialize a numpy array into a ``NumpyProto`` via Apache Arrow IPC.

    The array is written as an Arrow tensor into an in-memory buffer and,
    depending on the ``APACHE_ARROW_COMPRESSION`` experimental flag,
    optionally compressed. The original dtype name and the uncompressed
    buffer size are stored so ``arrow_deserialize`` can round-trip it.

    Note: the return annotation previously claimed ``bytes``, but the
    function has always returned a ``NumpyProto``; the annotation is fixed.
    """
    original_dtype = obj.dtype
    apache_arrow = pa.Tensor.from_numpy(obj=obj)
    sink = pa.BufferOutputStream()
    pa.ipc.write_tensor(apache_arrow, sink)
    buffer = sink.getvalue()
    if flags.APACHE_ARROW_COMPRESSION is ApacheArrowCompression.NONE:
        numpy_bytes = buffer.to_pybytes()
    else:
        numpy_bytes = pa.compress(
            buffer, asbytes=True, codec=flags.APACHE_ARROW_COMPRESSION.value
        )
    dtype = original_dtype.name
    return NumpyProto(
        arrow_data=numpy_bytes, dtype=dtype, decompressed_size=buffer.size
    )
def arrow_deserialize(proto: NumpyProto) -> np.ndarray:
    """Reconstruct a numpy array from a ``NumpyProto`` written by
    ``arrow_serialize``, honoring the compression flag and restoring the
    original dtype."""
    raw = bytes(proto.arrow_data)
    target_dtype = np.dtype(proto.dtype)
    if flags.APACHE_ARROW_COMPRESSION is ApacheArrowCompression.NONE:
        buf = pa.BufferReader(raw).read_buffer()
    else:
        buf = pa.decompress(
            raw,
            decompressed_size=proto.decompressed_size,
            codec=flags.APACHE_ARROW_COMPRESSION.value,
        )
    tensor = pa.ipc.read_tensor(buf)
    arr = tensor.to_numpy()
    # Arrow hands back a read-only view; make it writable before returning.
    arr.setflags(write=True)
    return arr.astype(target_dtype)
def protobuf_serialize(obj: np.ndarray) -> NumpyProto:
    """Serialize a numpy array into a ``NumpyProto`` carrying a torch-tensor
    payload, after validating the dtype against ``SUPPORTED_DTYPES``."""
    original_dtype = obj.dtype
    if original_dtype not in SUPPORTED_DTYPES:
        raise NotImplementedError(f"{original_dtype} is not supported")
    if original_dtype in DTYPE_REFACTOR:
        # store as a signed int, the negative wrap around values convert back
        # to the same original unsigned values on the other side
        obj = obj.astype(DTYPE_REFACTOR[original_dtype])
    # No clone is taken here: cloning arrays beyond ~800k of data has been
    # seen to freeze the worker, and the tensor is serialized immediately.
    tensor_bytes = tensor_serializer(torch.from_numpy(obj))
    return NumpyProto(proto_data=tensor_bytes, dtype=original_dtype.name)
def protobuf_deserialize(proto: NumpyProto) -> np.ndarray:
    """Rebuild a numpy array from the torch-tensor payload of a
    ``NumpyProto``, restoring the recorded dtype."""
    tensor = tensor_deserializer(proto.proto_data)
    # Detach and copy so the result owns its memory independently of torch.
    array = tensor.to("cpu").detach().numpy().copy()
    return array.astype(np.dtype(proto.dtype))
def serialize_numpy_array(obj: np.ndarray) -> NumpyProto:
    """Dispatch to the Arrow or protobuf serializer based on the
    ``APACHE_ARROW_TENSOR_SERDE`` experimental flag."""
    serializer = (
        arrow_serialize if flags.APACHE_ARROW_TENSOR_SERDE else protobuf_serialize
    )
    return serializer(obj)
def deserialize_numpy_array(proto: NumpyProto) -> np.ndarray:
    """Route to the deserializer matching the payload the proto carries."""
    if not proto.HasField("arrow_data"):
        return protobuf_deserialize(proto)
    return arrow_deserialize(proto)
# Register np.ndarray with the project's serde machinery via a generated
# wrapper, wiring the proto schema to the (de)serialization hooks above.
serializable(generate_wrapper=True)(
    wrapped_type=np.ndarray,
    import_path="numpy.ndarray",
    protobuf_scheme=NumpyProto,
    type_object2proto=serialize_numpy_array,
    type_proto2object=deserialize_numpy_array,
)
| [
"pyarrow.BufferOutputStream",
"pyarrow.ipc.write_tensor",
"pyarrow.BufferReader",
"pyarrow.decompress",
"torch.from_numpy",
"pyarrow.compress",
"pyarrow.ipc.read_tensor",
"pyarrow.Tensor.from_numpy",
"numpy.dtype"
] | [((752, 770), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (760, 770), True, 'import numpy as np\n'), ((786, 804), 'numpy.dtype', 'np.dtype', (['"""uint32"""'], {}), "('uint32')\n", (794, 804), True, 'import numpy as np\n'), ((820, 838), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (828, 838), True, 'import numpy as np\n'), ((951, 980), 'pyarrow.Tensor.from_numpy', 'pa.Tensor.from_numpy', ([], {'obj': 'obj'}), '(obj=obj)\n', (971, 980), True, 'import pyarrow as pa\n'), ((992, 1015), 'pyarrow.BufferOutputStream', 'pa.BufferOutputStream', ([], {}), '()\n', (1013, 1015), True, 'import pyarrow as pa\n'), ((1020, 1059), 'pyarrow.ipc.write_tensor', 'pa.ipc.write_tensor', (['apache_arrow', 'sink'], {}), '(apache_arrow, sink)\n', (1039, 1059), True, 'import pyarrow as pa\n'), ((1618, 1637), 'numpy.dtype', 'np.dtype', (['str_dtype'], {}), '(str_dtype)\n', (1626, 1637), True, 'import numpy as np\n'), ((1972, 1995), 'pyarrow.ipc.read_tensor', 'pa.ipc.read_tensor', (['buf'], {}), '(buf)\n', (1990, 1995), True, 'import pyarrow as pa\n'), ((2811, 2832), 'torch.from_numpy', 'torch.from_numpy', (['obj'], {}), '(obj)\n', (2827, 2832), False, 'import torch\n'), ((3184, 3203), 'numpy.dtype', 'np.dtype', (['str_dtype'], {}), '(str_dtype)\n', (3192, 3203), True, 'import numpy as np\n'), ((1233, 1310), 'pyarrow.compress', 'pa.compress', (['buffer'], {'asbytes': '(True)', 'codec': 'flags.APACHE_ARROW_COMPRESSION.value'}), '(buffer, asbytes=True, codec=flags.APACHE_ARROW_COMPRESSION.value)\n', (1244, 1310), True, 'import pyarrow as pa\n'), ((1725, 1745), 'pyarrow.BufferReader', 'pa.BufferReader', (['buf'], {}), '(buf)\n', (1740, 1745), True, 'import pyarrow as pa\n'), ((1805, 1915), 'pyarrow.decompress', 'pa.decompress', (['buf'], {'decompressed_size': 'proto.decompressed_size', 'codec': 'flags.APACHE_ARROW_COMPRESSION.value'}), '(buf, decompressed_size=proto.decompressed_size, codec=flags.\n APACHE_ARROW_COMPRESSION.value)\n', (1818, 1915), 
True, 'import pyarrow as pa\n')] |
"""
Author: <NAME>
Modified: <NAME>
"""
import os
import warnings
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from statsmodels.tools.sm_exceptions import EstimationWarning
from statsmodels.tsa.holtwinters import (ExponentialSmoothing,
SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS)
# Directory of this test module; used to locate the results CSV fixture.
base, _ = os.path.split(os.path.abspath(__file__))
housing_data = pd.read_csv(os.path.join(base, 'results', 'housing-data.csv'))
housing_data = housing_data.set_index('DATE')
# Month-start frequency so seasonal periods can be inferred from the index.
housing_data = housing_data.asfreq('MS')

# Trend/seasonal component options exercised by the parametrized tests below.
SEASONALS = ('add', 'mul', None)
TRENDS = ('add', 'mul', None)
def _simple_dbl_exp_smoother(x, alpha, beta, l0, b0, nforecast=0):
"""
Simple, slow, direct implementation of double exp smoothing for testing
"""
n = x.shape[0]
l = np.zeros(n)
b = np.zeros(n)
xhat = np.zeros(n)
f = np.zeros(nforecast)
l[0] = l0
b[0] = b0
# Special case the 0 observations since index -1 is not available
xhat[0] = l0 + b0
l[0] = alpha * x[0] + (1 - alpha) * (l0 + b0)
b[0] = beta * (l[0] - l0) + (1 - beta) * b0
for t in range(1, n):
# Obs in index t is the time t forecast for t + 1
l[t] = alpha * x[t] + (1 - alpha) * (l[t - 1] + b[t - 1])
b[t] = beta * (l[t] - l[t - 1]) + (1 - beta) * b[t - 1]
xhat[1:] = l[0:-1] + b[0:-1]
f[:] = l[-1] + np.arange(1, nforecast + 1) * b[-1]
err = x - xhat
return l, b, f, err, xhat
class TestHoltWinters(object):
    """Tests of SimpleExpSmoothing, Holt and ExponentialSmoothing fits and
    forecasts against fixed reference values.

    ``setup_class`` builds four small series (annual oil, annual air travel,
    annual livestock and quarterly Australian visitor data) that individual
    tests fit different smoother configurations to. Several expected values
    are noted in-line to differ slightly from the textbook figures.
    """

    @classmethod
    def setup_class(cls):
        # Changed for backwards compatibility with pandas
        # (data previously loaded via pd.read_json(<name>_json,
        # typ='Series').sort_index(); inlined below as plain lists)
        data = [446.65652290000003, 454.47330649999998, 455.66297400000002,
                423.63223879999998, 456.27132790000002, 440.58805009999998,
                425.33252010000001, 485.14944789999998, 506.04816210000001,
                526.79198329999997, 514.26888899999994, 494.21101929999998]
        index = ['1996-12-31 00:00:00', '1997-12-31 00:00:00', '1998-12-31 00:00:00',
                 '1999-12-31 00:00:00', '2000-12-31 00:00:00', '2001-12-31 00:00:00',
                 '2002-12-31 00:00:00', '2003-12-31 00:00:00', '2004-12-31 00:00:00',
                 '2005-12-31 00:00:00', '2006-12-31 00:00:00', '2007-12-31 00:00:00']
        oildata_oil = pd.Series(data, index)
        oildata_oil.index = pd.DatetimeIndex(oildata_oil.index,
                                            freq=pd.infer_freq(oildata_oil.index))
        cls.oildata_oil = oildata_oil

        data = [17.5534, 21.860099999999999, 23.886600000000001,
                26.929300000000001, 26.888500000000001, 28.831399999999999,
                30.075099999999999, 30.953499999999998, 30.185700000000001,
                31.579699999999999, 32.577568999999997, 33.477398000000001,
                39.021580999999998, 41.386431999999999, 41.596552000000003]
        index = ['1990-12-31 00:00:00', '1991-12-31 00:00:00', '1992-12-31 00:00:00',
                 '1993-12-31 00:00:00', '1994-12-31 00:00:00', '1995-12-31 00:00:00',
                 '1996-12-31 00:00:00', '1997-12-31 00:00:00', '1998-12-31 00:00:00',
                 '1999-12-31 00:00:00', '2000-12-31 00:00:00', '2001-12-31 00:00:00',
                 '2002-12-31 00:00:00', '2003-12-31 00:00:00', '2004-12-31 00:00:00']
        air_ausair = pd.Series(data, index)
        air_ausair.index = pd.DatetimeIndex(air_ausair.index,
                                          freq=pd.infer_freq(air_ausair.index))
        cls.air_ausair = air_ausair

        data = [263.91774700000002, 268.30722200000002, 260.662556,
                266.63941899999998, 277.51577800000001, 283.834045,
                290.30902800000001, 292.474198, 300.83069399999999,
                309.28665699999999, 318.33108099999998, 329.37239,
                338.88399800000002, 339.24412599999999, 328.60063200000002,
                314.25538499999999, 314.45969500000001, 321.41377899999998,
                329.78929199999999, 346.38516499999997, 352.29788200000002,
                348.37051500000001, 417.56292200000001, 417.12356999999997,
                417.749459, 412.233904, 411.94681700000001, 394.69707499999998,
                401.49927000000002, 408.27046799999999, 414.24279999999999]
        index = ['1970-12-31 00:00:00', '1971-12-31 00:00:00', '1972-12-31 00:00:00',
                 '1973-12-31 00:00:00', '1974-12-31 00:00:00', '1975-12-31 00:00:00',
                 '1976-12-31 00:00:00', '1977-12-31 00:00:00', '1978-12-31 00:00:00',
                 '1979-12-31 00:00:00', '1980-12-31 00:00:00', '1981-12-31 00:00:00',
                 '1982-12-31 00:00:00', '1983-12-31 00:00:00', '1984-12-31 00:00:00',
                 '1985-12-31 00:00:00', '1986-12-31 00:00:00', '1987-12-31 00:00:00',
                 '1988-12-31 00:00:00', '1989-12-31 00:00:00', '1990-12-31 00:00:00',
                 '1991-12-31 00:00:00', '1992-12-31 00:00:00', '1993-12-31 00:00:00',
                 '1994-12-31 00:00:00', '1995-12-31 00:00:00', '1996-12-31 00:00:00',
                 '1997-12-31 00:00:00', '1998-12-31 00:00:00', '1999-12-31 00:00:00',
                 '2000-12-31 00:00:00']
        livestock2_livestock = pd.Series(data, index)
        livestock2_livestock.index = pd.DatetimeIndex(
            livestock2_livestock.index,
            freq=pd.infer_freq(livestock2_livestock.index))
        cls.livestock2_livestock = livestock2_livestock

        data = [41.727457999999999, 24.04185, 32.328102999999999,
                37.328707999999999, 46.213152999999998, 29.346326000000001,
                36.482909999999997, 42.977719, 48.901524999999999,
                31.180221, 37.717880999999998, 40.420211000000002,
                51.206862999999998, 31.887228, 40.978262999999998,
                43.772491000000002, 55.558566999999996, 33.850915000000001,
                42.076383, 45.642291999999998, 59.766779999999997,
                35.191876999999998, 44.319737000000003, 47.913736]
        index = ['2005-03-01 00:00:00', '2005-06-01 00:00:00', '2005-09-01 00:00:00',
                 '2005-12-01 00:00:00', '2006-03-01 00:00:00', '2006-06-01 00:00:00',
                 '2006-09-01 00:00:00', '2006-12-01 00:00:00', '2007-03-01 00:00:00',
                 '2007-06-01 00:00:00', '2007-09-01 00:00:00', '2007-12-01 00:00:00',
                 '2008-03-01 00:00:00', '2008-06-01 00:00:00', '2008-09-01 00:00:00',
                 '2008-12-01 00:00:00', '2009-03-01 00:00:00', '2009-06-01 00:00:00',
                 '2009-09-01 00:00:00', '2009-12-01 00:00:00', '2010-03-01 00:00:00',
                 '2010-06-01 00:00:00', '2010-09-01 00:00:00', '2010-12-01 00:00:00']
        aust = pd.Series(data, index)
        aust.index = pd.DatetimeIndex(aust.index,
                                    freq=pd.infer_freq(aust.index))
        cls.aust = aust

    def test_predict(self):
        fit1 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
                                    seasonal='mul').fit()
        fit2 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
                                    seasonal='mul').fit()
        # fit3 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
        #                             seasonal='mul').fit(remove_bias=True, use_basinhopping=True)
        assert_almost_equal(fit1.predict('2011-03-01 00:00:00',
                                         '2011-12-01 00:00:00'),
                            [61.3083, 37.3730, 46.9652, 51.5578], 3)
        assert_almost_equal(fit2.predict(end='2011-12-01 00:00:00'),
                            [61.3083, 37.3730, 46.9652, 51.5578], 3)
        # assert_almost_equal(fit3.predict('2010-10-01 00:00:00', '2010-10-01 00:00:00'), [49.087], 3)

    def test_ndarray(self):
        # Fitting a plain ndarray (no index) must give the same forecasts.
        fit1 = ExponentialSmoothing(self.aust.values, seasonal_periods=4,
                                    trend='add', seasonal='mul').fit()
        assert_almost_equal(fit1.forecast(4), [61.3083, 37.3730, 46.9652, 51.5578], 3)

    @pytest.mark.xfail(reason='Optimizer does not converge')
    def test_forecast(self):
        fit1 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
                                    seasonal='add').fit()
        assert_almost_equal(fit1.forecast(steps=4),
                            [60.9542, 36.8505, 46.1628, 50.1272], 3)

    def test_simple_exp_smoothing(self):
        fit1 = SimpleExpSmoothing(self.oildata_oil).fit(0.2, optimized=False)
        fit2 = SimpleExpSmoothing(self.oildata_oil).fit(0.6, optimized=False)
        fit3 = SimpleExpSmoothing(self.oildata_oil).fit()
        assert_almost_equal(fit1.forecast(1), [484.802468], 4)
        assert_almost_equal(fit1.level,
                            [446.65652290, 448.21987962, 449.7084985,
                             444.49324656, 446.84886283, 445.59670028,
                             441.54386424, 450.26498098, 461.4216172,
                             474.49569042, 482.45033014, 484.80246797], 4)
        assert_almost_equal(fit2.forecast(1), [501.837461], 4)
        assert_almost_equal(fit3.forecast(1), [496.493543], 4)
        assert_almost_equal(fit3.params['smoothing_level'], 0.891998, 4)
        # has to be 3 for old python2.7 scipy versions
        assert_almost_equal(fit3.params['initial_level'], 447.478440, 3)

    def test_holt(self):
        fit1 = Holt(self.air_ausair).fit(smoothing_level=0.8,
                                          smoothing_slope=0.2, optimized=False)
        fit2 = Holt(self.air_ausair, exponential=True).fit(
            smoothing_level=0.8, smoothing_slope=0.2,
            optimized=False)
        fit3 = Holt(self.air_ausair, damped=True).fit(smoothing_level=0.8,
                                                       smoothing_slope=0.2)
        assert_almost_equal(fit1.forecast(5), [43.76, 45.59, 47.43, 49.27, 51.10], 2)
        assert_almost_equal(fit1.slope,
                            [3.617628, 3.59006512, 3.33438212, 3.23657639, 2.69263502,
                             2.46388914, 2.2229097, 1.95959226, 1.47054601, 1.3604894,
                             1.28045881, 1.20355193, 1.88267152, 2.09564416, 1.83655482], 4)
        assert_almost_equal(fit1.fittedfcast,
                            [21.8601, 22.032368, 25.48461872, 27.54058587,
                             30.28813356, 30.26106173, 31.58122149, 32.599234,
                             33.24223906, 32.26755382, 33.07776017, 33.95806605,
                             34.77708354, 40.05535303, 43.21586036, 43.75696849], 4)
        assert_almost_equal(fit2.forecast(5),
                            [44.60, 47.24, 50.04, 53.01, 56.15], 2)
        assert_almost_equal(fit3.forecast(5),
                            [42.85, 43.81, 44.66, 45.41, 46.06], 2)

    def test_holt_damp(self):
        fit1 = SimpleExpSmoothing(self.livestock2_livestock).fit()
        mod4 = Holt(self.livestock2_livestock, damped=True)
        fit4 = mod4.fit(damping_slope=0.98)
        mod5 = Holt(self.livestock2_livestock, exponential=True, damped=True)
        fit5 = mod5.fit()
        # We accept the below values as we getting a better SSE than text book
        assert_almost_equal(fit1.params['smoothing_level'], 1.00, 2)
        assert_almost_equal(fit1.params['smoothing_slope'], np.NaN, 2)
        assert_almost_equal(fit1.params['damping_slope'], np.NaN, 2)
        assert_almost_equal(fit1.params['initial_level'], 263.92, 2)
        assert_almost_equal(fit1.params['initial_slope'], np.NaN, 2)
        assert_almost_equal(fit1.sse, 6761.35, 2)  # 6080.26

        assert_almost_equal(fit4.params['smoothing_level'], 0.98, 2)
        assert_almost_equal(fit4.params['smoothing_slope'], 0.00, 2)
        assert_almost_equal(fit4.params['damping_slope'], 0.98, 2)
        assert_almost_equal(fit4.params['initial_level'], 257.36, 2)
        assert_almost_equal(fit4.params['initial_slope'], 6.51, 2)
        assert_almost_equal(fit4.sse, 6036.56, 2)  # 6080.26

        assert_almost_equal(fit5.params['smoothing_level'], 0.97, 2)
        assert_almost_equal(fit5.params['smoothing_slope'], 0.00, 2)
        assert_almost_equal(fit5.params['damping_slope'], 0.98, 2)
        assert_almost_equal(fit5.params['initial_level'], 258.95, 2)
        assert_almost_equal(fit5.params['initial_slope'], 1.02, 2)
        assert_almost_equal(fit5.sse, 6082.00, 2)  # 6100.11

    def test_hw_seasonal(self):
        fit1 = ExponentialSmoothing(self.aust, seasonal_periods=4,
                                    trend='additive',
                                    seasonal='additive').fit(use_boxcox=True)
        fit2 = ExponentialSmoothing(self.aust, seasonal_periods=4, trend='add',
                                    seasonal='mul').fit(use_boxcox=True)
        assert_almost_equal(fit1.forecast(8),
                            [61.34, 37.24, 46.84, 51.01, 64.47, 39.78, 49.64, 53.90],
                            2)
        assert_almost_equal(fit2.forecast(8),
                            [60.97, 36.99, 46.71, 51.48, 64.46, 39.02, 49.29, 54.32],
                            2)
        fit5 = ExponentialSmoothing(self.aust, seasonal_periods=4,
                                    trend='mul', seasonal='add'
                                    ).fit(use_boxcox='log')
        fit6 = ExponentialSmoothing(self.aust, seasonal_periods=4,
                                    trend='multiplicative',
                                    seasonal='multiplicative'
                                    ).fit(use_boxcox='log')
        # Skip since estimator is unstable
        # assert_almost_equal(fit5.forecast(1), [60.60], 2)
        # assert_almost_equal(fit6.forecast(1), [61.47], 2)

    # Was `@pytest.mark.xpass`, which is not a pytest mark; the intent
    # (matching test_forecast above) is an expected failure.
    @pytest.mark.xfail(reason='Optimizer does not converge')
    def test_hw_seasonal_buggy(self):
        fit3 = ExponentialSmoothing(self.aust, seasonal_periods=4,
                                    seasonal='add').fit(use_boxcox=True)
        assert_almost_equal(fit3.forecast(8),
                            [59.91, 35.71, 44.64, 47.62, 59.91, 35.71, 44.64, 47.62],
                            2)
        fit4 = ExponentialSmoothing(self.aust, seasonal_periods=4,
                                    seasonal='mul').fit(use_boxcox=True)
        assert_almost_equal(fit4.forecast(8),
                            [60.71, 35.70, 44.63, 47.55, 60.71, 35.70, 44.63, 47.55],
                            2)
@pytest.mark.parametrize('trend_seasonal', (('mul', None), (None, 'mul'), ('mul', 'mul')))
def test_negative_multipliative(trend_seasonal):
    """A multiplicative trend or seasonal must reject non-positive data."""
    trend, seasonal = trend_seasonal
    negative_series = -np.ones(100)
    with pytest.raises(ValueError):
        ExponentialSmoothing(negative_series, trend=trend, seasonal=seasonal,
                             seasonal_periods=10)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_dampen_no_trend(seasonal):
    """``damped=True`` without a trend component must raise ValueError."""
    # (removed an unused local ``y = -np.ones(100)``; the model is fit on
    # housing_data, not on that array)
    with pytest.raises(ValueError):
        ExponentialSmoothing(housing_data, trend=False, seasonal=seasonal, damped=True,
                             seasonal_periods=10)
@pytest.mark.parametrize('seasonal', ('add', 'mul'))
def test_invalid_seasonal(seasonal):
    """A seasonal component with ``seasonal_periods=1`` is rejected."""
    index = pd.date_range('2000-1-1', periods=100, freq='MS')
    y = pd.Series(-np.ones(100), index=index)
    with pytest.raises(ValueError):
        ExponentialSmoothing(y, seasonal=seasonal, seasonal_periods=1)
def test_2d_data():
    """Multivariate (2-D) endog must be rejected with ValueError."""
    with pytest.raises(ValueError):
        # axis passed by keyword: positional `axis` for pd.concat is deprecated
        ExponentialSmoothing(pd.concat([housing_data, housing_data], axis=1)).fit()
def test_infer_freq():
    """The seasonal period is inferred (with a warning) from a DatetimeIndex
    whose ``freq`` attribute has been dropped."""
    data = housing_data.copy()
    data.index = list(data.index)  # strip the index frequency
    with warnings.catch_warnings(record=True) as w:
        mod = ExponentialSmoothing(data, trend='add', seasonal='add')
        assert len(w) == 1
        assert 'ValueWarning' in str(w[0])
    assert mod.seasonal_periods == 12
@pytest.mark.parametrize('trend', TRENDS)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_start_params(trend, seasonal):
    """Refitting from the previous MLE solution can only improve the SSE."""
    # Pass the parametrized trend/seasonal through; they were previously
    # ignored in favor of hard-coded 'add'/'add', so all nine parametrized
    # cases ran the identical test.
    mod = ExponentialSmoothing(housing_data, trend=trend, seasonal=seasonal)
    res = mod.fit()
    res2 = mod.fit(start_params=res.mle_retvals.x)
    assert res2.sse <= res.sse
def test_no_params_to_optimize():
    """Fixing every free parameter warns that there is nothing to estimate."""
    model = ExponentialSmoothing(housing_data)
    with pytest.warns(EstimationWarning):
        model.fit(smoothing_level=0.5, initial_level=housing_data.iloc[0])
def test_invalid_start_param_length():
    """A start_params vector of the wrong length raises ValueError."""
    model = ExponentialSmoothing(housing_data)
    too_short = np.array([0.5])
    with pytest.raises(ValueError):
        model.fit(start_params=too_short)
def test_basin_hopping():
    """Basin-hopping refinement should never end with a worse SSE."""
    model = ExponentialSmoothing(housing_data, trend='add')
    baseline = model.fit()
    refined = model.fit(use_basinhopping=True)
    assert refined.sse <= baseline.sse
def test_debiased():
    """remove_bias=True must change at least one fitted value."""
    model = ExponentialSmoothing(housing_data, trend='add')
    plain = model.fit()
    debiased = model.fit(remove_bias=True)
    assert np.any(plain.fittedvalues != debiased.fittedvalues)
@pytest.mark.parametrize('trend', TRENDS)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_float_boxcox_smoke(trend, seasonal):
    """A float ``use_boxcox`` value is stored back as the lambda parameter."""
    model = ExponentialSmoothing(housing_data, trend=trend, seasonal=seasonal)
    res = model.fit(use_boxcox=0.5)
    assert_allclose(res.params['use_boxcox'], 0.5)
@pytest.mark.parametrize('trend', TRENDS)
@pytest.mark.parametrize('seasonal', SEASONALS)
def test_equivalence_cython_python(trend, seasonal):
    # Fit once to obtain a parameter vector, then evaluate both the Cython
    # (SMOOTHERS) and pure-Python (PY_SMOOTHERS) loss functions at those
    # parameters and check they agree.
    mod = ExponentialSmoothing(housing_data, trend=trend, seasonal=seasonal)
    res = mod.fit()
    res.summary()  # Smoke test
    params = res.params
    nobs = housing_data.shape[0]
    y = np.squeeze(np.asarray(housing_data))
    m = 12 if seasonal else 0
    # Work buffers the smoother fills in place: level, slope, seasonal factors.
    l = np.zeros(nobs)
    b = np.zeros(nobs)
    s = np.zeros(nobs + m - 1)
    # Packed parameter vector: alpha, beta, gamma, l0, b0, phi, then the
    # initial seasonal factors.
    p = np.zeros(6 + m)
    max_seen = np.finfo(np.double).max
    alpha = params['smoothing_level']
    beta = params['smoothing_slope']
    gamma = params['smoothing_seasonal']
    phi = params['damping_slope']
    # Undamped fits report NaN for phi; the loss functions expect 1.0 then.
    phi = 1.0 if np.isnan(phi) else phi
    l0 = params['initial_level']
    b0 = params['initial_slope']
    p[:6] = alpha, beta, gamma, l0, b0, phi
    if seasonal:
        p[6:] = params['initial_seasons']
    # xi marks every parameter as free for the loss evaluation.
    xi = np.ones_like(p).astype(np.uint8)
    py_func = PY_SMOOTHERS[(seasonal, trend)]
    cy_func = SMOOTHERS[(seasonal, trend)]
    p_copy = p.copy()
    sse_cy = cy_func(p, xi, p_copy, y, l, b, s, m, nobs, max_seen)
    sse_py = py_func(p, xi, p_copy, y, l, b, s, m, nobs, max_seen)
    assert_allclose(sse_py, sse_cy)
def test_direct_holt_add():
    # Validate the library smoothers against the simple direct implementation
    # above: first SES (beta fixed at 0), then Holt's additive-trend method.
    mod = SimpleExpSmoothing(housing_data)
    res = mod.fit()
    x = np.squeeze(np.asarray(mod.endog))
    alpha = res.params['smoothing_level']
    l, b, f, err, xhat = _simple_dbl_exp_smoother(x, alpha, beta=0.0,
                                             l0=res.params['initial_level'], b0=0.0,
                                             nforecast=5)

    assert_allclose(l, res.level)
    # With no trend the forecast is flat at the final level.
    assert_allclose(f, res.level.iloc[-1] * np.ones(5))
    assert_allclose(f, res.forecast(5))

    mod = ExponentialSmoothing(housing_data, trend='add')
    res = mod.fit()
    x = np.squeeze(np.asarray(mod.endog))
    alpha = res.params['smoothing_level']
    beta = res.params['smoothing_slope']
    l, b, f, err, xhat = _simple_dbl_exp_smoother(x, alpha, beta=beta,
                                             l0=res.params['initial_level'],
                                             b0=res.params['initial_slope'], nforecast=5)

    assert_allclose(xhat, res.fittedvalues)
    assert_allclose(l + b, res.level + res.slope)
    assert_allclose(l, res.level)
    assert_allclose(b, res.slope)
    # h-step-ahead forecast: final level plus h times the final slope.
    assert_allclose(f, res.level.iloc[-1] + res.slope.iloc[-1] * np.array([1, 2, 3, 4, 5]))
    assert_allclose(f, res.forecast(5))
| [
"statsmodels.tsa.holtwinters.Holt",
"pandas.infer_freq",
"statsmodels.tsa.holtwinters.SimpleExpSmoothing",
"numpy.array",
"numpy.arange",
"pandas.date_range",
"pytest.mark.xpass",
"statsmodels.tsa.holtwinters.ExponentialSmoothing",
"pytest.mark.xfail",
"numpy.testing.assert_allclose",
"numpy.asa... | [((16997, 17091), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trend_seasonal"""', "(('mul', None), (None, 'mul'), ('mul', 'mul'))"], {}), "('trend_seasonal', (('mul', None), (None, 'mul'), (\n 'mul', 'mul')))\n", (17020, 17091), False, 'import pytest\n'), ((17319, 17365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seasonal"""', 'SEASONALS'], {}), "('seasonal', SEASONALS)\n", (17342, 17365), False, 'import pytest\n'), ((17601, 17652), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seasonal"""', "('add', 'mul')"], {}), "('seasonal', ('add', 'mul'))\n", (17624, 17652), False, 'import pytest\n'), ((18342, 18382), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trend"""', 'TRENDS'], {}), "('trend', TRENDS)\n", (18365, 18382), False, 'import pytest\n'), ((18384, 18430), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seasonal"""', 'SEASONALS'], {}), "('seasonal', SEASONALS)\n", (18407, 18430), False, 'import pytest\n'), ((19388, 19428), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trend"""', 'TRENDS'], {}), "('trend', TRENDS)\n", (19411, 19428), False, 'import pytest\n'), ((19430, 19476), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seasonal"""', 'SEASONALS'], {}), "('seasonal', SEASONALS)\n", (19453, 19476), False, 'import pytest\n'), ((19674, 19714), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trend"""', 'TRENDS'], {}), "('trend', TRENDS)\n", (19697, 19714), False, 'import pytest\n'), ((19716, 19762), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seasonal"""', 'SEASONALS'], {}), "('seasonal', SEASONALS)\n", (19739, 19762), False, 'import pytest\n'), ((426, 451), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (441, 451), False, 'import os\n'), ((480, 529), 'os.path.join', 'os.path.join', (['base', '"""results"""', '"""housing-data.csv"""'], {}), "(base, 'results', 'housing-data.csv')\n", (492, 529), 
False, 'import os\n'), ((870, 881), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (878, 881), True, 'import numpy as np\n'), ((890, 901), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (898, 901), True, 'import numpy as np\n'), ((913, 924), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (921, 924), True, 'import numpy as np\n'), ((933, 952), 'numpy.zeros', 'np.zeros', (['nforecast'], {}), '(nforecast)\n', (941, 952), True, 'import numpy as np\n'), ((10613, 10668), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Optimizer does not converge"""'}), "(reason='Optimizer does not converge')\n", (10630, 10668), False, 'import pytest\n'), ((16294, 16349), 'pytest.mark.xpass', 'pytest.mark.xpass', ([], {'reason': '"""Optimizer does not converge"""'}), "(reason='Optimizer does not converge')\n", (16311, 16349), False, 'import pytest\n'), ((18481, 18544), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {'trend': '"""add"""', 'seasonal': '"""add"""'}), "(housing_data, trend='add', seasonal='add')\n", (18501, 18544), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((18693, 18727), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {}), '(housing_data)\n', (18713, 18727), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((18894, 18928), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {}), '(housing_data)\n', (18914, 18928), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((19049, 19096), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {'trend': '"""add"""'}), "(housing_data, trend='add')\n", (19069, 19096), False, 'from statsmodels.tsa.holtwinters 
import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((19223, 19270), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {'trend': '"""add"""'}), "(housing_data, trend='add')\n", (19243, 19270), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((19339, 19384), 'numpy.any', 'np.any', (['(res.fittedvalues != res2.fittedvalues)'], {}), '(res.fittedvalues != res2.fittedvalues)\n', (19345, 19384), True, 'import numpy as np\n'), ((19624, 19670), 'numpy.testing.assert_allclose', 'assert_allclose', (["res.params['use_boxcox']", '(0.5)'], {}), "(res.params['use_boxcox'], 0.5)\n", (19639, 19670), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((19826, 19892), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {'trend': 'trend', 'seasonal': 'seasonal'}), '(housing_data, trend=trend, seasonal=seasonal)\n', (19846, 19892), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((20085, 20099), 'numpy.zeros', 'np.zeros', (['nobs'], {}), '(nobs)\n', (20093, 20099), True, 'import numpy as np\n'), ((20108, 20122), 'numpy.zeros', 'np.zeros', (['nobs'], {}), '(nobs)\n', (20116, 20122), True, 'import numpy as np\n'), ((20131, 20153), 'numpy.zeros', 'np.zeros', (['(nobs + m - 1)'], {}), '(nobs + m - 1)\n', (20139, 20153), True, 'import numpy as np\n'), ((20162, 20177), 'numpy.zeros', 'np.zeros', (['(6 + m)'], {}), '(6 + m)\n', (20170, 20177), True, 'import numpy as np\n'), ((20867, 20898), 'numpy.testing.assert_allclose', 'assert_allclose', (['sse_py', 'sse_cy'], {}), '(sse_py, sse_cy)\n', (20882, 20898), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((20939, 20971), 'statsmodels.tsa.holtwinters.SimpleExpSmoothing', 'SimpleExpSmoothing', (['housing_data'], {}), 
'(housing_data)\n', (20957, 20971), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((21304, 21333), 'numpy.testing.assert_allclose', 'assert_allclose', (['l', 'res.level'], {}), '(l, res.level)\n', (21319, 21333), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((21441, 21488), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {'trend': '"""add"""'}), "(housing_data, trend='add')\n", (21461, 21488), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((21887, 21926), 'numpy.testing.assert_allclose', 'assert_allclose', (['xhat', 'res.fittedvalues'], {}), '(xhat, res.fittedvalues)\n', (21902, 21926), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((21931, 21976), 'numpy.testing.assert_allclose', 'assert_allclose', (['(l + b)', '(res.level + res.slope)'], {}), '(l + b, res.level + res.slope)\n', (21946, 21976), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((21981, 22010), 'numpy.testing.assert_allclose', 'assert_allclose', (['l', 'res.level'], {}), '(l, res.level)\n', (21996, 22010), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((22015, 22044), 'numpy.testing.assert_allclose', 'assert_allclose', (['b', 'res.slope'], {}), '(b, res.slope)\n', (22030, 22044), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((2772, 2794), 'pandas.Series', 'pd.Series', (['data', 'index'], {}), '(data, index)\n', (2781, 2794), True, 'import pandas as pd\n'), ((4273, 4295), 'pandas.Series', 'pd.Series', (['data', 'index'], {}), '(data, index)\n', (4282, 4295), True, 'import pandas as pd\n'), ((7079, 7101), 'pandas.Series', 'pd.Series', (['data', 'index'], {}), '(data, index)\n', (7088, 7101), True, 'import pandas as pd\n'), ((9285, 9307), 'pandas.Series', 
'pd.Series', (['data', 'index'], {}), '(data, index)\n', (9294, 9307), True, 'import pandas as pd\n'), ((11284, 11495), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fit1.level', '[446.6565229, 448.21987962, 449.7084985, 444.49324656, 446.84886283, \n 445.59670028, 441.54386424, 450.26498098, 461.4216172, 474.49569042, \n 482.45033014, 484.80246797]', '(4)'], {}), '(fit1.level, [446.6565229, 448.21987962, 449.7084985, \n 444.49324656, 446.84886283, 445.59670028, 441.54386424, 450.26498098, \n 461.4216172, 474.49569042, 482.45033014, 484.80246797], 4)\n', (11303, 11495), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((11736, 11800), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit3.params['smoothing_level']", '(0.891998)', '(4)'], {}), "(fit3.params['smoothing_level'], 0.891998, 4)\n", (11755, 11800), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((11864, 11927), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit3.params['initial_level']", '(447.47844)', '(3)'], {}), "(fit3.params['initial_level'], 447.47844, 3)\n", (11883, 11927), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((12483, 12705), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fit1.slope', '[3.617628, 3.59006512, 3.33438212, 3.23657639, 2.69263502, 2.46388914, \n 2.2229097, 1.95959226, 1.47054601, 1.3604894, 1.28045881, 1.20355193, \n 1.88267152, 2.09564416, 1.83655482]', '(4)'], {}), '(fit1.slope, [3.617628, 3.59006512, 3.33438212, \n 3.23657639, 2.69263502, 2.46388914, 2.2229097, 1.95959226, 1.47054601, \n 1.3604894, 1.28045881, 1.20355193, 1.88267152, 2.09564416, 1.83655482], 4)\n', (12502, 12705), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((12790, 13047), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fit1.fittedfcast', '[21.8601, 22.032368, 25.48461872, 27.54058587, 30.28813356, 30.26106173, \n 
31.58122149, 32.599234, 33.24223906, 32.26755382, 33.07776017, \n 33.95806605, 34.77708354, 40.05535303, 43.21586036, 43.75696849]', '(4)'], {}), '(fit1.fittedfcast, [21.8601, 22.032368, 25.48461872, \n 27.54058587, 30.28813356, 30.26106173, 31.58122149, 32.599234, \n 33.24223906, 32.26755382, 33.07776017, 33.95806605, 34.77708354, \n 40.05535303, 43.21586036, 43.75696849], 4)\n', (12809, 13047), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((13489, 13533), 'statsmodels.tsa.holtwinters.Holt', 'Holt', (['self.livestock2_livestock'], {'damped': '(True)'}), '(self.livestock2_livestock, damped=True)\n', (13493, 13533), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((13593, 13655), 'statsmodels.tsa.holtwinters.Holt', 'Holt', (['self.livestock2_livestock'], {'exponential': '(True)', 'damped': '(True)'}), '(self.livestock2_livestock, exponential=True, damped=True)\n', (13597, 13655), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((13769, 13828), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit1.params['smoothing_level']", '(1.0)', '(2)'], {}), "(fit1.params['smoothing_level'], 1.0, 2)\n", (13788, 13828), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((13838, 13900), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit1.params['smoothing_slope']", 'np.NaN', '(2)'], {}), "(fit1.params['smoothing_slope'], np.NaN, 2)\n", (13857, 13900), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((13909, 13969), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit1.params['damping_slope']", 'np.NaN', '(2)'], {}), "(fit1.params['damping_slope'], np.NaN, 2)\n", (13928, 13969), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((13978, 14038), 
'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit1.params['initial_level']", '(263.92)', '(2)'], {}), "(fit1.params['initial_level'], 263.92, 2)\n", (13997, 14038), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14047, 14107), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit1.params['initial_slope']", 'np.NaN', '(2)'], {}), "(fit1.params['initial_slope'], np.NaN, 2)\n", (14066, 14107), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14116, 14157), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fit1.sse', '(6761.35)', '(2)'], {}), '(fit1.sse, 6761.35, 2)\n', (14135, 14157), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14178, 14238), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit4.params['smoothing_level']", '(0.98)', '(2)'], {}), "(fit4.params['smoothing_level'], 0.98, 2)\n", (14197, 14238), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14247, 14306), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit4.params['smoothing_slope']", '(0.0)', '(2)'], {}), "(fit4.params['smoothing_slope'], 0.0, 2)\n", (14266, 14306), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14316, 14374), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit4.params['damping_slope']", '(0.98)', '(2)'], {}), "(fit4.params['damping_slope'], 0.98, 2)\n", (14335, 14374), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14383, 14443), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit4.params['initial_level']", '(257.36)', '(2)'], {}), "(fit4.params['initial_level'], 257.36, 2)\n", (14402, 14443), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14452, 14510), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit4.params['initial_slope']", '(6.51)', '(2)'], {}), 
"(fit4.params['initial_slope'], 6.51, 2)\n", (14471, 14510), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14519, 14560), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fit4.sse', '(6036.56)', '(2)'], {}), '(fit4.sse, 6036.56, 2)\n', (14538, 14560), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14580, 14640), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit5.params['smoothing_level']", '(0.97)', '(2)'], {}), "(fit5.params['smoothing_level'], 0.97, 2)\n", (14599, 14640), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14649, 14708), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit5.params['smoothing_slope']", '(0.0)', '(2)'], {}), "(fit5.params['smoothing_slope'], 0.0, 2)\n", (14668, 14708), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14718, 14776), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit5.params['damping_slope']", '(0.98)', '(2)'], {}), "(fit5.params['damping_slope'], 0.98, 2)\n", (14737, 14776), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14785, 14845), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit5.params['initial_level']", '(258.95)', '(2)'], {}), "(fit5.params['initial_level'], 258.95, 2)\n", (14804, 14845), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14854, 14912), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["fit5.params['initial_slope']", '(1.02)', '(2)'], {}), "(fit5.params['initial_slope'], 1.02, 2)\n", (14873, 14912), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((14921, 14961), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fit5.sse', '(6082.0)', '(2)'], {}), '(fit5.sse, 6082.0, 2)\n', (14940, 14961), False, 'from numpy.testing import assert_almost_equal, assert_allclose\n'), ((17182, 17194), 
'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (17189, 17194), True, 'import numpy as np\n'), ((17204, 17229), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17217, 17229), False, 'import pytest\n'), ((17239, 17315), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['y'], {'trend': 'trend', 'seasonal': 'seasonal', 'seasonal_periods': '(10)'}), '(y, trend=trend, seasonal=seasonal, seasonal_periods=10)\n', (17259, 17315), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((17411, 17423), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (17418, 17423), True, 'import numpy as np\n'), ((17433, 17458), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17446, 17458), False, 'import pytest\n'), ((17468, 17573), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {'trend': '(False)', 'seasonal': 'seasonal', 'damped': '(True)', 'seasonal_periods': '(10)'}), '(housing_data, trend=False, seasonal=seasonal, damped=\n True, seasonal_periods=10)\n', (17488, 17573), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((17788, 17813), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17801, 17813), False, 'import pytest\n'), ((17823, 17885), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['y'], {'seasonal': 'seasonal', 'seasonal_periods': '(1)'}), '(y, seasonal=seasonal, seasonal_periods=1)\n', (17843, 17885), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((17917, 17942), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17930, 17942), False, 'import pytest\n'), ((18119, 18155), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': 
'(True)'}), '(record=True)\n', (18142, 18155), False, 'import warnings\n'), ((18176, 18230), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['hd2'], {'trend': '"""add"""', 'seasonal': '"""add"""'}), "(hd2, trend='add', seasonal='add')\n", (18196, 18230), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((18737, 18768), 'pytest.warns', 'pytest.warns', (['EstimationWarning'], {}), '(EstimationWarning)\n', (18749, 18768), False, 'import pytest\n'), ((18938, 18963), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18951, 18963), False, 'import pytest\n'), ((20021, 20045), 'numpy.asarray', 'np.asarray', (['housing_data'], {}), '(housing_data)\n', (20031, 20045), True, 'import numpy as np\n'), ((20193, 20212), 'numpy.finfo', 'np.finfo', (['np.double'], {}), '(np.double)\n', (20201, 20212), True, 'import numpy as np\n'), ((20384, 20397), 'numpy.isnan', 'np.isnan', (['phi'], {}), '(phi)\n', (20392, 20397), True, 'import numpy as np\n'), ((21011, 21032), 'numpy.asarray', 'np.asarray', (['mod.endog'], {}), '(mod.endog)\n', (21021, 21032), True, 'import numpy as np\n'), ((21528, 21549), 'numpy.asarray', 'np.asarray', (['mod.endog'], {}), '(mod.endog)\n', (21538, 21549), True, 'import numpy as np\n'), ((1438, 1465), 'numpy.arange', 'np.arange', (['(1)', '(nforecast + 1)'], {}), '(1, nforecast + 1)\n', (1447, 1465), True, 'import numpy as np\n'), ((17709, 17721), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (17716, 17721), True, 'import numpy as np\n'), ((17728, 17777), 'pandas.date_range', 'pd.date_range', (['"""2000-1-1"""'], {'periods': '(100)', 'freq': '"""MS"""'}), "('2000-1-1', periods=100, freq='MS')\n", (17741, 17777), True, 'import pandas as pd\n'), ((19533, 19599), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['housing_data'], {'trend': 'trend', 'seasonal': 'seasonal'}), '(housing_data, trend=trend, 
seasonal=seasonal)\n', (19553, 19599), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((20585, 20600), 'numpy.ones_like', 'np.ones_like', (['p'], {}), '(p)\n', (20597, 20600), True, 'import numpy as np\n'), ((21378, 21388), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (21385, 21388), True, 'import numpy as np\n'), ((2909, 2941), 'pandas.infer_freq', 'pd.infer_freq', (['oildata_oil.index'], {}), '(oildata_oil.index)\n', (2922, 2941), True, 'import pandas as pd\n'), ((4407, 4438), 'pandas.infer_freq', 'pd.infer_freq', (['air_ausair.index'], {}), '(air_ausair.index)\n', (4420, 4438), True, 'import pandas as pd\n'), ((7214, 7255), 'pandas.infer_freq', 'pd.infer_freq', (['livestock2_livestock.index'], {}), '(livestock2_livestock.index)\n', (7227, 7255), True, 'import pandas as pd\n'), ((9401, 9426), 'pandas.infer_freq', 'pd.infer_freq', (['aust.index'], {}), '(aust.index)\n', (9414, 9426), True, 'import pandas as pd\n'), ((9496, 9581), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'trend': '"""add"""', 'seasonal': '"""mul"""'}), "(self.aust, seasonal_periods=4, trend='add', seasonal='mul'\n )\n", (9516, 9581), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((9634, 9719), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'trend': '"""add"""', 'seasonal': '"""mul"""'}), "(self.aust, seasonal_periods=4, trend='add', seasonal='mul'\n )\n", (9654, 9719), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((10390, 10481), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust.values'], {'seasonal_periods': '(4)', 'trend': '"""add"""', 'seasonal': '"""mul"""'}), 
"(self.aust.values, seasonal_periods=4, trend='add',\n seasonal='mul')\n", (10410, 10481), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((10713, 10798), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'trend': '"""add"""', 'seasonal': '"""add"""'}), "(self.aust, seasonal_periods=4, trend='add', seasonal='add'\n )\n", (10733, 10798), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((11014, 11050), 'statsmodels.tsa.holtwinters.SimpleExpSmoothing', 'SimpleExpSmoothing', (['self.oildata_oil'], {}), '(self.oildata_oil)\n', (11032, 11050), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((11092, 11128), 'statsmodels.tsa.holtwinters.SimpleExpSmoothing', 'SimpleExpSmoothing', (['self.oildata_oil'], {}), '(self.oildata_oil)\n', (11110, 11128), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((11170, 11206), 'statsmodels.tsa.holtwinters.SimpleExpSmoothing', 'SimpleExpSmoothing', (['self.oildata_oil'], {}), '(self.oildata_oil)\n', (11188, 11206), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((11970, 11991), 'statsmodels.tsa.holtwinters.Holt', 'Holt', (['self.air_ausair'], {}), '(self.air_ausair)\n', (11974, 11991), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((12111, 12150), 'statsmodels.tsa.holtwinters.Holt', 'Holt', (['self.air_ausair'], {'exponential': '(True)'}), '(self.air_ausair, exponential=True)\n', (12115, 12150), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, 
PY_SMOOTHERS\n'), ((12254, 12288), 'statsmodels.tsa.holtwinters.Holt', 'Holt', (['self.air_ausair'], {'damped': '(True)'}), '(self.air_ausair, damped=True)\n', (12258, 12288), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((13422, 13467), 'statsmodels.tsa.holtwinters.SimpleExpSmoothing', 'SimpleExpSmoothing', (['self.livestock2_livestock'], {}), '(self.livestock2_livestock)\n', (13440, 13467), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((15022, 15116), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'trend': '"""additive"""', 'seasonal': '"""additive"""'}), "(self.aust, seasonal_periods=4, trend='additive',\n seasonal='additive')\n", (15042, 15116), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((15221, 15306), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'trend': '"""add"""', 'seasonal': '"""mul"""'}), "(self.aust, seasonal_periods=4, trend='add', seasonal='mul'\n )\n", (15241, 15306), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((15700, 15785), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'trend': '"""mul"""', 'seasonal': '"""add"""'}), "(self.aust, seasonal_periods=4, trend='mul', seasonal='add'\n )\n", (15720, 15785), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((15891, 15997), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'trend': '"""multiplicative"""', 'seasonal': 
'"""multiplicative"""'}), "(self.aust, seasonal_periods=4, trend='multiplicative',\n seasonal='multiplicative')\n", (15911, 15997), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((16403, 16470), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'seasonal': '"""add"""'}), "(self.aust, seasonal_periods=4, seasonal='add')\n", (16423, 16470), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((16706, 16773), 'statsmodels.tsa.holtwinters.ExponentialSmoothing', 'ExponentialSmoothing', (['self.aust'], {'seasonal_periods': '(4)', 'seasonal': '"""mul"""'}), "(self.aust, seasonal_periods=4, seasonal='mul')\n", (16726, 16773), False, 'from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS\n'), ((18994, 19009), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (19002, 19009), True, 'import numpy as np\n'), ((22110, 22135), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (22118, 22135), True, 'import numpy as np\n'), ((17973, 18015), 'pandas.concat', 'pd.concat', (['[housing_data, housing_data]', '(1)'], {}), '([housing_data, housing_data], 1)\n', (17982, 18015), True, 'import pandas as pd\n')] |
import tensorflow as tf
from baconian.core.core import Basic, EnvSpec
import numpy as np
import abc
from baconian.core.parameters import Parameters
from typeguard import typechecked
from tensorflow.python.ops.parallel_for.gradients import batch_jacobian as tf_batch_jacobian
from baconian.common.logging import Recorder
from baconian.core.status import register_counter_info_to_status_decorator, StatusWithSingleInfo
from baconian.common.logging import ConsoleLogger
from baconian.common.error import *
from baconian.core.core import EnvSpec, Env
from baconian.algo.dynamics.reward_func.reward_func import RewardFunc
from baconian.algo.dynamics.terminal_func.terminal_func import TerminalFunc
from baconian.common.data_pre_processing import DataScaler, IdenticalDataScaler
class DynamicsModel(Basic):
    """
    Abstract base class for environment dynamics models.

    A dynamics model maps a (state, action) pair to a successor state via
    :meth:`_state_transit`, which concrete subclasses must implement. The
    most recent state is cached in ``self.state`` so that :meth:`step` can be
    called repeatedly, environment-style. :meth:`return_as_env` wraps the
    model into a standard :class:`Env` interface.
    """
    # Status values used by the Basic/StatusWithSingleInfo machinery.
    STATUS_LIST = ('CREATED', 'INITED')
    INIT_STATUS = 'CREATED'
    def __init__(self, env_spec: EnvSpec, parameters: Parameters = None, init_state=None, name='dynamics_model',
                 state_input_scaler: DataScaler = None,
                 action_input_scaler: DataScaler = None,
                 state_output_scaler: DataScaler = None):
        """
        :param env_spec: environment specifications, such as observation space and action space
        :type env_spec: EnvSpec
        :param parameters: parameters of the dynamics model
        :type parameters: Parameters
        :param init_state: initial state of the dynamics model
        :type init_state: np.ndarray
        :param name: name of instance, 'dynamics_model' by default
        :type name: str
        :param state_input_scaler: data preprocessing scaler of state input;
            defaults to an identity scaler (no-op) when None
        :type state_input_scaler: DataScaler
        :param action_input_scaler: data preprocessing scaler of action input;
            defaults to an identity scaler (no-op) when None
        :type action_input_scaler: DataScaler
        :param state_output_scaler: data preprocessing scaler of state output;
            defaults to an identity scaler (no-op) when None
        :type state_output_scaler: DataScaler
        """
        super().__init__(name=name)
        self.env_spec = env_spec
        self.state = init_state
        self.parameters = parameters
        # Placeholders for subclasses that build a computation graph
        # (e.g. tensorflow input/output nodes); unused by this base class.
        self.state_input = None
        self.action_input = None
        self.new_state_output = None
        self.recorder = Recorder(flush_by_split_status=False, default_obj=self)
        self._status = StatusWithSingleInfo(obj=self)
        # Fall back to identity scalers (dimension-preserving no-ops) when the
        # caller supplies none.
        self.state_input_scaler = state_input_scaler if state_input_scaler else IdenticalDataScaler(
            dims=env_spec.flat_obs_dim)
        self.action_input_scaler = action_input_scaler if action_input_scaler else IdenticalDataScaler(
            dims=env_spec.flat_action_dim)
        self.state_output_scaler = state_output_scaler if state_output_scaler else IdenticalDataScaler(
            dims=env_spec.flat_obs_dim)
    def init(self, *args, **kwargs):
        """Mark the model as INITED and reset its state to a random observation sample."""
        self.set_status('INITED')
        self.state = self.env_spec.obs_space.sample()
    @register_counter_info_to_status_decorator(increment=1, info_key='step_counter')
    def step(self, action: np.ndarray, state=None, allow_clip=False, **kwargs_for_transit):
        """
        State transition function (only support one sample transition instead of batch data)

        :param action: action to be taken
        :type action: np.ndarray
        :param state: current state, if None, will use stored state (saved from last transition)
        :type state: np.ndarray
        :param allow_clip: allow clip of observation space, default False
        :type allow_clip: bool
        :param kwargs_for_transit: extra kwargs for calling the _state_transit, this is typically related to the
        specific mode you used
        :type kwargs_for_transit:
        :raises StateOrActionOutOfBoundError: if the (possibly clipped) action, or the
            resulting new state, lies outside the corresponding space bounds
        :return: new state after step
        :rtype: np.ndarray
        """
        # Normalise shapes: fall back to the cached state when none is given.
        state = np.array(state).reshape(self.env_spec.obs_shape) if state is not None else self.state
        action = action.reshape(self.env_spec.action_shape)
        # Optionally clip inputs into their spaces BEFORE the bound check below.
        if allow_clip is True:
            if state is not None:
                state = self.env_spec.obs_space.clip(state)
            action = self.env_spec.action_space.clip(action)
        if self.env_spec.action_space.contains(action) is False:
            raise StateOrActionOutOfBoundError(
                'action {} out of bound of {}'.format(action, self.env_spec.action_space.bound()))
        # NOTE: the input-state bound check is deliberately disabled here;
        # only the action and the produced new state are validated.
        # if self.env_spec.obs_space.contains(state) is False:
        #     raise StateOrActionOutOfBoundError(
        #         'state {} out of bound of {}'.format(state, self.env_spec.obs_space.bound()))
        new_state = self._state_transit(state=state, action=self.env_spec.flat_action(action),
                                        **kwargs_for_transit)
        if allow_clip is True:
            new_state = self.env_spec.obs_space.clip(new_state)
        if self.env_spec.obs_space.contains(new_state) is False:
            raise StateOrActionOutOfBoundError(
                'new state {} out of bound of {}'.format(new_state, self.env_spec.obs_space.bound()))
        # Cache the successor state so the next step() can continue from it.
        self.state = new_state
        return new_state
    @abc.abstractmethod
    def _state_transit(self, state, action, **kwargs) -> np.ndarray:
        """
        Core transition: subclasses implement the actual state-successor mapping.

        :param state: original state
        :type state: np.ndarray
        :param action: action taken by agent (already flattened by step())
        :type action: np.ndarray
        :param kwargs: mode-specific extra arguments forwarded from step()
        :type kwargs:
        :return: new state after transition
        :rtype: np.ndarray
        """
        raise NotImplementedError
    def copy_from(self, obj) -> bool:
        """
        Type-check hook for copying; subclasses extend this with actual copying.

        :param obj: object to copy from
        :type obj:
        :raises TypeError: if obj is not the same type as self
        :return: True if successful else raise an error
        :rtype: bool
        """
        if not isinstance(obj, type(self)):
            raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
        return True
    def make_copy(self):
        """ Make a copy of parameters and environment specifications."""
        raise NotImplementedError
    def reset_state(self, state=None):
        """
        Reset the cached state, to a given state or to a random sample.

        :param state: state to reset to; when None, draw a random sample
            from the observation space
        :type state: np.ndarray
        """
        if state is not None:
            # Bound check intentionally disabled:
            #assert self.env_spec.obs_space.contains(state)
            self.state = state
        else:
            self.state = self.env_spec.obs_space.sample()
    def return_as_env(self) -> Env:
        """
        :return: an environment with this dynamics model
        :rtype: DynamicsEnvWrapper
        """
        return DynamicsEnvWrapper(dynamics=self,
                                  name=self._name + '_env')
class LocalDyanmicsModel(DynamicsModel):
    """
    Placeholder subclass of :class:`DynamicsModel` that adds no behaviour of
    its own; presumably a marker for locally-fitted dynamics models.

    NOTE(review): the class name carries a typo ("Dyanmics"); it is kept
    unchanged for backward compatibility with existing callers.
    """
class GlobalDynamicsModel(DynamicsModel):
    """
    Placeholder subclass of :class:`DynamicsModel` that adds no behaviour of
    its own; presumably a marker for globally-fitted dynamics models.
    """
class TrainableDyanmicsModel(object):
    """
    Interface mixin for dynamics models whose parameters can be fitted to
    data; concrete models must override :meth:`train`.

    NOTE(review): the class name carries a typo ("Dyanmics"); it is kept
    unchanged for backward compatibility with existing callers.
    """

    def train(self, *args, **kwargs):
        """Fit the model to data. Subclasses must override; the base raises."""
        raise NotImplementedError
class DifferentiableDynamics(object):
    """
    Wrap tensorflow input/output nodes of a dynamics model so that first- and
    second-order derivative ops of the outputs w.r.t. an input node can be
    built lazily and cached (one cache per derivative order).
    """

    @typechecked
    def __init__(self, input_node_dict: dict, output_node_dict: dict):
        """
        :param input_node_dict: mapping from name to input ``tf.Tensor``
        :param output_node_dict: mapping from name to output ``tf.Tensor``
        :raises TypeError: if any dict value is not a ``tf.Tensor``
        """
        for node in input_node_dict.values():
            if not isinstance(node, tf.Tensor):
                raise TypeError('Derivable only support tf.Tensor as node')
        for node in output_node_dict.values():
            if not isinstance(node, tf.Tensor):
                raise TypeError('Derivable only support tf.Tensor as node')
        self.input_node_dict = input_node_dict
        self.output_node_dict = output_node_dict
        self.output_node_list = []
        for key in output_node_dict.keys():
            self.output_node_list.append(output_node_dict[key])
        # _grad_dict[order] caches derivative ops keyed by input node.
        # Index 0 is not used by grad_on_input (orders are 1 and 2); it is
        # only pre-filled with the raw output list keyed by input *name*.
        self._grad_dict = [{}, {}, {}]
        for val in input_node_dict:
            self._grad_dict[0][val] = self.output_node_list

    def grad_on_input(self, key_or_node: (str, tf.Tensor), order=1, batch_flag=False):
        """
        Return the derivative ops of every output node w.r.t. one input node,
        building and caching them on first use.

        :param key_or_node: a key of ``input_node_dict`` or the input tensor itself
        :param order: 1 for the batched Jacobian, any other value for the Hessian
        :param batch_flag: not supported yet
        :raises NotImplementedError: if batch_flag is True
        :return: list of derivative ops, one per output node
        """
        if batch_flag:
            raise NotImplementedError
        node = key_or_node if isinstance(key_or_node, tf.Tensor) else self.input_node_dict[key_or_node]
        # Bug fix: look the node up in the per-order cache dict. The original
        # code tested membership against the *list* of dicts
        # (``node not in self._grad_dict``), which never matched a tensor, so
        # the derivative ops were rebuilt -- and duplicated in the graph --
        # on every call.
        if node not in self._grad_dict[order]:
            if order == 1:
                grad_op = [tf_batch_jacobian(output=o_node, inp=node) for o_node in self.output_node_list]
            else:
                grad_op = [self.split_and_hessian(out_node=o_node, innode=node) for o_node in self.output_node_list]
            self._grad_dict[order][node] = grad_op
            return grad_op
        else:
            return self._grad_dict[order][node]

    def split_and_hessian(self, out_node, innode):
        """
        Build a stacked Hessian op of ``out_node`` w.r.t. ``innode``.

        NOTE(review): ``tf.split(out_node, 1, axis=1)`` splits into a single
        piece, so the loop body runs exactly once; presumably one slice per
        output column was intended -- confirm before changing.

        :return: tensor of stacked Hessians with the split axis moved first
        """
        out_nodes = tf.split(out_node, 1, axis=1)
        hessian_node = []
        for o_node in out_nodes:
            hessian_node.append(tf.stack(tf.hessians(o_node, innode)))
        # Stacking adds one leading dim; build a permutation that swaps the
        # first two axes so the split dimension leads.
        new_dim = len(hessian_node[0].shape.as_list()) + 1
        new_dim = list(range(new_dim))
        new_dim[0] = 1
        new_dim[1] = 0
        return tf.transpose(tf.stack(hessian_node), perm=new_dim)
class DynamicsEnvWrapper(Env):
    """
    Adapter that exposes a :class:`DynamicsModel` through the standard
    baconian :class:`Env` interface, so algorithms can interact with a
    learned model exactly as with a real environment.

    A reward function and a terminal function must be attached via
    :meth:`set_terminal_reward_func` before :meth:`step` is called.
    """

    @typechecked
    def __init__(self, dynamics: DynamicsModel, name: str = 'dynamics_env'):
        super().__init__(name)
        self._dynamics = dynamics
        self._reward_func = None
        self._terminal_func = None
        self.env_spec = dynamics.env_spec

    def step(self, action: np.ndarray, **kwargs):
        """Advance the wrapped dynamics one step and score the transition."""
        super().step(action)
        # Prefer a caller-supplied state; otherwise use the stored one.
        if 'state' in kwargs:
            old_state = kwargs['state']
        else:
            old_state = self.get_state()
        next_state = self._dynamics.step(action=action, **kwargs)
        reward = self._reward_func(state=old_state, new_state=next_state, action=action)
        done = self._terminal_func(state=old_state, action=action, new_state=next_state)
        return next_state, reward, done, ()

    def reset(self):
        """Reset the wrapped dynamics to a fresh state and return it."""
        super().reset()
        self._dynamics.reset_state()
        return self.get_state()

    def init(self):
        super().init()
        self._dynamics.init()

    def get_state(self):
        """Return the current state cached by the wrapped dynamics model."""
        return self._dynamics.state

    def seed(self, seed=None):
        # A learned dynamics model exposes nothing to seed; warn and ignore.
        ConsoleLogger().print('warning', 'seed on dynamics model has no effect ')

    def save(self, *args, **kwargs):
        """Delegate persistence to the wrapped dynamics model."""
        return self._dynamics.save(*args, **kwargs)

    def load(self, *args, **kwargs):
        """Delegate loading to the wrapped dynamics model."""
        return self._dynamics.load(*args, **kwargs)

    def set_terminal_reward_func(self, terminal_func: TerminalFunc, reward_func: RewardFunc):
        """Attach the reward and terminal functions used by :meth:`step`."""
        self._terminal_func = terminal_func
        self._reward_func = reward_func
class DynamicsPriorModel(Basic):
    """
    Base class for models that encode prior knowledge about environment
    dynamics; stores the environment specification and a parameter container.
    """

    def __init__(self, env_spec: EnvSpec, parameters: Parameters, name: str):
        """
        :param env_spec: environment specification (observation/action spaces)
        :param parameters: parameter container for the prior model
        :param name: registered name of this instance
        """
        super().__init__(name=name)
        self.parameters = parameters
        self.env_spec = env_spec
| [
"baconian.core.status.StatusWithSingleInfo",
"baconian.common.logging.ConsoleLogger",
"baconian.core.status.register_counter_info_to_status_decorator",
"baconian.common.data_pre_processing.IdenticalDataScaler",
"tensorflow.python.ops.parallel_for.gradients.batch_jacobian",
"tensorflow.split",
"baconian.... | [((2872, 2951), 'baconian.core.status.register_counter_info_to_status_decorator', 'register_counter_info_to_status_decorator', ([], {'increment': '(1)', 'info_key': '"""step_counter"""'}), "(increment=1, info_key='step_counter')\n", (2913, 2951), False, 'from baconian.core.status import register_counter_info_to_status_decorator, StatusWithSingleInfo\n'), ((2198, 2253), 'baconian.common.logging.Recorder', 'Recorder', ([], {'flush_by_split_status': '(False)', 'default_obj': 'self'}), '(flush_by_split_status=False, default_obj=self)\n', (2206, 2253), False, 'from baconian.common.logging import Recorder\n'), ((2277, 2307), 'baconian.core.status.StatusWithSingleInfo', 'StatusWithSingleInfo', ([], {'obj': 'self'}), '(obj=self)\n', (2297, 2307), False, 'from baconian.core.status import register_counter_info_to_status_decorator, StatusWithSingleInfo\n'), ((8475, 8504), 'tensorflow.split', 'tf.split', (['out_node', '(1)'], {'axis': '(1)'}), '(out_node, 1, axis=1)\n', (8483, 8504), True, 'import tensorflow as tf\n'), ((2388, 2435), 'baconian.common.data_pre_processing.IdenticalDataScaler', 'IdenticalDataScaler', ([], {'dims': 'env_spec.flat_obs_dim'}), '(dims=env_spec.flat_obs_dim)\n', (2407, 2435), False, 'from baconian.common.data_pre_processing import DataScaler, IdenticalDataScaler\n'), ((2532, 2582), 'baconian.common.data_pre_processing.IdenticalDataScaler', 'IdenticalDataScaler', ([], {'dims': 'env_spec.flat_action_dim'}), '(dims=env_spec.flat_action_dim)\n', (2551, 2582), False, 'from baconian.common.data_pre_processing import DataScaler, IdenticalDataScaler\n'), ((2679, 2726), 'baconian.common.data_pre_processing.IdenticalDataScaler', 'IdenticalDataScaler', ([], {'dims': 'env_spec.flat_obs_dim'}), '(dims=env_spec.flat_obs_dim)\n', (2698, 2726), False, 'from baconian.common.data_pre_processing import DataScaler, IdenticalDataScaler\n'), ((8807, 8829), 'tensorflow.stack', 'tf.stack', (['hessian_node'], {}), '(hessian_node)\n', (8815, 8829), True, 
'import tensorflow as tf\n'), ((9980, 9995), 'baconian.common.logging.ConsoleLogger', 'ConsoleLogger', ([], {}), '()\n', (9993, 9995), False, 'from baconian.common.logging import ConsoleLogger\n'), ((3759, 3774), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (3767, 3774), True, 'import numpy as np\n'), ((8048, 8090), 'tensorflow.python.ops.parallel_for.gradients.batch_jacobian', 'tf_batch_jacobian', ([], {'output': 'o_node', 'inp': 'node'}), '(output=o_node, inp=node)\n', (8065, 8090), True, 'from tensorflow.python.ops.parallel_for.gradients import batch_jacobian as tf_batch_jacobian\n'), ((8605, 8632), 'tensorflow.hessians', 'tf.hessians', (['o_node', 'innode'], {}), '(o_node, innode)\n', (8616, 8632), True, 'import tensorflow as tf\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: <NAME>, <NAME>, <NAME>
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from itertools import chain
import numpy as np
import pytest
from pymor.core.exceptions import InversionError
from pymor.operators.constructions import SelectionOperator
from pymor.parameters.base import ParameterType
from pymor.parameters.functionals import GenericParameterFunctional
from pymor.tools.floatcmp import float_cmp_all
from pymor.vectorarrays.numpy import NumpyVectorArray
from pymortests.algorithms import MonomOperator
from pymortests.fixtures.operator import operator, operator_with_arrays, operator_with_arrays_and_products
from pymortests.pickle import assert_picklable, assert_picklable_without_dumps_function
from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds
def test_selection_op():
    """SelectionOperator dispatches to the operator matching the parameter."""
    p1 = MonomOperator(1)
    select_rhs_functional = GenericParameterFunctional(
        lambda x: round(float(x["nrrhs"])),
        ParameterType({"nrrhs": tuple()}))

    # With a single operator and no boundaries, selection is the identity.
    single = SelectionOperator(operators=[p1],
                               boundaries=[],
                               parameter_functional=select_rhs_functional,
                               name="foo")
    vx = NumpyVectorArray(np.linspace(-1., 1., num=3)[:, np.newaxis])
    assert np.allclose(p1.apply(vx, mu=0).data, single.apply(vx, mu=0).data)

    # With boundaries, the operator index follows the functional's value.
    multi = SelectionOperator(operators=[p1] * 4,
                              boundaries=[-3, 3, 7],
                              parameter_functional=select_rhs_functional,
                              name="Bar")
    for nrrhs, expected_number in [(-4, 0), (-3, 0), (-2, 1), (3, 1),
                                   (4, 2), (7, 2), (9, 3)]:
        assert multi._get_operator_number({"nrrhs": nrrhs}) == expected_number
def test_lincomb_op():
    """Linear combinations of operators apply, differentiate and project correctly."""
    linear = MonomOperator(1)
    quadratic = MonomOperator(2)
    combined = linear + quadratic
    zero = linear - linear

    x = np.linspace(-1., 1., num=3)
    vx = NumpyVectorArray(x[:, np.newaxis])

    assert np.allclose(zero.apply(vx).data, [0.])
    assert np.allclose(combined.apply(vx).data, (x * x + x)[:, np.newaxis])
    assert np.allclose((linear * 2.).apply(vx).data, (x * 2.)[:, np.newaxis])

    # Jacobian of x^2 at x is 2x; Jacobian of the zero operator vanishes.
    assert quadratic.jacobian(vx).apply(vx).almost_equal(linear.apply(vx) * 2.).all()
    assert zero.jacobian(vx).apply(vx).almost_equal(vx * 0.).all()

    # Only linear functionals can be represented as a vector.
    with pytest.raises(TypeError):
        quadratic.as_vector()
    linear.as_vector()
    assert linear.as_vector().almost_equal(linear.apply(NumpyVectorArray(1.)))

    basis = NumpyVectorArray([1.])
    for p in (linear, quadratic, combined):
        assert p.projected(basis, basis).apply(vx).almost_equal(p.apply(vx)).all()
def test_pickle(operator):
    """Every operator must survive a pickle round-trip."""
    assert_picklable(operator)
def test_pickle_without_dumps_function(operator):
    """Operators must be picklable even without a custom dumps function."""
    assert_picklable_without_dumps_function(operator)
def test_apply(operator_with_arrays):
    """Applying to a sub-array via ``ind`` matches applying to the full array."""
    op, mu, U, _ = operator_with_arrays
    full = op.apply(U, mu=mu)
    assert full in op.range
    assert len(full) == len(U)
    for ind in valid_inds(U):
        partial = op.apply(U, mu=mu, ind=ind)
        assert np.all(partial.almost_equal(full, o_ind=ind))
        # Applying to an explicit copy of the sub-array must agree as well.
        assert np.all(partial.almost_equal(op.apply(U.copy(ind=ind), mu=mu)))
def test_apply2(operator_with_arrays):
    """``apply2`` equals applying the operator followed by an inner product."""
    op, mu, U, V = operator_with_arrays
    for u_ind in valid_inds(U):
        for v_ind in valid_inds(V):
            result = op.apply2(V, U, U_ind=u_ind, V_ind=v_ind, mu=mu)
            assert result.shape == (V.len_ind(v_ind), U.len_ind(u_ind))
            expected = V.dot(op.apply(U, ind=u_ind, mu=mu), ind=v_ind)
            assert np.allclose(result, expected)
def test_apply2_with_product(operator_with_arrays_and_products):
    """``apply2`` with a range product equals applying the product explicitly."""
    op, mu, U, V, sp, rp = operator_with_arrays_and_products
    for u_ind in valid_inds(U):
        for v_ind in valid_inds(V):
            result = op.apply2(V, U, U_ind=u_ind, V_ind=v_ind, mu=mu, product=rp)
            assert result.shape == (V.len_ind(v_ind), U.len_ind(u_ind))
            expected = V.dot(rp.apply(op.apply(U, ind=u_ind, mu=mu)), ind=v_ind)
            assert np.allclose(result, expected)
def test_pairwise_apply2(operator_with_arrays):
    """``pairwise_apply2`` equals applying the operator and pairwise dotting."""
    op, mu, U, V = operator_with_arrays
    for u_ind, v_ind in valid_inds_of_same_length(U, V):
        result = op.pairwise_apply2(V, U, U_ind=u_ind, V_ind=v_ind, mu=mu)
        assert result.shape == (V.len_ind(v_ind),)
        expected = V.pairwise_dot(op.apply(U, ind=u_ind, mu=mu), ind=v_ind)
        assert np.allclose(result, expected)
def test_pairwise_apply2_with_product(operator_with_arrays_and_products):
    """``pairwise_apply2`` with range product matches explicit product application."""
    op, mu, U, V, sp, rp = operator_with_arrays_and_products
    for u_ind, v_ind in valid_inds_of_same_length(U, V):
        result = op.pairwise_apply2(V, U, U_ind=u_ind, V_ind=v_ind, mu=mu, product=rp)
        assert result.shape == (V.len_ind(v_ind),)
        expected = V.pairwise_dot(rp.apply(op.apply(U, ind=u_ind, mu=mu)), ind=v_ind)
        assert np.allclose(result, expected)
def test_apply_adjoint(operator_with_arrays):
    """Adjoint application on sub-arrays agrees with the full adjoint."""
    op, mu, _, V = operator_with_arrays
    try:
        full = op.apply_adjoint(V, mu=mu)
    except NotImplementedError:
        # Operators without an adjoint are simply skipped.
        return
    assert full in op.source
    assert len(V) == len(full)
    for ind in list(valid_inds(V, 3)) + [[]]:
        partial = op.apply_adjoint(V, mu=mu, ind=ind)
        assert np.all(partial.almost_equal(full, o_ind=ind))
        assert np.all(partial.almost_equal(op.apply_adjoint(V.copy(ind=ind), mu=mu)))
def test_apply_adjoint_2(operator_with_arrays):
    """Defining property of the adjoint: <V, op(U)> == <op^T(V), U>."""
    op, mu, U, V = operator_with_arrays
    try:
        adjoint_V = op.apply_adjoint(V, mu=mu)
    except NotImplementedError:
        return
    assert np.allclose(V.dot(op.apply(U, mu=mu)), adjoint_V.dot(U))
def test_apply_adjoint_2_with_products(operator_with_arrays_and_products):
    """Adjoint property w.r.t. given source and range products."""
    op, mu, U, V, sp, rp = operator_with_arrays_and_products
    try:
        adjoint_V = op.apply_adjoint(V, mu=mu, source_product=sp, range_product=rp)
    except NotImplementedError:
        return
    assert np.allclose(rp.apply2(V, op.apply(U, mu=mu)), sp.apply2(adjoint_V, U))
def test_apply_inverse(operator_with_arrays):
    """``apply_inverse`` reproduces ``V`` (up to tolerance) for every invert option.

    Exercises the default option (``None``), every option name and every
    option dict.  Least-squares solvers are excluded from the norm check,
    since they need not reproduce ``V`` exactly.
    """
    op, mu, _, V = operator_with_arrays
    # Fix: ``dict.itervalues()`` is Python-2-only and raises AttributeError on
    # Python 3; ``values()`` iterates the same options on both versions.
    for options in chain([None], op.invert_options, op.invert_options.values()):
        for ind in valid_inds(V):
            try:
                U = op.apply_inverse(V, mu=mu, ind=ind, options=options)
            except InversionError:
                return
            assert U in op.source
            assert len(U) == V.len_ind(ind)
            VV = op.apply(U, mu=mu)
            if (isinstance(options, str) and options.startswith('least_squares')
                    or not isinstance(options, (str, type(None))) and options['type'].startswith('least_squares')):
                continue
            assert float_cmp_all(VV.l2_norm(), V.l2_norm(ind=ind), atol=1e-10, rtol=0.5)
def test_projected(operator_with_arrays):
    """Applying the projected operator equals projecting the applied operator."""
    op, mu, U, V = operator_with_arrays
    projected_op = op.projected(U, V)
    # Deterministic coefficients derived from the fixture's dimensions.
    np.random.seed(4711 + U.dim + len(V))
    coeffs = np.random.random(len(U))
    actual = projected_op.apply(NumpyVectorArray(coeffs, copy=False), mu=mu)
    expected = NumpyVectorArray(V.dot(op.apply(U.lincomb(coeffs), mu=mu)).T, copy=False)
    assert np.all(actual.almost_equal(expected))
def test_projected_2(operator_with_arrays):
    """Projecting source and range in either order matches projecting both at once."""
    op, mu, U, V = operator_with_arrays
    source_then_range = op.projected(U, None).projected(None, V)
    range_then_source = op.projected(None, V).projected(U, None)
    both = op.projected(U, V)
    np.random.seed(4711 + U.dim + len(V))
    W = NumpyVectorArray(np.random.random(len(U)), copy=False)
    reference = both.apply(W, mu=mu)
    assert np.all(reference.almost_equal(source_then_range.apply(W, mu=mu)))
    assert np.all(reference.almost_equal(range_then_source.apply(W, mu=mu)))
def test_projected_with_product(operator_with_arrays_and_products):
    """Projection with a range product equals applying the product explicitly."""
    op, mu, U, V, sp, rp = operator_with_arrays_and_products
    projected_op = op.projected(U, V, product=rp)
    np.random.seed(4711 + U.dim + len(V))
    coeffs = np.random.random(len(U))
    actual = projected_op.apply(NumpyVectorArray(coeffs, copy=False), mu=mu)
    expected = NumpyVectorArray(rp.apply2(op.apply(U.lincomb(coeffs), mu=mu), V), copy=False)
    assert np.all(actual.almost_equal(expected))
def test_projected_with_product_2(operator_with_arrays_and_products):
    """Staged projection with a range product matches single-step projection."""
    op, mu, U, V, sp, rp = operator_with_arrays_and_products
    source_then_range = op.projected(U, None).projected(None, V, product=rp)
    range_then_source = op.projected(None, V, product=rp).projected(U, None)
    both = op.projected(U, V, product=rp)
    np.random.seed(4711 + U.dim + len(V))
    W = NumpyVectorArray(np.random.random(len(U)), copy=False)
    reference = both.apply(W, mu=mu)
    assert np.all(reference.almost_equal(source_then_range.apply(W, mu=mu)))
    assert np.all(reference.almost_equal(range_then_source.apply(W, mu=mu)))
def test_jacobian(operator_with_arrays):
    """Jacobians are linear and share the operator's source and range."""
    op, mu, U, _ = operator_with_arrays
    try:
        jac = op.jacobian(U, mu=mu)
    except NotImplementedError:
        # Operators without a Jacobian are simply skipped.
        return
    assert jac.linear
    assert jac.source == op.source
    assert jac.range == op.range
def test_assemble(operator_with_arrays):
    """Assembling preserves the operator's source and range spaces."""
    op, mu, _, _ = operator_with_arrays
    assembled = op.assemble(mu=mu)
    assert assembled.source == op.source
    assert assembled.range == op.range
########################################################################################################################
def test_apply_wrong_ind(operator_with_arrays):
    """``apply`` rejects invalid index arguments."""
    op, mu, U, _ = operator_with_arrays
    for bad_ind in invalid_inds(U):
        with pytest.raises(Exception):
            op.apply(U, mu=mu, ind=bad_ind)
def test_apply2_wrong_ind(operator_with_arrays):
    """``apply2`` rejects invalid indices for either array."""
    op, mu, U, V = operator_with_arrays
    for array in (U, V):
        for bad_ind in invalid_inds(array):
            with pytest.raises(Exception):
                op.apply2(U, V, mu=mu, ind=bad_ind)
def test_apply_adjoint_wrong_ind(operator_with_arrays):
    """``apply_adjoint`` rejects invalid index arguments."""
    op, mu, _, V = operator_with_arrays
    for bad_ind in invalid_inds(V):
        with pytest.raises(Exception):
            op.apply_adjoint(V, mu=mu, ind=bad_ind)
def test_apply_inverse_wrong_ind(operator_with_arrays):
    """``apply_inverse`` rejects invalid index arguments."""
    op, mu, _, V = operator_with_arrays
    for bad_ind in invalid_inds(V):
        with pytest.raises(Exception):
            op.apply_inverse(V, mu=mu, ind=bad_ind)
| [
"pymor.operators.constructions.SelectionOperator",
"numpy.allclose",
"pymortests.algorithms.MonomOperator",
"pymor.vectorarrays.numpy.NumpyVectorArray",
"pymortests.pickle.assert_picklable_without_dumps_function",
"pymortests.vectorarray.valid_inds",
"pymortests.vectorarray.invalid_inds",
"pymortests.... | [((1008, 1024), 'pymortests.algorithms.MonomOperator', 'MonomOperator', (['(1)'], {}), '(1)\n', (1021, 1024), False, 'from pymortests.algorithms import MonomOperator\n'), ((1184, 1293), 'pymor.operators.constructions.SelectionOperator', 'SelectionOperator', ([], {'operators': '[p1]', 'boundaries': '[]', 'parameter_functional': 'select_rhs_functional', 'name': '"""foo"""'}), "(operators=[p1], boundaries=[], parameter_functional=\n select_rhs_functional, name='foo')\n", (1201, 1293), False, 'from pymor.operators.constructions import SelectionOperator\n'), ((1345, 1374), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)'], {'num': '(3)'}), '(-1.0, 1.0, num=3)\n', (1356, 1374), True, 'import numpy as np\n'), ((1382, 1416), 'pymor.vectorarrays.numpy.NumpyVectorArray', 'NumpyVectorArray', (['x[:, np.newaxis]'], {}), '(x[:, np.newaxis])\n', (1398, 1416), False, 'from pymor.vectorarrays.numpy import NumpyVectorArray\n'), ((1498, 1626), 'pymor.operators.constructions.SelectionOperator', 'SelectionOperator', ([], {'operators': '[p1, p1, p1, p1]', 'boundaries': '[-3, 3, 7]', 'parameter_functional': 'select_rhs_functional', 'name': '"""Bar"""'}), "(operators=[p1, p1, p1, p1], boundaries=[-3, 3, 7],\n parameter_functional=select_rhs_functional, name='Bar')\n", (1515, 1626), False, 'from pymor.operators.constructions import SelectionOperator\n'), ((2074, 2090), 'pymortests.algorithms.MonomOperator', 'MonomOperator', (['(1)'], {}), '(1)\n', (2087, 2090), False, 'from pymortests.algorithms import MonomOperator\n'), ((2100, 2116), 'pymortests.algorithms.MonomOperator', 'MonomOperator', (['(2)'], {}), '(2)\n', (2113, 2116), False, 'from pymortests.algorithms import MonomOperator\n'), ((2160, 2189), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)'], {'num': '(3)'}), '(-1.0, 1.0, num=3)\n', (2171, 2189), True, 'import numpy as np\n'), ((2197, 2231), 'pymor.vectorarrays.numpy.NumpyVectorArray', 'NumpyVectorArray', (['x[:, np.newaxis]'], {}), '(x[:, 
np.newaxis])\n', (2213, 2231), False, 'from pymor.vectorarrays.numpy import NumpyVectorArray\n'), ((2726, 2749), 'pymor.vectorarrays.numpy.NumpyVectorArray', 'NumpyVectorArray', (['[1.0]'], {}), '([1.0])\n', (2742, 2749), False, 'from pymor.vectorarrays.numpy import NumpyVectorArray\n'), ((2939, 2965), 'pymortests.pickle.assert_picklable', 'assert_picklable', (['operator'], {}), '(operator)\n', (2955, 2965), False, 'from pymortests.pickle import assert_picklable, assert_picklable_without_dumps_function\n'), ((3022, 3071), 'pymortests.pickle.assert_picklable_without_dumps_function', 'assert_picklable_without_dumps_function', (['operator'], {}), '(operator)\n', (3061, 3071), False, 'from pymortests.pickle import assert_picklable, assert_picklable_without_dumps_function\n'), ((3247, 3260), 'pymortests.vectorarray.valid_inds', 'valid_inds', (['U'], {}), '(U)\n', (3257, 3260), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((3533, 3546), 'pymortests.vectorarray.valid_inds', 'valid_inds', (['U'], {}), '(U)\n', (3543, 3546), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((3964, 3977), 'pymortests.vectorarray.valid_inds', 'valid_inds', (['U'], {}), '(U)\n', (3974, 3977), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((4386, 4417), 'pymortests.vectorarray.valid_inds_of_same_length', 'valid_inds_of_same_length', (['U', 'V'], {}), '(U, V)\n', (4411, 4417), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((4800, 4831), 'pymortests.vectorarray.valid_inds_of_same_length', 'valid_inds_of_same_length', (['U', 'V'], {}), '(U, V)\n', (4825, 4831), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((9564, 9579), 'pymortests.vectorarray.invalid_inds', 'invalid_inds', (['U'], {}), '(U)\n', (9576, 9579), False, 'from 
pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((9766, 9781), 'pymortests.vectorarray.invalid_inds', 'invalid_inds', (['U'], {}), '(U)\n', (9778, 9781), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((9881, 9896), 'pymortests.vectorarray.invalid_inds', 'invalid_inds', (['V'], {}), '(V)\n', (9893, 9896), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((10094, 10109), 'pymortests.vectorarray.invalid_inds', 'invalid_inds', (['V'], {}), '(V)\n', (10106, 10109), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((10311, 10326), 'pymortests.vectorarray.invalid_inds', 'invalid_inds', (['V'], {}), '(V)\n', (10323, 10326), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((2574, 2598), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2587, 2598), False, 'import pytest\n'), ((3569, 3582), 'pymortests.vectorarray.valid_inds', 'valid_inds', (['V'], {}), '(V)\n', (3579, 3582), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((4000, 4013), 'pymortests.vectorarray.valid_inds', 'valid_inds', (['V'], {}), '(V)\n', (4010, 4013), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((4620, 4638), 'numpy.allclose', 'np.allclose', (['M', 'M2'], {}), '(M, M2)\n', (4631, 4638), True, 'import numpy as np\n'), ((5056, 5074), 'numpy.allclose', 'np.allclose', (['M', 'M2'], {}), '(M, M2)\n', (5067, 5074), True, 'import numpy as np\n'), ((6336, 6349), 'pymortests.vectorarray.valid_inds', 'valid_inds', (['V'], {}), '(V)\n', (6346, 6349), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((7139, 7175), 'pymor.vectorarrays.numpy.NumpyVectorArray', 'NumpyVectorArray', (['coeffs'], 
{'copy': '(False)'}), '(coeffs, copy=False)\n', (7155, 7175), False, 'from pymor.vectorarrays.numpy import NumpyVectorArray\n'), ((8112, 8148), 'pymor.vectorarrays.numpy.NumpyVectorArray', 'NumpyVectorArray', (['coeffs'], {'copy': '(False)'}), '(coeffs, copy=False)\n', (8128, 8148), False, 'from pymor.vectorarrays.numpy import NumpyVectorArray\n'), ((2690, 2711), 'pymor.vectorarrays.numpy.NumpyVectorArray', 'NumpyVectorArray', (['(1.0)'], {}), '(1.0)\n', (2706, 2711), False, 'from pymor.vectorarrays.numpy import NumpyVectorArray\n'), ((3800, 3818), 'numpy.allclose', 'np.allclose', (['M', 'M2'], {}), '(M, M2)\n', (3811, 3818), True, 'import numpy as np\n'), ((4253, 4271), 'numpy.allclose', 'np.allclose', (['M', 'M2'], {}), '(M, M2)\n', (4264, 4271), True, 'import numpy as np\n'), ((5332, 5348), 'pymortests.vectorarray.valid_inds', 'valid_inds', (['V', '(3)'], {}), '(V, 3)\n', (5342, 5348), False, 'from pymortests.vectorarray import valid_inds, valid_inds_of_same_length, invalid_inds\n'), ((9594, 9618), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9607, 9618), False, 'import pytest\n'), ((9796, 9820), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9809, 9820), False, 'import pytest\n'), ((9911, 9935), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9924, 9935), False, 'import pytest\n'), ((10124, 10148), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (10137, 10148), False, 'import pytest\n'), ((10341, 10365), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (10354, 10365), False, 'import pytest\n')] |
"""
CIE Chromaticity Diagrams Plotting
==================================
Defines the *CIE* chromaticity diagrams plotting objects:
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.plot_chromaticity_diagram_CIE1976UCS`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1976UCS`
"""
from __future__ import annotations
import bisect
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import LineCollection
from matplotlib.patches import Polygon
from colour.algebra import normalise_maximum, normalise_vector
from colour.colorimetry import (
MultiSpectralDistributions,
SDS_ILLUMINANTS,
SpectralDistribution,
sd_to_XYZ,
sds_and_msds_to_sds,
)
from colour.hints import (
Any,
ArrayLike,
Boolean,
Callable,
Dict,
Floating,
Integer,
List,
Literal,
NDArray,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from colour.models import (
Luv_to_uv,
Luv_uv_to_xy,
UCS_to_uv,
UCS_uv_to_xy,
XYZ_to_Luv,
XYZ_to_UCS,
XYZ_to_xy,
xy_to_XYZ,
)
from colour.notation import HEX_to_RGB
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
CONSTANTS_ARROW_STYLE,
XYZ_to_plotting_colourspace,
artist,
filter_cmfs,
filter_illuminants,
override_style,
render,
update_settings_collection,
)
from colour.utilities import (
as_float_array,
domain_range_scale,
first_item,
is_string,
optional,
tsplit,
tstack,
validate_method,
)
__author__ = "<NAME>"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"plot_spectral_locus",
"plot_chromaticity_diagram_colours",
"plot_chromaticity_diagram",
"plot_chromaticity_diagram_CIE1931",
"plot_chromaticity_diagram_CIE1960UCS",
"plot_chromaticity_diagram_CIE1976UCS",
"plot_sds_in_chromaticity_diagram",
"plot_sds_in_chromaticity_diagram_CIE1931",
"plot_sds_in_chromaticity_diagram_CIE1960UCS",
"plot_sds_in_chromaticity_diagram_CIE1976UCS",
]
@override_style()
def plot_spectral_locus(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    spectral_locus_colours: Optional[Union[ArrayLike, str]] = None,
    spectral_locus_opacity: Floating = 1,
    spectral_locus_labels: Optional[Sequence] = None,
    method: Union[
        Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
    ] = "CIE 1931",
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Spectral Locus* according to given method.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    spectral_locus_colours
        Colours of the *Spectral Locus*, if ``spectral_locus_colours`` is set
        to *RGB*, the colours will be computed according to the corresponding
        chromaticity coordinates.
    spectral_locus_opacity
        Opacity of the *Spectral Locus*.
    spectral_locus_labels
        Array of wavelength labels used to customise which labels will be drawn
        around the spectral locus. Passing an empty array will result in no
        wavelength labels being drawn.
    method
        *Chromaticity Diagram* method.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_spectral_locus(spectral_locus_colours='RGB') # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Spectral_Locus.png
        :align: center
        :alt: plot_spectral_locus
    """

    # "validate_method" lower-cases the method, hence the lower-case
    # comparisons below.
    method = validate_method(
        method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
    )

    spectral_locus_colours = optional(
        spectral_locus_colours, CONSTANTS_COLOUR_STYLE.colour.dark
    )

    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)

    _figure, axes = artist(**settings)

    cmfs = cast(
        MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
    )

    illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint

    wavelengths = list(cmfs.wavelengths)
    equal_energy = np.array([1 / 3] * 2)

    # Convert the CMFS values to the chromaticity coordinates of the selected
    # diagram and pick the default wavelength labels for that diagram.
    if method == "cie 1931":
        ij = XYZ_to_xy(cmfs.values, illuminant)
        labels = cast(
            Tuple,
            optional(
                spectral_locus_labels,
                (
                    390,
                    460,
                    470,
                    480,
                    490,
                    500,
                    510,
                    520,
                    540,
                    560,
                    580,
                    600,
                    620,
                    700,
                ),
            ),
        )
    elif method == "cie 1960 ucs":
        ij = UCS_to_uv(XYZ_to_UCS(cmfs.values))
        labels = cast(
            Tuple,
            optional(
                spectral_locus_labels,
                (
                    420,
                    440,
                    450,
                    460,
                    470,
                    480,
                    490,
                    500,
                    510,
                    520,
                    530,
                    540,
                    550,
                    560,
                    570,
                    580,
                    590,
                    600,
                    610,
                    620,
                    630,
                    645,
                    680,
                ),
            ),
        )
    elif method == "cie 1976 ucs":
        ij = Luv_to_uv(XYZ_to_Luv(cmfs.values, illuminant), illuminant)
        labels = cast(
            Tuple,
            optional(
                spectral_locus_labels,
                (
                    420,
                    440,
                    450,
                    460,
                    470,
                    480,
                    490,
                    500,
                    510,
                    520,
                    530,
                    540,
                    550,
                    560,
                    570,
                    580,
                    590,
                    600,
                    610,
                    620,
                    630,
                    645,
                    680,
                ),
            ),
        )

    # Purple line segments joining the first and last locus coordinates.
    pl_ij = np.reshape(
        tstack(
            [
                np.linspace(ij[0][0], ij[-1][0], 20),
                np.linspace(ij[0][1], ij[-1][1], 20),
            ]
        ),
        (-1, 1, 2),
    )
    sl_ij = np.copy(ij).reshape(-1, 1, 2)

    purple_line_colours: Optional[Union[ArrayLike, str]]
    if str(spectral_locus_colours).upper() == "RGB":
        spectral_locus_colours = normalise_maximum(
            XYZ_to_plotting_colourspace(cmfs.values), axis=-1
        )

        if method == "cie 1931":
            XYZ = xy_to_XYZ(pl_ij)
        elif method == "cie 1960 ucs":
            XYZ = xy_to_XYZ(UCS_uv_to_xy(pl_ij))
        elif method == "cie 1976 ucs":
            XYZ = xy_to_XYZ(Luv_uv_to_xy(pl_ij))

        purple_line_colours = normalise_maximum(
            XYZ_to_plotting_colourspace(np.reshape(XYZ, (-1, 3))), axis=-1
        )
    else:
        purple_line_colours = spectral_locus_colours

    for slp_ij, slp_colours in (
        (pl_ij, purple_line_colours),
        (sl_ij, spectral_locus_colours),
    ):
        line_collection = LineCollection(
            np.concatenate([slp_ij[:-1], slp_ij[1:]], axis=1),
            colors=slp_colours,
            alpha=spectral_locus_opacity,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_scatter,
        )
        axes.add_collection(line_collection)

    wl_ij = dict(zip(wavelengths, ij))
    for label in labels:
        ij_l = wl_ij.get(label)

        if ij_l is None:
            continue

        ij_l = as_float_array([ij_l])
        i, j = tsplit(ij_l)

        index = bisect.bisect(wavelengths, label)
        left = wavelengths[index - 1] if index >= 0 else wavelengths[index]
        right = (
            wavelengths[index] if index < len(wavelengths) else wavelengths[-1]
        )

        dx = wl_ij[right][0] - wl_ij[left][0]
        dy = wl_ij[right][1] - wl_ij[left][1]

        direction = np.array([-dy, dx])

        # Orient the tick normal away from the equal-energy point so the
        # labels sit outside the locus.
        normal = (
            np.array([-dy, dx])
            if np.dot(
                normalise_vector(ij_l - equal_energy),
                normalise_vector(direction),
            )
            > 0
            else np.array([dy, -dx])
        )
        normal = as_float_array(normalise_vector(normal) / 30)

        label_colour = (
            spectral_locus_colours
            if is_string(spectral_locus_colours)
            else spectral_locus_colours[index]  # type: ignore[index]
        )

        axes.plot(
            (i, i + normal[0] * 0.75),
            (j, j + normal[1] * 0.75),
            color=label_colour,
            alpha=spectral_locus_opacity,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_line,
        )

        axes.plot(
            i,
            j,
            "o",
            color=label_colour,
            alpha=spectral_locus_opacity,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_line,
        )

        axes.text(
            i + normal[0],
            j + normal[1],
            label,
            clip_on=True,
            ha="left" if normal[0] >= 0 else "right",
            va="center",
            fontdict={"size": "small"},
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_label,
        )

    settings = {"axes": axes}
    settings.update(kwargs)

    # Fix: pass the accumulated settings (which carry the current "axes") to
    # "render"; forwarding "kwargs" directly would discard the "axes" entry.
    return render(**settings)
@override_style()
def plot_chromaticity_diagram_colours(
    samples: Integer = 256,
    diagram_colours: Optional[Union[ArrayLike, str]] = None,
    diagram_opacity: Floating = 1,
    diagram_clipping_path: Optional[ArrayLike] = None,
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    method: Union[
        Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
    ] = "CIE 1931",
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Chromaticity Diagram* colours according to given method.

    Parameters
    ----------
    samples
        Samples count on one axis when computing the *Chromaticity Diagram*
        colours.
    diagram_colours
        Colours of the *Chromaticity Diagram*, if ``diagram_colours`` is set
        to *RGB*, the colours will be computed according to the corresponding
        coordinates.
    diagram_opacity
        Opacity of the *Chromaticity Diagram*.
    diagram_clipping_path
        Path of points used to clip the *Chromaticity Diagram* colours.
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    method
        *Chromaticity Diagram* method.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram_colours(diagram_colours='RGB')
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_Colours.png
        :align: center
        :alt: plot_chromaticity_diagram_colours
    """

    # "validate_method" lower-cases the method, hence the lower-case
    # comparisons below.
    method = validate_method(
        method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
    )

    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)

    _figure, axes = artist(**settings)

    diagram_colours = cast(
        ArrayLike,
        optional(
            diagram_colours, HEX_to_RGB(CONSTANTS_COLOUR_STYLE.colour.average)
        ),
    )

    cmfs = cast(
        MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
    )

    illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint

    # Spectral locus in the coordinates of the selected diagram; used as the
    # clipping boundary when no explicit clipping path is given.
    if method == "cie 1931":
        spectral_locus = XYZ_to_xy(cmfs.values, illuminant)
    elif method == "cie 1960 ucs":
        spectral_locus = UCS_to_uv(XYZ_to_UCS(cmfs.values))
    elif method == "cie 1976 ucs":
        spectral_locus = Luv_to_uv(
            XYZ_to_Luv(cmfs.values, illuminant), illuminant
        )

    use_RGB_diagram_colours = str(diagram_colours).upper() == "RGB"
    if use_RGB_diagram_colours:
        ii, jj = np.meshgrid(
            np.linspace(0, 1, samples), np.linspace(1, 0, samples)
        )
        ij = tstack([ii, jj])

        # NOTE: Various values in the grid have potential to generate
        # zero-divisions, they could be avoided by perturbing the grid, e.g.
        # adding a small epsilon. It was decided instead to disable warnings.
        with suppress_warnings(python_warnings=True):
            if method == "cie 1931":
                XYZ = xy_to_XYZ(ij)
            elif method == "cie 1960 ucs":
                XYZ = xy_to_XYZ(UCS_uv_to_xy(ij))
            elif method == "cie 1976 ucs":
                XYZ = xy_to_XYZ(Luv_uv_to_xy(ij))

        diagram_colours = normalise_maximum(
            XYZ_to_plotting_colourspace(XYZ, illuminant), axis=-1
        )

    polygon = Polygon(
        spectral_locus
        if diagram_clipping_path is None
        else diagram_clipping_path,
        facecolor="none"
        if use_RGB_diagram_colours
        else np.hstack([diagram_colours, diagram_opacity]),
        edgecolor="none"
        if use_RGB_diagram_colours
        else np.hstack([diagram_colours, diagram_opacity]),
        zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
    )
    axes.add_patch(polygon)

    if use_RGB_diagram_colours:
        # Preventing bounding box related issues as per
        # https://github.com/matplotlib/matplotlib/issues/10529
        image = axes.imshow(
            diagram_colours,
            interpolation="bilinear",
            extent=(0, 1, 0, 1),
            clip_path=None,
            alpha=diagram_opacity,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
        )
        image.set_clip_path(polygon)

    settings = {"axes": axes}
    settings.update(kwargs)

    # Fix: pass the accumulated settings (which carry the current "axes") to
    # "render"; forwarding "kwargs" directly would discard the "axes" entry.
    return render(**settings)
@override_style()
def plot_chromaticity_diagram(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: Boolean = True,
    show_spectral_locus: Boolean = True,
    method: Union[
        Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
    ] = "CIE 1931",
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Chromaticity Diagram* according to given method.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions defining the spectral
        locus boundaries, in any type or form supported by the
        :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.
    method
        *Chromaticity Diagram* method.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_spectral_locus`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram_colours`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram.png
        :align: center
        :alt: plot_chromaticity_diagram
    """

    # "validate_method" also normalises "method" to lowercase.
    method = validate_method(
        method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
    )

    settings: Dict[str, Any] = {"uniform": True, **kwargs}

    _figure, axes = artist(**settings)

    cmfs = cast(
        MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
    )

    if show_diagram_colours:
        # User kwargs may override the defaults, but "standalone" and
        # "cmfs" are forced so that the overlay draws on the shared axes.
        settings = {
            "axes": axes,
            "method": method,
            "diagram_colours": "RGB",
            **kwargs,
            "standalone": False,
            "cmfs": cmfs,
        }
        plot_chromaticity_diagram_colours(**settings)

    if show_spectral_locus:
        settings = {
            "axes": axes,
            "method": method,
            **kwargs,
            "standalone": False,
            "cmfs": cmfs,
        }
        plot_spectral_locus(**settings)

    # Axis labels follow the chromaticity coordinate system in use.
    x_label, y_label = {
        "cie 1931": ("CIE x", "CIE y"),
        "cie 1960 ucs": ("CIE u", "CIE v"),
        "cie 1976 ucs": ("CIE u'", "CIE v'"),
    }[method]

    settings.update(
        {
            "axes": axes,
            "standalone": True,
            "bounding_box": (0, 1, 0, 1),
            "title": (
                f"{method.upper()} Chromaticity Diagram - {cmfs.strict_name}"
            ),
            "x_label": x_label,
            "y_label": y_label,
            **kwargs,
        }
    )

    return render(**settings)
@override_style()
def plot_chromaticity_diagram_CIE1931(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: Boolean = True,
    show_spectral_locus: Boolean = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *CIE 1931 Chromaticity Diagram*.

    Thin convenience wrapper around
    :func:`colour.plotting.diagrams.plot_chromaticity_diagram` with the
    ``method`` keyword forced to ``"CIE 1931"``.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions defining the spectral
        locus boundaries, in any type or form supported by the
        :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram_CIE1931()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1931.png
        :align: center
        :alt: plot_chromaticity_diagram_CIE1931
    """

    # "method" overrides any value a caller may have placed in kwargs.
    settings = {**kwargs, "method": "CIE 1931"}

    return plot_chromaticity_diagram(
        cmfs,
        show_diagram_colours,
        show_spectral_locus,
        **settings,
    )
@override_style()
def plot_chromaticity_diagram_CIE1960UCS(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: Boolean = True,
    show_spectral_locus: Boolean = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *CIE 1960 UCS Chromaticity Diagram*.

    Thin convenience wrapper around
    :func:`colour.plotting.diagrams.plot_chromaticity_diagram` with the
    ``method`` keyword forced to ``"CIE 1960 UCS"``.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions defining the spectral
        locus boundaries, in any type or form supported by the
        :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram_CIE1960UCS()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1960UCS.png
        :align: center
        :alt: plot_chromaticity_diagram_CIE1960UCS
    """

    # "method" overrides any value a caller may have placed in kwargs.
    settings = {**kwargs, "method": "CIE 1960 UCS"}

    return plot_chromaticity_diagram(
        cmfs,
        show_diagram_colours,
        show_spectral_locus,
        **settings,
    )
@override_style()
def plot_chromaticity_diagram_CIE1976UCS(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: Boolean = True,
    show_spectral_locus: Boolean = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *CIE 1976 UCS Chromaticity Diagram*.

    Thin convenience wrapper around
    :func:`colour.plotting.diagrams.plot_chromaticity_diagram` with the
    ``method`` keyword forced to ``"CIE 1976 UCS"``.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions defining the spectral
        locus boundaries, in any type or form supported by the
        :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram_CIE1976UCS()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png
        :align: center
        :alt: plot_chromaticity_diagram_CIE1976UCS
    """

    # "method" overrides any value a caller may have placed in kwargs.
    settings = {**kwargs, "method": "CIE 1976 UCS"}

    return plot_chromaticity_diagram(
        cmfs,
        show_diagram_colours,
        show_spectral_locus,
        **settings,
    )
@override_style()
def plot_sds_in_chromaticity_diagram(
    sds: Union[
        Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
        MultiSpectralDistributions,
    ],
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    chromaticity_diagram_callable: Callable = plot_chromaticity_diagram,
    method: Union[
        Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
    ] = "CIE 1931",
    annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given spectral distribution chromaticity coordinates into the
    *Chromaticity Diagram* using given method.

    Parameters
    ----------
    sds
        Spectral distributions or multi-spectral distributions to
        plot. `sds` can be a single
        :class:`colour.MultiSpectralDistributions` class instance, a list
        of :class:`colour.MultiSpectralDistributions` class instances or a
        list of :class:`colour.SpectralDistribution` class instances.
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    chromaticity_diagram_callable
        Callable responsible for drawing the *Chromaticity Diagram*.
    method
        *Chromaticity Diagram* method.
    annotate_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.annotate`
        definition, used to annotate the resulting chromaticity coordinates
        with their respective spectral distribution names. ``annotate_kwargs``
        can be either a single dictionary applied to all the arrows with same
        settings or a sequence of dictionaries with different settings for each
        spectral distribution. The following special keyword arguments can also
        be used:

        -   ``annotate`` : Whether to annotate the spectral distributions.
    plot_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
        used to control the style of the plotted spectral distributions.
        `plot_kwargs`` can be either a single dictionary applied to all the
        plotted spectral distributions with the same settings or a sequence of
        dictionaries with different settings for each plotted spectral
        distributions. The following special keyword arguments can also be
        used:

        -   ``illuminant`` : The illuminant used to compute the spectral
            distributions colours. The default is the illuminant associated
            with the whitepoint of the default plotting colourspace.
            ``illuminant`` can be of any type or form supported by the
            :func:`colour.plotting.filter_cmfs` definition.
        -   ``cmfs`` : The standard observer colour matching functions used for
            computing the spectral distributions colours. ``cmfs`` can be of
            any type or form supported by the
            :func:`colour.plotting.filter_cmfs` definition.
        -   ``normalise_sd_colours`` : Whether to normalise the computed
            spectral distributions colours. The default is *True*.
        -   ``use_sd_colours`` : Whether to use the computed spectral
            distributions colours under the plotting colourspace illuminant.
            Alternatively, it is possible to use the
            :func:`matplotlib.pyplot.plot` definition ``color`` argument with
            pre-computed values. The default is *True*.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> A = SDS_ILLUMINANTS['A']
    >>> D65 = SDS_ILLUMINANTS['D65']
    >>> annotate_kwargs = [
    ...     {'xytext': (-25, 15), 'arrowprops':{'arrowstyle':'-'}},
    ...     {}
    ... ]
    >>> plot_kwargs = [
    ...     {
    ...         'illuminant': SDS_ILLUMINANTS['E'],
    ...         'markersize' : 15,
    ...         'normalise_sd_colours': True,
    ...         'use_sd_colours': True
    ...     },
    ...     {'illuminant': SDS_ILLUMINANTS['E']},
    ... ]
    >>> plot_sds_in_chromaticity_diagram(
    ...     [A, D65], annotate_kwargs=annotate_kwargs, plot_kwargs=plot_kwargs)
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_SDS_In_Chromaticity_Diagram.png
        :align: center
        :alt: plot_sds_in_chromaticity_diagram
    """
    # "validate_method" also normalises "method" to lowercase,
    # e.g. "cie 1931", which the branches below compare against.
    method = validate_method(
        method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
    )
    # Flatten any multi-spectral distributions into a flat list of
    # single spectral distributions.
    sds_converted = sds_and_msds_to_sds(sds)
    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)
    _figure, axes = artist(**settings)
    settings.update(
        {
            "axes": axes,
            "standalone": False,
            "method": method,
            "cmfs": cmfs,
        }
    )
    # Draw the chromaticity diagram background first; the spectral
    # distribution coordinates are overlaid on the shared axes below.
    chromaticity_diagram_callable(**settings)
    # Select the CIE XYZ -> diagram coordinates projection and the plot
    # extents matching the requested method.
    if method == "cie 1931":
        def XYZ_to_ij(XYZ: NDArray) -> NDArray:
            """
            Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
            coordinates.
            """
            return XYZ_to_xy(XYZ)
        bounding_box = (-0.1, 0.9, -0.1, 0.9)
    elif method == "cie 1960 ucs":
        def XYZ_to_ij(XYZ: NDArray) -> NDArray:
            """
            Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
            coordinates.
            """
            return UCS_to_uv(XYZ_to_UCS(XYZ))
        bounding_box = (-0.1, 0.7, -0.2, 0.6)
    elif method == "cie 1976 ucs":
        def XYZ_to_ij(XYZ: NDArray) -> NDArray:
            """
            Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
            coordinates.
            """
            return Luv_to_uv(XYZ_to_Luv(XYZ))
        bounding_box = (-0.1, 0.7, -0.1, 0.7)
    # Default annotation settings, one dict per spectral distribution;
    # user-supplied "annotate_kwargs" entries override these defaults.
    annotate_settings_collection = [
        {
            "annotate": True,
            "xytext": (-50, 30),
            "textcoords": "offset points",
            "arrowprops": CONSTANTS_ARROW_STYLE,
            "zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_annotation,
        }
        for _ in range(len(sds_converted))
    ]
    if annotate_kwargs is not None:
        update_settings_collection(
            annotate_settings_collection, annotate_kwargs, len(sds_converted)
        )
    # Default plotting settings, one dict per spectral distribution; the
    # non-matplotlib keys ("cmfs", "illuminant", "use_sd_colours",
    # "normalise_sd_colours") are popped in the loop below before the
    # dict is handed to "axes.plot".
    plot_settings_collection = [
        {
            "color": CONSTANTS_COLOUR_STYLE.colour.brightest,
            "label": f"{sd.strict_name}",
            "marker": "o",
            "markeredgecolor": CONSTANTS_COLOUR_STYLE.colour.dark,
            "markeredgewidth": CONSTANTS_COLOUR_STYLE.geometry.short * 0.75,
            "markersize": (
                CONSTANTS_COLOUR_STYLE.geometry.short * 6
                + CONSTANTS_COLOUR_STYLE.geometry.short * 0.75
            ),
            "zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_line,
            "cmfs": cmfs,
            "illuminant": SDS_ILLUMINANTS[
                CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint_name
            ],
            "use_sd_colours": False,
            "normalise_sd_colours": False,
        }
        for sd in sds_converted
    ]
    if plot_kwargs is not None:
        update_settings_collection(
            plot_settings_collection, plot_kwargs, len(sds_converted)
        )
    for i, sd in enumerate(sds_converted):
        plot_settings = plot_settings_collection[i]
        # NOTE: "cmfs" is deliberately rebound per iteration from the
        # per-SD plot settings, shadowing the function parameter.
        cmfs = cast(
            MultiSpectralDistributions,
            first_item(filter_cmfs(plot_settings.pop("cmfs")).values()),
        )
        illuminant = cast(
            SpectralDistribution,
            first_item(
                filter_illuminants(plot_settings.pop("illuminant")).values()
            ),
        )
        normalise_sd_colours = plot_settings.pop("normalise_sd_colours")
        use_sd_colours = plot_settings.pop("use_sd_colours")
        # Compute tristimulus values with the [0, 1] domain-range scale.
        with domain_range_scale("1"):
            XYZ = sd_to_XYZ(sd, cmfs, illuminant)
        if use_sd_colours:
            if normalise_sd_colours:
                # Normalise by luminance (the Y component) so the plotted
                # colour does not depend on the SD's absolute power.
                XYZ /= XYZ[..., 1]
            plot_settings["color"] = np.clip(
                XYZ_to_plotting_colourspace(XYZ), 0, 1
            )
        ij = XYZ_to_ij(XYZ)
        axes.plot(ij[0], ij[1], **plot_settings)
        if sd.name is not None and annotate_settings_collection[i]["annotate"]:
            annotate_settings = annotate_settings_collection[i]
            # "annotate" is a control flag, not a matplotlib argument.
            annotate_settings.pop("annotate")
            axes.annotate(sd.name, xy=ij, **annotate_settings)
    settings.update({"standalone": True, "bounding_box": bounding_box})
    settings.update(kwargs)
    return render(**settings)
@override_style()
def plot_sds_in_chromaticity_diagram_CIE1931(
    sds: Union[
        Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
        MultiSpectralDistributions,
    ],
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    chromaticity_diagram_callable_CIE1931: Callable = (
        plot_chromaticity_diagram_CIE1931
    ),
    annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given spectral distribution chromaticity coordinates into the
    *CIE 1931 Chromaticity Diagram*.

    Thin convenience wrapper around
    :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram` with
    the ``method`` keyword forced to ``"CIE 1931"``.

    Parameters
    ----------
    sds
        Spectral distributions or multi-spectral distributions to plot: a
        single :class:`colour.MultiSpectralDistributions` class instance, a
        list of :class:`colour.MultiSpectralDistributions` class instances
        or a list of :class:`colour.SpectralDistribution` class instances.
    cmfs
        Standard observer colour matching functions defining the spectral
        locus boundaries, in any type or form supported by the
        :func:`colour.plotting.filter_cmfs` definition.
    chromaticity_diagram_callable_CIE1931
        Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*.
    annotate_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.annotate`
        definition, either one dictionary shared by every spectral
        distribution or one dictionary per spectral distribution; see
        :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram`
        for the supported special keyword arguments.
    plot_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
        either one dictionary shared by every spectral distribution or one
        dictionary per spectral distribution; see
        :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram`
        for the supported special keyword arguments.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> A = SDS_ILLUMINANTS['A']
    >>> D65 = SDS_ILLUMINANTS['D65']
    >>> plot_sds_in_chromaticity_diagram_CIE1931([A, D65])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_\
Plot_SDS_In_Chromaticity_Diagram_CIE1931.png
        :align: center
        :alt: plot_sds_in_chromaticity_diagram_CIE1931
    """

    # "method" overrides any value a caller may have placed in kwargs.
    settings = {**kwargs, "method": "CIE 1931"}

    return plot_sds_in_chromaticity_diagram(
        sds,
        cmfs,
        chromaticity_diagram_callable_CIE1931,
        annotate_kwargs=annotate_kwargs,
        plot_kwargs=plot_kwargs,
        **settings,
    )
@override_style()
def plot_sds_in_chromaticity_diagram_CIE1960UCS(
    sds: Union[
        Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
        MultiSpectralDistributions,
    ],
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    chromaticity_diagram_callable_CIE1960UCS: Callable = (
        plot_chromaticity_diagram_CIE1960UCS
    ),
    annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given spectral distribution chromaticity coordinates into the
    *CIE 1960 UCS Chromaticity Diagram*.

    Thin convenience wrapper around
    :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram` with
    the ``method`` keyword forced to ``"CIE 1960 UCS"``.

    Parameters
    ----------
    sds
        Spectral distributions or multi-spectral distributions to plot: a
        single :class:`colour.MultiSpectralDistributions` class instance, a
        list of :class:`colour.MultiSpectralDistributions` class instances
        or a list of :class:`colour.SpectralDistribution` class instances.
    cmfs
        Standard observer colour matching functions defining the spectral
        locus boundaries, in any type or form supported by the
        :func:`colour.plotting.filter_cmfs` definition.
    chromaticity_diagram_callable_CIE1960UCS
        Callable responsible for drawing the
        *CIE 1960 UCS Chromaticity Diagram*.
    annotate_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.annotate`
        definition, either one dictionary shared by every spectral
        distribution or one dictionary per spectral distribution; see
        :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram`
        for the supported special keyword arguments.
    plot_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
        either one dictionary shared by every spectral distribution or one
        dictionary per spectral distribution; see
        :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram`
        for the supported special keyword arguments.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> A = SDS_ILLUMINANTS['A']
    >>> D65 = SDS_ILLUMINANTS['D65']
    >>> plot_sds_in_chromaticity_diagram_CIE1960UCS([A, D65])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_\
Plot_SDS_In_Chromaticity_Diagram_CIE1960UCS.png
        :align: center
        :alt: plot_sds_in_chromaticity_diagram_CIE1960UCS
    """

    # "method" overrides any value a caller may have placed in kwargs.
    settings = {**kwargs, "method": "CIE 1960 UCS"}

    return plot_sds_in_chromaticity_diagram(
        sds,
        cmfs,
        chromaticity_diagram_callable_CIE1960UCS,
        annotate_kwargs=annotate_kwargs,
        plot_kwargs=plot_kwargs,
        **settings,
    )
@override_style()
def plot_sds_in_chromaticity_diagram_CIE1976UCS(
    sds: Union[
        Sequence[Union[SpectralDistribution, MultiSpectralDistributions]],
        MultiSpectralDistributions,
    ],
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    chromaticity_diagram_callable_CIE1976UCS: Callable = (
        plot_chromaticity_diagram_CIE1976UCS
    ),
    annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given spectral distribution chromaticity coordinates into the
    *CIE 1976 UCS Chromaticity Diagram*.

    Thin convenience wrapper around
    :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram` with
    the ``method`` keyword forced to ``"CIE 1976 UCS"``.

    Parameters
    ----------
    sds
        Spectral distributions or multi-spectral distributions to plot: a
        single :class:`colour.MultiSpectralDistributions` class instance, a
        list of :class:`colour.MultiSpectralDistributions` class instances
        or a list of :class:`colour.SpectralDistribution` class instances.
    cmfs
        Standard observer colour matching functions defining the spectral
        locus boundaries, in any type or form supported by the
        :func:`colour.plotting.filter_cmfs` definition.
    chromaticity_diagram_callable_CIE1976UCS
        Callable responsible for drawing the
        *CIE 1976 UCS Chromaticity Diagram*.
    annotate_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.annotate`
        definition, either one dictionary shared by every spectral
        distribution or one dictionary per spectral distribution; see
        :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram`
        for the supported special keyword arguments.
    plot_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
        either one dictionary shared by every spectral distribution or one
        dictionary per spectral distribution; see
        :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram`
        for the supported special keyword arguments.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> A = SDS_ILLUMINANTS['A']
    >>> D65 = SDS_ILLUMINANTS['D65']
    >>> plot_sds_in_chromaticity_diagram_CIE1976UCS([A, D65])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_\
Plot_SDS_In_Chromaticity_Diagram_CIE1976UCS.png
        :align: center
        :alt: plot_sds_in_chromaticity_diagram_CIE1976UCS
    """

    # "method" overrides any value a caller may have placed in kwargs.
    settings = {**kwargs, "method": "CIE 1976 UCS"}

    return plot_sds_in_chromaticity_diagram(
        sds,
        cmfs,
        chromaticity_diagram_callable_CIE1976UCS,
        annotate_kwargs=annotate_kwargs,
        plot_kwargs=plot_kwargs,
        **settings,
    )
| [
"numpy.hstack",
"colour.utilities.is_string",
"colour.utilities.validate_method",
"numpy.array",
"colour.algebra.normalise_vector",
"colour.utilities.optional",
"colour.colorimetry.sd_to_XYZ",
"numpy.reshape",
"colour.models.xy_to_XYZ",
"colour.utilities.as_float_array",
"numpy.linspace",
"col... | [((2442, 2458), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (2456, 2458), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((10574, 10590), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (10588, 10590), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((15080, 15096), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (15094, 15096), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((18243, 18259), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (18257, 18259), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((19932, 19948), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (19946, 19948), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((21641, 21657), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (21655, 21657), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((23350, 23366), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (23364, 23366), False, 'from 
colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((32491, 32507), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (32505, 32507), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((37162, 37178), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (37176, 37178), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((41877, 41893), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (41891, 41893), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((4432, 4501), 'colour.utilities.validate_method', 'validate_method', (['method', "['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS']"], {}), "(method, ['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS'])\n", (4447, 4501), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((4546, 4614), 'colour.utilities.optional', 'optional', (['spectral_locus_colours', 'CONSTANTS_COLOUR_STYLE.colour.dark'], {}), '(spectral_locus_colours, CONSTANTS_COLOUR_STYLE.colour.dark)\n', (4554, 4614), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((4728, 4746), 'colour.plotting.artist', 'artist', ([], {}), '(**settings)\n', (4734, 4746), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, 
CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((4978, 4999), 'numpy.array', 'np.array', (['([1 / 3] * 2)'], {}), '([1 / 3] * 2)\n', (4986, 4999), True, 'import numpy as np\n'), ((10554, 10570), 'colour.plotting.render', 'render', ([], {}), '(**kwargs)\n', (10560, 10570), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((12609, 12678), 'colour.utilities.validate_method', 'validate_method', (['method', "['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS']"], {}), "(method, ['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS'])\n", (12624, 12678), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((12792, 12810), 'colour.plotting.artist', 'artist', ([], {}), '(**settings)\n', (12798, 12810), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((15060, 15076), 'colour.plotting.render', 'render', ([], {}), '(**kwargs)\n', (15066, 15076), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((16803, 16872), 'colour.utilities.validate_method', 'validate_method', (['method', "['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS']"], {}), "(method, ['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS'])\n", (16818, 16872), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((16986, 17004), 'colour.plotting.artist', 'artist', ([], {}), '(**settings)\n', (16992, 
17004), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((18221, 18239), 'colour.plotting.render', 'render', ([], {}), '(**settings)\n', (18227, 18239), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((28297, 28366), 'colour.utilities.validate_method', 'validate_method', (['method', "['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS']"], {}), "(method, ['CIE 1931', 'CIE 1960 UCS', 'CIE 1976 UCS'])\n", (28312, 28366), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((28402, 28426), 'colour.colorimetry.sds_and_msds_to_sds', 'sds_and_msds_to_sds', (['sds'], {}), '(sds)\n', (28421, 28426), False, 'from colour.colorimetry import MultiSpectralDistributions, SDS_ILLUMINANTS, SpectralDistribution, sd_to_XYZ, sds_and_msds_to_sds\n'), ((28526, 28544), 'colour.plotting.artist', 'artist', ([], {}), '(**settings)\n', (28532, 28544), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((32469, 32487), 'colour.plotting.render', 'render', ([], {}), '(**settings)\n', (32475, 32487), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((5043, 5077), 'colour.models.XYZ_to_xy', 'XYZ_to_xy', (['cmfs.values', 'illuminant'], {}), '(cmfs.values, illuminant)\n', (5052, 5077), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, 
XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((8779, 8801), 'colour.utilities.as_float_array', 'as_float_array', (['[ij_l]'], {}), '([ij_l])\n', (8793, 8801), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((8817, 8829), 'colour.utilities.tsplit', 'tsplit', (['ij_l'], {}), '(ij_l)\n', (8823, 8829), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((8847, 8880), 'bisect.bisect', 'bisect.bisect', (['wavelengths', 'label'], {}), '(wavelengths, label)\n', (8860, 8880), False, 'import bisect\n'), ((9179, 9198), 'numpy.array', 'np.array', (['[-dy, dx]'], {}), '([-dy, dx])\n', (9187, 9198), True, 'import numpy as np\n'), ((13198, 13232), 'colour.models.XYZ_to_xy', 'XYZ_to_xy', (['cmfs.values', 'illuminant'], {}), '(cmfs.values, illuminant)\n', (13207, 13232), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((13690, 13706), 'colour.utilities.tstack', 'tstack', (['[ii, jj]'], {}), '([ii, jj])\n', (13696, 13706), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((5132, 5240), 'colour.utilities.optional', 'optional', (['spectral_locus_labels', '(390, 460, 470, 480, 490, 500, 510, 520, 540, 560, 580, 600, 620, 700)'], {}), '(spectral_locus_labels, (390, 460, 470, 480, 490, 500, 510, 520, \n 540, 560, 580, 600, 620, 700))\n', (5140, 5240), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((7489, 7500), 'numpy.copy', 'np.copy', (['ij'], {}), '(ij)\n', (7496, 7500), True, 'import numpy as np\n'), ((7694, 7734), 'colour.plotting.XYZ_to_plotting_colourspace', 'XYZ_to_plotting_colourspace', (['cmfs.values'], {}), '(cmfs.values)\n', 
(7721, 7734), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((7806, 7822), 'colour.models.xy_to_XYZ', 'xy_to_XYZ', (['pl_ij'], {}), '(pl_ij)\n', (7815, 7822), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((8371, 8420), 'numpy.concatenate', 'np.concatenate', (['[slp_ij[:-1], slp_ij[1:]]'], {'axis': '(1)'}), '([slp_ij[:-1], slp_ij[1:]], axis=1)\n', (8385, 8420), True, 'import numpy as np\n'), ((9231, 9250), 'numpy.array', 'np.array', (['[-dy, dx]'], {}), '([-dy, dx])\n', (9239, 9250), True, 'import numpy as np\n'), ((9421, 9440), 'numpy.array', 'np.array', (['[dy, -dx]'], {}), '([dy, -dx])\n', (9429, 9440), True, 'import numpy as np\n'), ((9590, 9623), 'colour.utilities.is_string', 'is_string', (['spectral_locus_colours'], {}), '(spectral_locus_colours)\n', (9599, 9623), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((12906, 12955), 'colour.notation.HEX_to_RGB', 'HEX_to_RGB', (['CONSTANTS_COLOUR_STYLE.colour.average'], {}), '(CONSTANTS_COLOUR_STYLE.colour.average)\n', (12916, 12955), False, 'from colour.notation import HEX_to_RGB\n'), ((13612, 13638), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'samples'], {}), '(0, 1, samples)\n', (13623, 13638), True, 'import numpy as np\n'), ((13640, 13666), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', 'samples'], {}), '(1, 0, samples)\n', (13651, 13666), True, 'import numpy as np\n'), ((13759, 13772), 'colour.models.xy_to_XYZ', 'xy_to_XYZ', (['ij'], {}), '(ij)\n', (13768, 13772), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((14001, 14045), 'colour.plotting.XYZ_to_plotting_colourspace', 
'XYZ_to_plotting_colourspace', (['XYZ', 'illuminant'], {}), '(XYZ, illuminant)\n', (14028, 14045), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((28987, 29001), 'colour.models.XYZ_to_xy', 'XYZ_to_xy', (['XYZ'], {}), '(XYZ)\n', (28996, 29001), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((31731, 31754), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['"""1"""'], {}), "('1')\n", (31749, 31754), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((31774, 31805), 'colour.colorimetry.sd_to_XYZ', 'sd_to_XYZ', (['sd', 'cmfs', 'illuminant'], {}), '(sd, cmfs, illuminant)\n', (31783, 31805), False, 'from colour.colorimetry import MultiSpectralDistributions, SDS_ILLUMINANTS, SpectralDistribution, sd_to_XYZ, sds_and_msds_to_sds\n'), ((5651, 5674), 'colour.models.XYZ_to_UCS', 'XYZ_to_UCS', (['cmfs.values'], {}), '(cmfs.values)\n', (5661, 5674), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((5730, 5883), 'colour.utilities.optional', 'optional', (['spectral_locus_labels', '(420, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570,\n 580, 590, 600, 610, 620, 630, 645, 680)'], {}), '(spectral_locus_labels, (420, 440, 450, 460, 470, 480, 490, 500, \n 510, 520, 530, 540, 550, 560, 570, 580, 590, 600, 610, 620, 630, 645, 680))\n', (5738, 5883), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((7334, 7370), 'numpy.linspace', 'np.linspace', (['ij[0][0]', 'ij[-1][0]', '(20)'], {}), '(ij[0][0], ij[-1][0], 20)\n', (7345, 7370), True, 
'import numpy as np\n'), ((7388, 7424), 'numpy.linspace', 'np.linspace', (['ij[0][1]', 'ij[-1][1]', '(20)'], {}), '(ij[0][1], ij[-1][1], 20)\n', (7399, 7424), True, 'import numpy as np\n'), ((8089, 8113), 'numpy.reshape', 'np.reshape', (['XYZ', '(-1, 3)'], {}), '(XYZ, (-1, 3))\n', (8099, 8113), True, 'import numpy as np\n'), ((9483, 9507), 'colour.algebra.normalise_vector', 'normalise_vector', (['normal'], {}), '(normal)\n', (9499, 9507), False, 'from colour.algebra import normalise_maximum, normalise_vector\n'), ((13303, 13326), 'colour.models.XYZ_to_UCS', 'XYZ_to_UCS', (['cmfs.values'], {}), '(cmfs.values)\n', (13313, 13326), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((14262, 14307), 'numpy.hstack', 'np.hstack', (['[diagram_colours, diagram_opacity]'], {}), '([diagram_colours, diagram_opacity])\n', (14271, 14307), True, 'import numpy as np\n'), ((14382, 14427), 'numpy.hstack', 'np.hstack', (['[diagram_colours, diagram_opacity]'], {}), '([diagram_colours, diagram_opacity])\n', (14391, 14427), True, 'import numpy as np\n'), ((31969, 32001), 'colour.plotting.XYZ_to_plotting_colourspace', 'XYZ_to_plotting_colourspace', (['XYZ'], {}), '(XYZ)\n', (31996, 32001), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((4812, 4829), 'colour.plotting.filter_cmfs', 'filter_cmfs', (['cmfs'], {}), '(cmfs)\n', (4823, 4829), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((6474, 6509), 'colour.models.XYZ_to_Luv', 'XYZ_to_Luv', (['cmfs.values', 'illuminant'], {}), '(cmfs.values, illuminant)\n', (6484, 6509), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, 
UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((6577, 6730), 'colour.utilities.optional', 'optional', (['spectral_locus_labels', '(420, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570,\n 580, 590, 600, 610, 620, 630, 645, 680)'], {}), '(spectral_locus_labels, (420, 440, 450, 460, 470, 480, 490, 500, \n 510, 520, 530, 540, 550, 560, 570, 580, 590, 600, 610, 620, 630, 645, 680))\n', (6585, 6730), False, 'from colour.utilities import as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, validate_method\n'), ((7890, 7909), 'colour.models.UCS_uv_to_xy', 'UCS_uv_to_xy', (['pl_ij'], {}), '(pl_ij)\n', (7902, 7909), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((9290, 9327), 'colour.algebra.normalise_vector', 'normalise_vector', (['(ij_l - equal_energy)'], {}), '(ij_l - equal_energy)\n', (9306, 9327), False, 'from colour.algebra import normalise_maximum, normalise_vector\n'), ((9345, 9372), 'colour.algebra.normalise_vector', 'normalise_vector', (['direction'], {}), '(direction)\n', (9361, 9372), False, 'from colour.algebra import normalise_maximum, normalise_vector\n'), ((13038, 13055), 'colour.plotting.filter_cmfs', 'filter_cmfs', (['cmfs'], {}), '(cmfs)\n', (13049, 13055), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((13411, 13446), 'colour.models.XYZ_to_Luv', 'XYZ_to_Luv', (['cmfs.values', 'illuminant'], {}), '(cmfs.values, illuminant)\n', (13421, 13446), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((13840, 13856), 'colour.models.UCS_uv_to_xy', 'UCS_uv_to_xy', (['ij'], {}), '(ij)\n', (13852, 13856), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, 
UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((17070, 17087), 'colour.plotting.filter_cmfs', 'filter_cmfs', (['cmfs'], {}), '(cmfs)\n', (17081, 17087), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection\n'), ((29296, 29311), 'colour.models.XYZ_to_UCS', 'XYZ_to_UCS', (['XYZ'], {}), '(XYZ)\n', (29306, 29311), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((7978, 7997), 'colour.models.Luv_uv_to_xy', 'Luv_uv_to_xy', (['pl_ij'], {}), '(pl_ij)\n', (7990, 7997), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((13925, 13941), 'colour.models.Luv_uv_to_xy', 'Luv_uv_to_xy', (['ij'], {}), '(ij)\n', (13937, 13941), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n'), ((29608, 29623), 'colour.models.XYZ_to_Luv', 'XYZ_to_Luv', (['XYZ'], {}), '(XYZ)\n', (29618, 29623), False, 'from colour.models import Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ\n')] |
from numpy.random import normal
from numpy import rint
import random
import time
from ortools.linear_solver import pywraplp
def main():
    """Build a randomly-perturbed task/node cost matrix, solve the task
    assignment problem as a MIP (OR-Tools, CBC backend) and print the
    chosen edge-node IP for every task as bash-style arrays."""
    #-------------------------------------------
    #randomize code created by Jeremy;
    def prMatrix(x):
        # Pretty-print a cost matrix, one comma-separated row per line
        for row in x:
            for val in row:
                print(val,end=',')
            print()
        print()
    # example array of task costs on different nodes
    n=32 #number of tasks to be allocated
    x = [ [50]*n, [40]*n, [40]*n, [10]*n] # 4 nodes (one cost row per node)
    print (x)
    print('initial x:')
    print('----------')
    prMatrix(x)
    ####### @jsinger new perturbation code for cost matrix
    # thresholds for changing costs
    COEFFICIENT_OF_VARIATION=0.5 # c.o.v. = stdev / mean = sigma/mu
    # try different values - between 0 and 1?
    for i in range(len(x)):
        for j in range(len(x[i])):
            # Draw each cost from a normal distribution centred on the
            # original value, with stdev proportional to it
            mu = x[i][j]
            sigma = COEFFICIENT_OF_VARIATION * mu
            updated_value = int(rint(normal(mu, sigma)))
            x[i][j] = max(0, updated_value) # no negative costs!
    ##########
    print('final x:')
    print('----------')
    prMatrix(x)
    #-------------------------------------------
    #begin Google-or Tool;
    # Data
    costs = x
    num_workers = len(costs)
    num_tasks = len(costs[0])
    # Per-node capacity: fraction of the n tasks each node may take on
    node_cap = [(n*0.15),(n*0.2),(n*0.2),(n*0.5)]
    print (node_cap)
    # Solver
    # Create the MIP solver with the CBC backend.
    #solver = pywraplp.Solver.CreateSolver('MIP')
    solver = pywraplp.Solver('SolveAssignmentProblem',
                   pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
    start = time.time()
    # IP addresses of the candidate edge nodes (same order as cost rows)
    edge_devices = ['192.168.1.222', '192.168.1.168', '192.168.1.182', '192.168.1.131']
    new_edge_devices = []
    e_devices='('
    # Variables
    # x[i, j] is an array of 0-1 variables, which will be 1
    # if worker i is assigned to task j.
    x = {}
    for i in range(num_workers):
        for j in range(num_tasks):
            x[i, j] = solver.IntVar(0, 1, '')
    # Constraints
    # Number of tasks assigned to each node less than the node capacity!
    for i in range(num_workers):
        solver.Add(solver.Sum([x[i, j] for j in range(num_tasks)]) <= node_cap[i])
    # Each task is assigned to exactly one worker.
    for j in range(num_tasks):
        solver.Add(solver.Sum([x[i, j] for i in range(num_workers)]) == 1)
    # Objective: minimise the total assignment cost
    objective_terms = []
    for i in range(num_workers):
        for j in range(num_tasks):
            objective_terms.append(costs[i][j] * x[i, j])
    solver.Minimize(solver.Sum(objective_terms))
    # Solve
    status = solver.Solve()
    print('Minimum cost = ', solver.Objective().Value())
    #print()
    final_Workers_IP=[0]*len(costs[1])
    #print()
    # Print solution.
    if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
        #print('Total cost = ', solver.Objective().Value(), '\n')
        for i in range(num_workers):
            for j in range(num_tasks):
                # Test if x[i,j] is 1 (with tolerance for floating point arithmetic).
                if x[i, j].solution_value() > 0.5:
                    final_Workers_IP[j]='\''+edge_devices [i]+'\' '
                    e_devices+='\''+edge_devices [i]+'\' '
                    print('Edge node %d assigned to task %d. Cost = %d' % (i, j, costs[i][j]))
    print()
    end = time.time()
    print("Time = ", round(end - start, 4), "seconds")
    #print (new_edge_devices)
    # Trim the trailing space and close the bash-style array of used IPs
    e_devices = e_devices[:-1]
    e_devices+=')'
    # Build '(<ip for task 0> <ip for task 1> ...)' in task order
    finalIPsBashFormat='('
    for i in range(num_tasks):
        finalIPsBashFormat+=final_Workers_IP[i]
    finalIPsBashFormat= finalIPsBashFormat[:-1]
    finalIPsBashFormat+=')'
    print ()
    print(finalIPsBashFormat)
if __name__ == '__main__':
    main()
| [
"numpy.random.normal",
"time.time",
"ortools.linear_solver.pywraplp.Solver"
] | [((1508, 1601), 'ortools.linear_solver.pywraplp.Solver', 'pywraplp.Solver', (['"""SolveAssignmentProblem"""', 'pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING'], {}), "('SolveAssignmentProblem', pywraplp.Solver.\n CBC_MIXED_INTEGER_PROGRAMMING)\n", (1523, 1601), False, 'from ortools.linear_solver import pywraplp\n'), ((1639, 1650), 'time.time', 'time.time', ([], {}), '()\n', (1648, 1650), False, 'import time\n'), ((3410, 3421), 'time.time', 'time.time', ([], {}), '()\n', (3419, 3421), False, 'import time\n'), ((981, 998), 'numpy.random.normal', 'normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (987, 998), False, 'from numpy.random import normal\n')] |
# coding: utf-8
from __future__ import print_function, division
import numpy as np
import pyexotica as exo
__all__ = ["check_dynamics_solver_derivatives"]
def check_dynamics_solver_derivatives(name, urdf=None, srdf=None, joint_group=None):
    """Sanity-check an Exotica dynamics solver against finite differences.

    Verifies the dimensions of f/fx/fu, that simulate equals integrate for
    the same tangent step, that the analytic Jacobians agree with their
    finite-difference counterparts, and that the joint computation
    (compute_derivatives + get_fx/get_fu) matches the separate evaluations.

    :param name: (str) name of the dynamics solver type to instantiate
    :param urdf: (str or None) URDF; together with srdf and joint_group the
                 solver is created inside a full scene
    :param srdf: (str or None) SRDF
    :param joint_group: (str or None) joint group name
    """
    ds = None
    if urdf is not None and srdf is not None and joint_group is not None:
        # Build a full scene so the solver is attached to the robot model
        my_scene_init = exo.Initializers.SceneInitializer()
        my_scene_init[1]['URDF'] = urdf
        my_scene_init[1]['SRDF'] = srdf
        my_scene_init[1]['JointGroup'] = joint_group
        my_scene_init[1]['DynamicsSolver'] = [(name, {'Name': u'MyDynamicsSolver'})]
        scene = exo.Setup.create_scene(exo.Initializers.Initializer(my_scene_init))
        ds = scene.get_dynamics_solver()
    else:
        ds = exo.Setup.create_dynamics_solver((name, {'Name': u'MyDynamicsSolver'}))

    # Random state; if the state contains a quaternion (ndx != nq + nv),
    # set it to the identity so the state is valid
    x = np.random.random((ds.nx,))
    if ds.ndx != ds.nq + ds.nv:
        x[3:6] = 0.
        x[6] = 1.
    u = np.random.random((ds.nu,))

    # Evaluate each quantity once (previously f/fx/fu were each evaluated
    # multiple times per assertion) and check dimensions
    dx = ds.f(x, u)
    fx = ds.fx(x, u)
    fu = ds.fu(x, u)
    # f should return a tangent-space vector of length ndx
    assert dx.shape[0] == ds.ndx
    # fx should be (ndx, ndx); fu should be (ndx, nu)
    assert fx.shape[0] == ds.ndx and fx.shape[1] == ds.ndx
    assert fu.shape[0] == ds.ndx and fu.shape[1] == ds.nu

    # Check integration / simulate agree for the same tangent step
    np.testing.assert_array_equal(ds.simulate(x, u, 0.01), ds.integrate(x, dx, 0.01))

    # Analytic Jacobians must match finite differences
    np.testing.assert_allclose(fu, ds.fu_fd(x, u), rtol=1e-5, atol=1e-5)
    np.testing.assert_allclose(fx, ds.fx_fd(x, u), rtol=1e-5, atol=1e-5)

    # Joint computation must reproduce the separate evaluations
    ds.compute_derivatives(x, u)
    np.testing.assert_allclose(fx, ds.get_fx(), rtol=1e-5, atol=1e-5)
    np.testing.assert_allclose(fu, ds.get_fu(), rtol=1e-5, atol=1e-5)
| [
"numpy.random.random",
"numpy.testing.assert_allclose",
"pyexotica.Initializers.Initializer",
"pyexotica.Initializers.SceneInitializer",
"pyexotica.Setup.create_dynamics_solver"
] | [((861, 887), 'numpy.random.random', 'np.random.random', (['(ds.nx,)'], {}), '((ds.nx,))\n', (877, 887), True, 'import numpy as np\n'), ((995, 1021), 'numpy.random.random', 'np.random.random', (['(ds.nu,)'], {}), '((ds.nu,))\n', (1011, 1021), True, 'import numpy as np\n'), ((1754, 1815), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fu', 'fu_fd'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(fu, fu_fd, rtol=1e-05, atol=1e-05)\n', (1780, 1815), True, 'import numpy as np\n'), ((2032, 2093), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fx', 'fx_fd'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(fx, fx_fd, rtol=1e-05, atol=1e-05)\n', (2058, 2093), True, 'import numpy as np\n'), ((2214, 2278), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fx', 'fx_joint'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(fx, fx_joint, rtol=1e-05, atol=1e-05)\n', (2240, 2278), True, 'import numpy as np\n'), ((2281, 2345), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fu', 'fu_joint'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(fu, fu_joint, rtol=1e-05, atol=1e-05)\n', (2307, 2345), True, 'import numpy as np\n'), ((355, 390), 'pyexotica.Initializers.SceneInitializer', 'exo.Initializers.SceneInitializer', ([], {}), '()\n', (388, 390), True, 'import pyexotica as exo\n'), ((757, 828), 'pyexotica.Setup.create_dynamics_solver', 'exo.Setup.create_dynamics_solver', (["(name, {'Name': u'MyDynamicsSolver'})"], {}), "((name, {'Name': u'MyDynamicsSolver'}))\n", (789, 828), True, 'import pyexotica as exo\n'), ((648, 691), 'pyexotica.Initializers.Initializer', 'exo.Initializers.Initializer', (['my_scene_init'], {}), '(my_scene_init)\n', (676, 691), True, 'import pyexotica as exo\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from .analysis import polyfit
#for task 2E
def plot_water_levels(station, dates, levels):
    """Display a plot of the water level data against time for a station.

    Args:
        station: station object providing ``name`` and ``typical_range``.
        dates: sequence of dates for the x-axis.
        levels: sequence of water levels (m) matching ``dates``.
    """
    # Plot the level time series
    plt.plot(dates, levels)

    # Add plot lines for the typical low and high levels; skip them when the
    # station has no (valid) typical range rather than crashing
    levelRange = station.typical_range
    if levelRange is not None:
        plt.axhline(y=levelRange[0])
        plt.axhline(y=levelRange[1])

    # Add axis labels, rotate date labels and add plot title
    plt.xlabel('date')
    plt.ylabel('water level (m)')
    plt.xticks(rotation=45)
    plt.title(station.name)

    # Display plot
    plt.tight_layout()  # This makes sure plot does not cut off date labels
    plt.show()
#for task 2F
def plot_water_level_with_fit(station, dates, levels, p):
    """Plot the measured water levels together with a degree-p best-fit
    polynomial and the station's typical low/high range."""
    # Fit the polynomial; d0 is the date offset used to condition the fit
    best_fit, d0 = polyfit(dates, levels, p)

    # Convert dates to float day numbers relative to the offset
    day_numbers = matplotlib.dates.date2num(dates) - d0
    sample_days = np.linspace(day_numbers[0], day_numbers[-1], 30)

    # Raw data as points, fitted curve as a line
    plt.plot(day_numbers, levels, '.')
    plt.plot(sample_days, best_fit(sample_days))

    # Typical low/high levels as horizontal red lines
    low, high = station.typical_range[0], station.typical_range[1]
    plt.plot(sample_days, np.linspace(low, low, 30), "-r")
    plt.plot(sample_days, np.linspace(high, high, 30), "-r")

    # Titles and labels
    plt.xlabel("days ago")
    plt.ylabel("water level(m)")
    plt.title(station.name)
    plt.show()
"matplotlib.dates.date2num",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((296, 319), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels'], {}), '(dates, levels)\n', (304, 319), True, 'import matplotlib.pyplot as plt\n'), ((417, 445), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'levelRange[0]'}), '(y=levelRange[0])\n', (428, 445), True, 'import matplotlib.pyplot as plt\n'), ((450, 478), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'levelRange[1]'}), '(y=levelRange[1])\n', (461, 478), True, 'import matplotlib.pyplot as plt\n'), ((546, 564), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (556, 564), True, 'import matplotlib.pyplot as plt\n'), ((569, 598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level (m)"""'], {}), "('water level (m)')\n", (579, 598), True, 'import matplotlib.pyplot as plt\n'), ((603, 626), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (613, 626), True, 'import matplotlib.pyplot as plt\n'), ((632, 655), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (641, 655), True, 'import matplotlib.pyplot as plt\n'), ((680, 698), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (696, 698), True, 'import matplotlib.pyplot as plt\n'), ((757, 767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (765, 767), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1057), 'numpy.linspace', 'np.linspace', (['dates[0]', 'dates[-1]', '(30)'], {}), '(dates[0], dates[-1], 30)\n', (1032, 1057), True, 'import numpy as np\n'), ((1072, 1100), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels', '"""."""'], {}), "(dates, levels, '.')\n", (1080, 1100), True, 'import matplotlib.pyplot as plt\n'), ((1385, 1407), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""days ago"""'], {}), "('days ago')\n", (1395, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level(m)"""'], {}), "('water level(m)')\n", 
(1422, 1440), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1468), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (1454, 1468), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1484), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1482, 1484), True, 'import matplotlib.pyplot as plt\n'), ((970, 1002), 'matplotlib.dates.date2num', 'matplotlib.dates.date2num', (['dates'], {}), '(dates)\n', (995, 1002), False, 'import matplotlib\n'), ((1192, 1259), 'numpy.linspace', 'np.linspace', (['station.typical_range[0]', 'station.typical_range[0]', '(30)'], {}), '(station.typical_range[0], station.typical_range[0], 30)\n', (1203, 1259), True, 'import numpy as np\n'), ((1281, 1348), 'numpy.linspace', 'np.linspace', (['station.typical_range[1]', 'station.typical_range[1]', '(30)'], {}), '(station.typical_range[1], station.typical_range[1], 30)\n', (1292, 1348), True, 'import numpy as np\n')] |
import numpy as np
from copy import deepcopy
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist
import networkx as nx
from molfunc.atoms import NNAtom, Atom
from molfunc.atoms import smiles_to_atoms, xyz_file_to_atoms
from molfunc.bonds import get_avg_bond_length
from molfunc.exceptions import *
from molfunc.utils import requires_atoms
from molfunc_ext import get_minimised_coords
from rdkit import rdBase
rdBase.DisableLog('rdApp.error')
class Molecule:
    """Base molecule: a named list of atoms plus a connectivity graph.

    Atoms may come from a SMILES string (via RDKit), an .xyz file or an
    explicit list; 'bonds' are inferred purely from interatomic distances.
    """
    @requires_atoms()
    def make_graph(self, rel_tolerance=0.2):
        """
        Make the molecular graph from the 'bonds' determined on a distance
        criteria. No distinction is made between single, double etc. bond types
        :param rel_tolerance: (float) Relative tolerance on what is classed
                              as a bond. If a distance is
                              < (1 + rel_tolerance) * r_avg
                              then they are 'bonded'
        :return: None
        """
        graph = nx.Graph()
        # One node per atom, labelled with its element symbol
        for i in range(self.n_atoms):
            graph.add_node(i, atom_label=self.atoms[i].label)
        coordinates = self.get_coordinates()
        dist_mat = distance_matrix(coordinates, coordinates)
        # Loop over the unique pairs of atoms and add 'bonds'
        for i in range(self.n_atoms):
            for j in range(i+1, self.n_atoms):
                avg_bond_length = get_avg_bond_length(self.atoms[i].label,
                                                    self.atoms[j].label)
                # If the atoms are close enough add a bond (edge)
                if dist_mat[i, j] <= avg_bond_length * (1.0 + rel_tolerance):
                    graph.add_edge(i, j)
        self.graph = graph
        return None
    @requires_atoms()
    def set_atomic_valancies(self):
        """Set the atomic valency for each atom. Double/triple bonds are *not*
        distinct from single bonds"""
        # Valency here is simply the number of graph neighbours
        for i in range(self.n_atoms):
            self.atoms[i].valence = len(list(self.graph.neighbors(i)))
        return None
    @requires_atoms()
    def translate(self, vec):
        """Translate the molecule by vector (np.ndarray, length 3)"""
        assert vec.shape == (3,)
        for atom in self.atoms:
            atom.translate(vec)
        return None
    @requires_atoms()
    def print_xyz_file(self, title_line=''):
        """Print a standard .xyz file from the Molecule's atoms

        :param title_line: (str) second (comment) line of the .xyz file
        """
        with open(f'{self.name}.xyz', 'w') as xyz_file:
            print(f'{self.n_atoms}\n'
                  f'{title_line}', file=xyz_file)
            for atom in self.atoms:
                x, y, z = atom.coord
                print(f'{atom.label:<3}{x:^10.5f}{y:^10.5f}{z:^10.5f}',
                      file=xyz_file)
        return None
    @requires_atoms()
    def get_coordinates(self):
        """Return a n_atoms x 3 shape np.ndarray containing xyz coordinates"""
        return np.array([atom.coord for atom in self.atoms])
    def set_atoms(self, atoms):
        """Set the atoms (list(molfunc.atoms.Atom)) and the number of atoms"""
        assert type(atoms) is list
        if len(atoms) > 0:
            assert isinstance(atoms[0], Atom)
        self.atoms = atoms
        self.n_atoms = len(atoms)
        return None
    def __init__(self, name='mol', xyz_filename=None, smiles=None, atoms=None):
        """
        Base molecule class. Initialised in order of priority: SMILES string,
        xyz file, atoms
        ------------------------ Keyword Arguments ----------------------------
        :param name: (str)
        :param xyz_filename: (str) .xyz filename (or filepath) from which atoms
                             will be extracted
        :param smiles: (str) SMILES string defining the molecule from which a
                       3D structure as atoms are extracted using RDKit
        :param atoms: (list(molfunc.atom.Atom)) List of atoms used to
                      initialise the molecule
        """
        self.name = str(name)
        # n_atoms/graph/atoms are populated by set_atoms()/make_graph() below
        self.n_atoms = 0
        self.graph = None
        self.atoms = None
        if smiles is not None:
            # Use RDKit to convert SMILES -> atoms
            self.set_atoms(atoms=smiles_to_atoms(smiles))
        if xyz_filename is not None:
            # Initialisation with an xyz file takes precedence over SMILES
            self.set_atoms(atoms=xyz_file_to_atoms(xyz_filename))
        if atoms is not None:
            # An explicit atom list takes precedence over both of the above
            self.set_atoms(atoms)
        if self.n_atoms != 0:
            # If there are atoms in the molecule set the graph and valancies
            self.make_graph()
            self.set_atomic_valancies()
class CoreMolecule(Molecule):
    """Molecule whose marked monovalent atoms are deleted so fragments can
    be attached in their place."""
    def _check_datom_idxs(self):
        """Ensure that all atoms to be replaced by fragments are monovalent

        :raises DatomsNotValid: if an index is out of range or the atom has
                                a valency other than 1
        """
        for i in self.datom_idxs:
            if not 0 <= i < self.n_atoms:
                raise DatomsNotValid(f'Can\'t functionalise an atom {i} - not '
                                     f'in the list of atoms')
            if self.atoms[i].valence == 1:
                continue
            raise DatomsNotValid(f'Cannot modify atom {self.atoms[i].label} '
                                 f'with valency {self.atoms[i].valence}')
        return None
    def get_nn_atoms(self):
        """Return the nearest neighbours to all the datom_idxs"""
        return [self.get_datom_nn(i) for i in self.datom_idxs]
    def get_datom_nn(self, datom_idx):
        """
        Return the nearest neighbour atom to a particular atom to delete
        (datom) along with the shift vector e.g. for a datom_idx = 1, the
        nearest neighbour is C and the vector
                vec -->
          j         i                  atoms:
          C    --   H                  C, 0, 0, 0
         /                             H, 1, 0, 0
        H                              H, -1, 0, -1
        :param datom_idx: (int) index of the atom to delete
        :return: (molfunc.atoms.NNatom)
        :raises DatomsNotValid: if the datom has no bonded neighbour
        """
        # Iterate through the bonds in the molecule; the first bond
        # containing the datom determines its (single) neighbour
        for (i, j) in self.graph.edges:
            vec = self.atoms[i].coord - self.atoms[j].coord
            if i == datom_idx:
                return NNAtom(atom=self.atoms[j], shift_vec=vec)
            if j == datom_idx:
                return NNAtom(atom=self.atoms[i], shift_vec=-vec)
        raise DatomsNotValid('Atom to delete did not have a nearest neighbour')
    def _delete_datoms(self):
        """Remove all datoms from the atoms list and set the atoms"""
        return self.set_atoms(atoms=[atom for i, atom in enumerate(self.atoms)
                                     if i not in self.datom_idxs])
    def __init__(self, name='mol', xyz_filename=None, smiles=None,
                 atoms_to_del=None, atoms=None):
        """
        Core molecule class
        :param name: (str)
        :param atoms_to_del: (list(int)) List of atom indexes to delete and
                             swap for a fragment. *Indexed from 1*
        :param xyz_filename: (str)
        :param smiles: (str) SMILES string
        :param atoms: (list(molfunc.atom.Atom)) List of atoms
        """
        super().__init__(name, xyz_filename, smiles, atoms)
        # Atom indexes to delete are indexed from 1. molfunc however indexes
        # atoms from 0 so atoms_to_del are the indexes minus 1
        self.datom_idxs = [i - 1 for i in atoms_to_del] if atoms_to_del is not None else []
        self._check_datom_idxs()
        # Nearest neighbour atoms to those deleted to enable translation of the
        # fragment
        self.nn_atoms = self.get_nn_atoms()
        # Remove the atoms in the datom_idxs list from the atoms
        self._delete_datoms()
class FragmentMolecule(Molecule):
    """A substituent carrying a placeholder 'R' atom that is replaced when the
    fragment is bonded onto a core molecule."""

    def get_ratom_nearest_neighbour(self):
        """
        Find the nearest neighbour of the placeholder atom labelled 'R'. The
        R atom is deleted when the fragment and core are bonded, so it must be
        monovalent.
        :return: (molfunc.atoms.NNatom)
        :raises RAtomNotFound: if no R atom exists
        :raises RAtomInvalidValence: if the R atom has more than one bond
        """
        if all(atom.label != 'R' for atom in self.atoms):
            raise RAtomNotFound
        # Preferred path: a bonded, monovalent R atom. Check each edge in
        # both orientations so i is always the candidate R atom
        for edge in self.graph.edges:
            for i, j in (tuple(edge), tuple(reversed(edge))):
                if self.atoms[i].label != 'R':
                    continue
                if self.atoms[i].valence > 1:
                    raise RAtomInvalidValence
                return NNAtom(atom=self.atoms[j])
        # Fallback: an R atom with no bonds — take the spatially closest atom
        for idx, atom in enumerate(self.atoms):
            if atom.label != 'R':
                continue
            dists = cdist(np.array([atom.coord]), self.get_coordinates())
            # Index 0 in the sorted order is the R atom itself; take the next
            closest_idx = np.argsort(dists)[0, 1]
            return NNAtom(atom=self.atoms[closest_idx])
        raise RAtomNotFound

    def minimise_repulsion(self, other_mol):
        """
        Rigid-body rotate this fragment (about its nn_atom) to minimise the
        repulsive 'energy' against another (core) molecule.
        :param other_mol: (molfunc.CoreMolecule)
        """
        # Work in a frame where the nearest-neighbour atom sits at the origin
        other_coords = other_mol.get_coordinates() - self.nn_atom.coord
        self_coords = self.get_coordinates() - self.nn_atom.coord
        rotated = get_minimised_coords(py_coords=self_coords,
                                      py_other_coords=other_coords)
        # Shift back out of the nn_atom-centred frame
        for idx in range(self.n_atoms):
            self.atoms[idx].coord = rotated[idx] + self.nn_atom.coord
        return None

    def _delete_r_atom(self):
        """Remove the placeholder atom labelled 'R' from the molecule."""
        kept = [atom for atom in self.atoms if atom.label != 'R']
        return self.set_atoms(atoms=kept)

    def shift_to_core_atom(self, core_atom):
        """Translate the fragment so its R-neighbour sits an ideal bond length
        from the given core atom (molfunc.atoms.NNAtom)."""
        # First superimpose the fragment nn atom on the core atom
        self.translate(vec=core_atom.coord - self.nn_atom.coord)
        # Then back off along the core shift vector by the ideal bond length
        bond_length = get_avg_bond_length(atom_i_label=core_atom.label,
                                          atom_j_label=self.nn_atom.label)
        self.translate(vec=core_atom.shift_vec * bond_length)
        # nn_atom.coord becomes the rotation origin for minimise_repulsion
        self.nn_atom.coord = core_atom.coord + core_atom.shift_vec * bond_length
        return None

    def __init__(self, name='mol', xyz_filename=None, smiles=None, atoms=None):
        """
        Fragment molecule. For a methyl fragment self.atoms would be:
            C,  0,  0,  0
            R,  1,  0,  1
            H, -1,  0,  1
            H,  0,  1, -1
            H,  0, -1, -1
        where R is removed and replaced by the core molecule; C (closest to R)
        becomes the nn_atom.
        :param name: (str)
        :param xyz_filename: (str)
        :param smiles: (str) SMILES string e.g. 'C[*]' or 'C[Fr]'
        :param atoms: (list(molfunc.atom.Atom)) list of atoms
        """
        super().__init__(name, xyz_filename, smiles, atoms)
        # Locate the neighbour of R, then drop R itself
        self.nn_atom = self.get_ratom_nearest_neighbour()
        self._delete_r_atom()
class CombinedMolecule(Molecule):
    """Core molecule with one or more fragments bonded at its datom sites."""

    def _check(self):
        """Check for features required to build this combined molecule.

        :raises CombinationFailed: if the fragment count does not match the
                                   number of core datoms, or the core has no
                                   nearest-neighbour atoms set
        """
        if len(self.fragments) != self.n_fragments_to_add:
            # NOTE: adjacent literals concatenate — keep the trailing space
            raise CombinationFailed('Number of fragments is not equal to the '
                                    'number of fragments to add (core datoms)')
        # The core must have a nearest neighbour recorded for every datom
        if len(self.core_mol.nn_atoms) == 0:
            raise CombinationFailed('Atoms to delete in the core molecule '
                                    'had no nearest neighbours set')
        return None

    def build(self):
        """Build the combined molecule by shifting each fragment to its core
        datom position and minimising repulsion at each step."""
        self._check()
        atoms = self.core_mol.atoms.copy()
        for i, fragment_mol in enumerate(self.fragments):
            fragment_mol.shift_to_core_atom(self.core_mol.nn_atoms[i])
            # Repulsion is to both the core and previously added fragments
            fragment_mol.minimise_repulsion(other_mol=Molecule(atoms=atoms))
            atoms += fragment_mol.atoms
        self.set_atoms(atoms)
        return None

    def __init__(self, core_mol, frag_smiles=None, frag_smiles_list=None,
                 name='mol', fragment=None, fragments=None):
        """
        Combined molecule class.

        Fragments can be added from a SMILES string (e.g. C[*]), a list of
        SMILES strings (if the core atom is functionalised more than once),
        a FragmentMolecule, or a list of FragmentMolecules.

        :param name: (str) name of the molecule
        :param core_mol: (molfunc.molecules.CoreMolecule) core molecule that
                         will be functionalised
        :param frag_smiles: (str) SMILES string added at *all* the
                            core_mol.datom_idxs positions, e.g. methyl: 'C[*]'
        :param frag_smiles_list: (list(str)) SMILES strings added in sequence;
                                 length must equal len(core_mol.datom_idxs)
        :param fragment: (molfunc.molecules.FragmentMolecule) pre-generated
                         fragment, e.g. from a .xyz file containing an 'R' atom
        :param fragments: (list(molfunc.molecules.FragmentMolecule)) fragments
                          added in sequence; length must equal
                          len(core_mol.datom_idxs)
        :raises CombinationFailed: if the core molecule has no datoms
        """
        super().__init__(name=name)
        self.core_mol = core_mol
        self.n_fragments_to_add = len(core_mol.datom_idxs)
        if self.n_fragments_to_add == 0:
            raise CombinationFailed('Core molecule had no datoms')
        # Later sources take precedence: smiles < smiles_list < fragment
        # < fragments (each assignment below overwrites the previous one)
        self.fragments = []
        if frag_smiles is not None:
            self.fragments = [FragmentMolecule(smiles=frag_smiles)
                              for _ in range(self.n_fragments_to_add)]
        if frag_smiles_list is not None:
            self.fragments = [FragmentMolecule(smiles=smiles)
                              for smiles in frag_smiles_list]
        if fragment is not None:
            assert isinstance(fragment, FragmentMolecule)
            self.fragments = [deepcopy(fragment)
                              for _ in range(self.n_fragments_to_add)]
        if fragments is not None:
            assert all(isinstance(fr, FragmentMolecule) for fr in fragments)
            self.fragments = fragments
        # If there are some fragments then build the combined molecule
        if len(self.fragments) > 0:
            self.build()
| [
"molfunc.atoms.xyz_file_to_atoms",
"molfunc_ext.get_minimised_coords",
"molfunc.atoms.NNAtom",
"scipy.spatial.distance_matrix",
"molfunc.utils.requires_atoms",
"networkx.Graph",
"molfunc.bonds.get_avg_bond_length",
"numpy.array",
"numpy.argsort",
"copy.deepcopy",
"rdkit.rdBase.DisableLog",
"mo... | [((440, 472), 'rdkit.rdBase.DisableLog', 'rdBase.DisableLog', (['"""rdApp.error"""'], {}), "('rdApp.error')\n", (457, 472), False, 'from rdkit import rdBase\n'), ((497, 513), 'molfunc.utils.requires_atoms', 'requires_atoms', ([], {}), '()\n', (511, 513), False, 'from molfunc.utils import requires_atoms\n'), ((1806, 1822), 'molfunc.utils.requires_atoms', 'requires_atoms', ([], {}), '()\n', (1820, 1822), False, 'from molfunc.utils import requires_atoms\n'), ((2113, 2129), 'molfunc.utils.requires_atoms', 'requires_atoms', ([], {}), '()\n', (2127, 2129), False, 'from molfunc.utils import requires_atoms\n'), ((2354, 2370), 'molfunc.utils.requires_atoms', 'requires_atoms', ([], {}), '()\n', (2368, 2370), False, 'from molfunc.utils import requires_atoms\n'), ((2838, 2854), 'molfunc.utils.requires_atoms', 'requires_atoms', ([], {}), '()\n', (2852, 2854), False, 'from molfunc.utils import requires_atoms\n'), ((1048, 1058), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1056, 1058), True, 'import networkx as nx\n'), ((1224, 1265), 'scipy.spatial.distance_matrix', 'distance_matrix', (['coordinates', 'coordinates'], {}), '(coordinates, coordinates)\n', (1239, 1265), False, 'from scipy.spatial import distance_matrix\n'), ((2980, 3025), 'numpy.array', 'np.array', (['[atom.coord for atom in self.atoms]'], {}), '([atom.coord for atom in self.atoms])\n', (2988, 3025), True, 'import numpy as np\n'), ((9786, 9854), 'molfunc_ext.get_minimised_coords', 'get_minimised_coords', ([], {'py_coords': 'coords', 'py_other_coords': 'other_coords'}), '(py_coords=coords, py_other_coords=other_coords)\n', (9806, 9854), False, 'from molfunc_ext import get_minimised_coords\n'), ((10649, 10736), 'molfunc.bonds.get_avg_bond_length', 'get_avg_bond_length', ([], {'atom_i_label': 'core_atom.label', 'atom_j_label': 'self.nn_atom.label'}), '(atom_i_label=core_atom.label, atom_j_label=self.nn_atom\n .label)\n', (10668, 10736), False, 'from molfunc.bonds import get_avg_bond_length\n'), ((9171, 
9207), 'molfunc.atoms.NNAtom', 'NNAtom', ([], {'atom': 'self.atoms[nn_atom_idx]'}), '(atom=self.atoms[nn_atom_idx])\n', (9177, 9207), False, 'from molfunc.atoms import NNAtom, Atom\n'), ((1449, 1510), 'molfunc.bonds.get_avg_bond_length', 'get_avg_bond_length', (['self.atoms[i].label', 'self.atoms[j].label'], {}), '(self.atoms[i].label, self.atoms[j].label)\n', (1468, 1510), False, 'from molfunc.bonds import get_avg_bond_length\n'), ((6292, 6333), 'molfunc.atoms.NNAtom', 'NNAtom', ([], {'atom': 'self.atoms[j]', 'shift_vec': 'vec'}), '(atom=self.atoms[j], shift_vec=vec)\n', (6298, 6333), False, 'from molfunc.atoms import NNAtom, Atom\n'), ((6388, 6430), 'molfunc.atoms.NNAtom', 'NNAtom', ([], {'atom': 'self.atoms[i]', 'shift_vec': '(-vec)'}), '(atom=self.atoms[i], shift_vec=-vec)\n', (6394, 6430), False, 'from molfunc.atoms import NNAtom, Atom\n'), ((8636, 8662), 'molfunc.atoms.NNAtom', 'NNAtom', ([], {'atom': 'self.atoms[j]'}), '(atom=self.atoms[j])\n', (8642, 8662), False, 'from molfunc.atoms import NNAtom, Atom\n'), ((8924, 8946), 'numpy.array', 'np.array', (['[atom.coord]'], {}), '([atom.coord])\n', (8932, 8946), True, 'import numpy as np\n'), ((9123, 9144), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (9133, 9144), True, 'import numpy as np\n'), ((15535, 15553), 'copy.deepcopy', 'deepcopy', (['fragment'], {}), '(fragment)\n', (15543, 15553), False, 'from copy import deepcopy\n'), ((4262, 4285), 'molfunc.atoms.smiles_to_atoms', 'smiles_to_atoms', (['smiles'], {}), '(smiles)\n', (4277, 4285), False, 'from molfunc.atoms import smiles_to_atoms, xyz_file_to_atoms\n'), ((4433, 4464), 'molfunc.atoms.xyz_file_to_atoms', 'xyz_file_to_atoms', (['xyz_filename'], {}), '(xyz_filename)\n', (4450, 4464), False, 'from molfunc.atoms import smiles_to_atoms, xyz_file_to_atoms\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numpy as np
import pytest
from zarr.util import (normalize_shape, normalize_chunks, is_total_slice,
normalize_resize_args, human_readable_size, normalize_order,
guess_chunks, info_html_report, info_text_report,
normalize_fill_value)
def test_normalize_shape():
    """normalize_shape accepts a tuple, list or int; rejects None and str."""
    for arg in ((100,), [100], 100):
        assert normalize_shape(arg) == (100,)
    with pytest.raises(TypeError):
        normalize_shape(None)
    with pytest.raises(ValueError):
        normalize_shape('foo')
def test_normalize_chunks():
    """normalize_chunks fills in missing/None dimensions from the shape."""
    cases = [
        ((10,), (100,), (10,)),
        ([10], (100,), (10,)),
        (10, (100,), (10,)),
        ((10, 10), (100, 10), (10, 10)),
        (10, (100, 10), (10, 10)),
        ((10, None), (100, 10), (10, 10)),
        (30, (100, 20, 10), (30, 20, 10)),
        ((30,), (100, 20, 10), (30, 20, 10)),
        ((30, None), (100, 20, 10), (30, 20, 10)),
        ((30, None, None), (100, 20, 10), (30, 20, 10)),
        ((30, 20, None), (100, 20, 10), (30, 20, 10)),
        ((30, 20, 10), (100, 20, 10), (30, 20, 10)),
    ]
    for chunks_arg, shape, expect in cases:
        assert normalize_chunks(chunks_arg, shape, 1) == expect
    with pytest.raises(ValueError):
        normalize_chunks('foo', (100,), 1)
    with pytest.raises(ValueError):
        normalize_chunks((100, 10), (100,), 1)
    # None requests auto-chunking
    assert normalize_chunks(None, (100,), 1) == (100,)
def test_is_total_slice():
    """is_total_slice detects selections covering the whole array."""
    # 1D: selections spanning the full extent
    for total in (Ellipsis, slice(None), slice(0, 100)):
        assert is_total_slice(total, (100,))
    for partial in (slice(0, 50), slice(0, 100, 2)):
        assert not is_total_slice(partial, (100,))
    # 2D
    for total in (Ellipsis, slice(None), (slice(None), slice(None)),
                  (slice(0, 100), slice(0, 100))):
        assert is_total_slice(total, (100, 100))
    for partial in ((slice(0, 100), slice(0, 50)),
                    (slice(0, 50), slice(0, 100)),
                    (slice(0, 50), slice(0, 50)),
                    (slice(0, 100, 2), slice(0, 100))):
        assert not is_total_slice(partial, (100, 100))
    with pytest.raises(TypeError):
        is_total_slice('foo', (100,))
def test_normalize_resize_args():
    """normalize_resize_args accepts int/tuple sizes; None keeps old size."""
    # 1D
    assert normalize_resize_args((100,), 200) == (200,)
    assert normalize_resize_args((100,), (200,)) == (200,)
    # 2D: size may be given as a single tuple or positional args
    for args in (((200, 100),), ((200, None),), (200, 100), (200, None)):
        assert normalize_resize_args((100, 100), *args) == (200, 100)
    with pytest.raises(ValueError):
        normalize_resize_args((100,), (200, 100))
def test_human_readable_size():
    """human_readable_size appends K/M/G/T/P suffixes at powers of 1024."""
    assert human_readable_size(100) == '100'
    for power, unit in zip((10, 20, 30, 40, 50), 'KMGTP'):
        assert human_readable_size(2 ** power) == '1.0' + unit
def test_normalize_order():
    """normalize_order upper-cases 'C'/'F' and rejects anything else."""
    for arg, expect in (('F', 'F'), ('C', 'C'), ('f', 'F'), ('c', 'C')):
        assert normalize_order(arg) == expect
    with pytest.raises(ValueError):
        normalize_order('foo')
def test_normalize_fill_value():
    """Fill value 0 normalizes to an empty bytes/str for S/structured/U dtypes."""
    assert normalize_fill_value(0, dtype=np.dtype('S1')) == b''
    assert normalize_fill_value(0, dtype=np.dtype([('foo', 'i4'), ('bar', 'f8')])) == b''
    assert normalize_fill_value(0, dtype=np.dtype('U1')) == ''
def test_guess_chunks():
    """guess_chunks returns a same-rank tuple with no zero-length dims."""
    shapes = [
        (100,), (100, 100),
        (1000000,), (1000000000,), (10000000000000000000000,),
        (10000, 10000), (10000000, 1000), (1000, 10000000),
        (10000000, 1000, 2), (1000, 10000000, 2),
        (10000, 10000, 10000), (100000, 100000, 100000),
        (1000000000, 1000000000, 1000000000),
        (0,), (0, 0), (10, 0), (0, 10), (1, 2, 0, 4, 5),
    ]
    for shape in shapes:
        chunks = guess_chunks(shape, 1)
        assert isinstance(chunks, tuple)
        assert len(chunks) == len(shape)
        # a zero-length chunk dimension would make no sense
        assert all(0 < c <= max(s, 1) for c, s in zip(chunks, shape))
    # a ludicrous itemsize should collapse the chunks to a single element
    chunks = guess_chunks((1000000,), 40000000000)
    assert isinstance(chunks, tuple)
    assert chunks == (1,)
def test_info_text_report():
    """info_text_report renders key/value pairs one per line."""
    report = info_text_report([('foo', 'bar'), ('baz', 'qux')])
    assert report == "foo : bar\nbaz : qux\n"
def test_info_html_report():
    """info_html_report wraps the items in a single <table> element."""
    html = info_html_report([('foo', 'bar'), ('baz', 'qux')])
    assert html.startswith('<table')
    assert html.endswith('</table>')
| [
"zarr.util.normalize_resize_args",
"zarr.util.human_readable_size",
"zarr.util.is_total_slice",
"zarr.util.info_html_report",
"zarr.util.info_text_report",
"zarr.util.normalize_order",
"pytest.raises",
"zarr.util.normalize_shape",
"numpy.dtype",
"zarr.util.normalize_chunks",
"zarr.util.guess_chu... | [((1719, 1752), 'zarr.util.normalize_chunks', 'normalize_chunks', (['None', '(100,)', '(1)'], {}), '(None, (100,), 1)\n', (1735, 1752), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1831, 1863), 'zarr.util.is_total_slice', 'is_total_slice', (['Ellipsis', '(100,)'], {}), '(Ellipsis, (100,))\n', (1845, 1863), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((2089, 2125), 'zarr.util.is_total_slice', 'is_total_slice', (['Ellipsis', '(100, 100)'], {}), '(Ellipsis, (100, 100))\n', (2103, 2125), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((4880, 4917), 'zarr.util.guess_chunks', 'guess_chunks', (['(1000000,)', '(40000000000)'], {}), '((1000000,), 40000000000)\n', (4892, 4917), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((5229, 5252), 'zarr.util.info_html_report', 'info_html_report', (['items'], {}), '(items)\n', (5245, 5252), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((453, 476), 'zarr.util.normalize_shape', 'normalize_shape', (['(100,)'], {}), '((100,))\n', (468, 476), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, 
normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((498, 520), 'zarr.util.normalize_shape', 'normalize_shape', (['[100]'], {}), '([100])\n', (513, 520), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((542, 562), 'zarr.util.normalize_shape', 'normalize_shape', (['(100)'], {}), '(100)\n', (557, 562), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((572, 596), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (585, 596), False, 'import pytest\n'), ((606, 627), 'zarr.util.normalize_shape', 'normalize_shape', (['None'], {}), '(None)\n', (621, 627), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((637, 662), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (650, 662), False, 'import pytest\n'), ((672, 694), 'zarr.util.normalize_shape', 'normalize_shape', (['"""foo"""'], {}), "('foo')\n", (687, 694), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((746, 780), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(10,)', '(100,)', '(1)'], {}), '((10,), (100,), 1)\n', (762, 780), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((801, 834), 
'zarr.util.normalize_chunks', 'normalize_chunks', (['[10]', '(100,)', '(1)'], {}), '([10], (100,), 1)\n', (817, 834), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((855, 886), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(10)', '(100,)', '(1)'], {}), '(10, (100,), 1)\n', (871, 886), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((910, 950), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(10, 10)', '(100, 10)', '(1)'], {}), '((10, 10), (100, 10), 1)\n', (926, 950), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((974, 1008), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(10)', '(100, 10)', '(1)'], {}), '(10, (100, 10), 1)\n', (990, 1008), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1032, 1074), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(10, None)', '(100, 10)', '(1)'], {}), '((10, None), (100, 10), 1)\n', (1048, 1074), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1102, 1140), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(30)', '(100, 20, 10)', '(1)'], {}), '(30, (100, 20, 10), 1)\n', (1118, 1140), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, 
normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1168, 1209), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(30,)', '(100, 20, 10)', '(1)'], {}), '((30,), (100, 20, 10), 1)\n', (1184, 1209), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1237, 1283), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(30, None)', '(100, 20, 10)', '(1)'], {}), '((30, None), (100, 20, 10), 1)\n', (1253, 1283), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1311, 1363), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(30, None, None)', '(100, 20, 10)', '(1)'], {}), '((30, None, None), (100, 20, 10), 1)\n', (1327, 1363), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1391, 1441), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(30, 20, None)', '(100, 20, 10)', '(1)'], {}), '((30, 20, None), (100, 20, 10), 1)\n', (1407, 1441), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1469, 1517), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(30, 20, 10)', '(100, 20, 10)', '(1)'], {}), '((30, 20, 10), (100, 20, 10), 1)\n', (1485, 1517), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, 
info_html_report, info_text_report, normalize_fill_value\n'), ((1527, 1552), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1540, 1552), False, 'import pytest\n'), ((1562, 1596), 'zarr.util.normalize_chunks', 'normalize_chunks', (['"""foo"""', '(100,)', '(1)'], {}), "('foo', (100,), 1)\n", (1578, 1596), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((1606, 1631), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1619, 1631), False, 'import pytest\n'), ((1641, 1679), 'zarr.util.normalize_chunks', 'normalize_chunks', (['(100, 10)', '(100,)', '(1)'], {}), '((100, 10), (100,), 1)\n', (1657, 1679), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((2618, 2642), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2631, 2642), False, 'import pytest\n'), ((2652, 2681), 'zarr.util.is_total_slice', 'is_total_slice', (['"""foo"""', '(100,)'], {}), "('foo', (100,))\n", (2666, 2681), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((2749, 2783), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['(100,)', '(200)'], {}), '((100,), 200)\n', (2770, 2783), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((2805, 2842), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['(100,)', '(200,)'], {}), '((100,), (200,))\n', 
(2826, 2842), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((2878, 2923), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['(100, 100)', '(200, 100)'], {}), '((100, 100), (200, 100))\n', (2899, 2923), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((2949, 2995), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['(100, 100)', '(200, None)'], {}), '((100, 100), (200, None))\n', (2970, 2995), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3021, 3064), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['(100, 100)', '(200)', '(100)'], {}), '((100, 100), 200, 100)\n', (3042, 3064), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3090, 3134), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['(100, 100)', '(200)', 'None'], {}), '((100, 100), 200, None)\n', (3111, 3134), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3145, 3170), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3158, 3170), False, 'import pytest\n'), ((3180, 3221), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['(100,)', '(200, 100)'], {}), '((100,), (200, 100))\n', (3201, 
3221), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3276, 3300), 'zarr.util.human_readable_size', 'human_readable_size', (['(100)'], {}), '(100)\n', (3295, 3300), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3322, 3350), 'zarr.util.human_readable_size', 'human_readable_size', (['(2 ** 10)'], {}), '(2 ** 10)\n', (3341, 3350), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3370, 3398), 'zarr.util.human_readable_size', 'human_readable_size', (['(2 ** 20)'], {}), '(2 ** 20)\n', (3389, 3398), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3418, 3446), 'zarr.util.human_readable_size', 'human_readable_size', (['(2 ** 30)'], {}), '(2 ** 30)\n', (3437, 3446), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3466, 3494), 'zarr.util.human_readable_size', 'human_readable_size', (['(2 ** 40)'], {}), '(2 ** 40)\n', (3485, 3494), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3514, 3542), 'zarr.util.human_readable_size', 'human_readable_size', (['(2 ** 50)'], 
{}), '(2 ** 50)\n', (3533, 3542), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3589, 3609), 'zarr.util.normalize_order', 'normalize_order', (['"""F"""'], {}), "('F')\n", (3604, 3609), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3628, 3648), 'zarr.util.normalize_order', 'normalize_order', (['"""C"""'], {}), "('C')\n", (3643, 3648), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3667, 3687), 'zarr.util.normalize_order', 'normalize_order', (['"""f"""'], {}), "('f')\n", (3682, 3687), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3706, 3726), 'zarr.util.normalize_order', 'normalize_order', (['"""c"""'], {}), "('c')\n", (3721, 3726), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3736, 3761), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3749, 3761), False, 'import pytest\n'), ((3771, 3793), 'zarr.util.normalize_order', 'normalize_order', (['"""foo"""'], {}), "('foo')\n", (3786, 3793), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, 
normalize_fill_value\n'), ((4585, 4607), 'zarr.util.guess_chunks', 'guess_chunks', (['shape', '(1)'], {}), '(shape, 1)\n', (4597, 4607), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((5116, 5139), 'zarr.util.info_text_report', 'info_text_report', (['items'], {}), '(items)\n', (5132, 5139), False, 'from zarr.util import normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value\n'), ((3877, 3891), 'numpy.dtype', 'np.dtype', (['"""S1"""'], {}), "('S1')\n", (3885, 3891), True, 'import numpy as np\n'), ((3941, 3981), 'numpy.dtype', 'np.dtype', (["[('foo', 'i4'), ('bar', 'f8')]"], {}), "([('foo', 'i4'), ('bar', 'f8')])\n", (3949, 3981), True, 'import numpy as np\n'), ((4030, 4044), 'numpy.dtype', 'np.dtype', (['"""U1"""'], {}), "('U1')\n", (4038, 4044), True, 'import numpy as np\n')] |
import numpy as np
import os
import pickle
import shutil
from jina.executors.encoders.numeric import TransformEncoder
from jina.executors import BaseExecutor
from .. import FastICAEncoder
# Dimensionality of the raw input features and of the encoder output.
input_dim = 28
target_output_dim = 2
# Synthetic training set: 2000 samples with `input_dim` features in [0, 1).
train_data = np.random.rand(2000, input_dim)
def rm_files(tmp_files):
    """Delete every existing file or directory in ``tmp_files``.

    Entries that are falsy (e.g. ``None``) or missing on disk are
    silently skipped; directories are removed recursively.
    """
    for entry in tmp_files:
        if not entry or not os.path.exists(entry):
            continue
        if os.path.isdir(entry):
            shutil.rmtree(entry, ignore_errors=False, onerror=None)
        elif os.path.isfile(entry):
            os.remove(entry)
def test_FastICATestCaseTrainCase():
    """End-to-end check of a freshly trained FastICAEncoder.

    Because the model is trained in this test, a config-only reload does
    not restore the fitted state, so ``requires_train_after_load`` is True.
    """
    needs_retrain = True
    model = FastICAEncoder(
        output_dim=target_output_dim, whiten=True, num_features=input_dim, max_iter=200)
    model.train(train_data)
    encoding_results(model)
    save_and_load(model, needs_retrain)
    save_and_load_config(model, needs_retrain)
    # Clean up every artifact the encoder wrote to disk.
    rm_files([model.save_abspath, model.config_abspath, model.model_path])
def test_FastICATestCaseLoadCase():
    """End-to-end check of a TransformEncoder restored from a pickled model.

    The ICA model is trained once, pickled to disk, and reloaded through a
    generic ``TransformEncoder``; the fitted state travels with the pickle,
    so no re-training is required after a config reload.
    """
    requires_train_after_load = False
    encoder = FastICAEncoder(
        output_dim=target_output_dim, whiten=True, num_features=input_dim, max_iter=200)
    encoder.train(train_data)
    filename = 'ica_model.model'
    # Fix: use a context manager so the file handle is closed
    # deterministically; the original `open(...)` inside `pickle.dump`
    # leaked the handle.
    with open(filename, 'wb') as model_file:
        pickle.dump(encoder.model, model_file)
    encoder = TransformEncoder(model_path=filename)
    encoding_results(encoder)
    save_and_load(encoder, requires_train_after_load)
    save_and_load_config(encoder, requires_train_after_load)
    # Clean up every artifact, including the pickled model file.
    rm_files([encoder.save_abspath, encoder.config_abspath, encoder.model_path])
def encoding_results(encoder):
    """Encode a random batch and verify the output shape and exact type."""
    samples = np.random.rand(10, input_dim)
    encoded = encoder.encode(samples)
    assert encoded.shape == (samples.shape[0], target_output_dim)
    assert type(encoded) is np.ndarray
def save_and_load(encoder, requires_train_after_load):
    """Round-trip the executor through ``save``/``load``.

    When the model's training is deterministic, the reloaded executor must
    reproduce the reference encodings exactly.
    """
    samples = np.random.rand(10, input_dim)
    reference = encoder.encode(samples)
    encoder.touch()
    encoder.save()
    assert os.path.exists(encoder.save_abspath)
    restored = BaseExecutor.load(encoder.save_abspath)
    if requires_train_after_load:
        # Some models train stochastically, so identical training data does
        # not guarantee identical encodings; skip the comparison.
        return
    np.testing.assert_array_equal(restored.encode(samples), reference)
def save_and_load_config(encoder, requires_train_after_load):
    """Round-trip the executor through its YAML config and encode again.

    A config reload does not carry fitted state, so models flagged with
    ``requires_train_after_load`` are re-trained before encoding.
    """
    samples = np.random.rand(10, input_dim)
    encoder.save_config()
    assert os.path.exists(encoder.config_abspath)
    restored = BaseExecutor.load_config(encoder.config_abspath)
    if requires_train_after_load:
        restored.train(train_data)
    assert restored.encode(samples).shape == (10, target_output_dim)
| [
"jina.executors.encoders.numeric.TransformEncoder",
"os.path.exists",
"numpy.random.rand",
"os.path.isfile",
"os.path.isdir",
"jina.executors.BaseExecutor.load_config",
"shutil.rmtree",
"jina.executors.BaseExecutor.load",
"numpy.testing.assert_array_equal",
"os.remove"
] | [((240, 271), 'numpy.random.rand', 'np.random.rand', (['(2000)', 'input_dim'], {}), '(2000, input_dim)\n', (254, 271), True, 'import numpy as np\n'), ((1320, 1357), 'jina.executors.encoders.numeric.TransformEncoder', 'TransformEncoder', ([], {'model_path': 'filename'}), '(model_path=filename)\n', (1336, 1357), False, 'from jina.executors.encoders.numeric import TransformEncoder\n'), ((1633, 1662), 'numpy.random.rand', 'np.random.rand', (['(10)', 'input_dim'], {}), '(10, input_dim)\n', (1647, 1662), True, 'import numpy as np\n'), ((1899, 1928), 'numpy.random.rand', 'np.random.rand', (['(10)', 'input_dim'], {}), '(10, input_dim)\n', (1913, 1928), True, 'import numpy as np\n'), ((2032, 2068), 'os.path.exists', 'os.path.exists', (['encoder.save_abspath'], {}), '(encoder.save_abspath)\n', (2046, 2068), False, 'import os\n'), ((2090, 2129), 'jina.executors.BaseExecutor.load', 'BaseExecutor.load', (['encoder.save_abspath'], {}), '(encoder.save_abspath)\n', (2107, 2129), False, 'from jina.executors import BaseExecutor\n'), ((2544, 2573), 'numpy.random.rand', 'np.random.rand', (['(10)', 'input_dim'], {}), '(10, input_dim)\n', (2558, 2573), True, 'import numpy as np\n'), ((2611, 2649), 'os.path.exists', 'os.path.exists', (['encoder.config_abspath'], {}), '(encoder.config_abspath)\n', (2625, 2649), False, 'import os\n'), ((2671, 2719), 'jina.executors.BaseExecutor.load_config', 'BaseExecutor.load_config', (['encoder.config_abspath'], {}), '(encoder.config_abspath)\n', (2695, 2719), False, 'from jina.executors import BaseExecutor\n'), ((2379, 2449), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['encoded_data_test', 'encoded_data_control'], {}), '(encoded_data_test, encoded_data_control)\n', (2408, 2449), True, 'import numpy as np\n'), ((345, 365), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (359, 365), False, 'import os\n'), ((382, 402), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (396, 402), False, 'import 
os\n'), ((420, 435), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (429, 435), False, 'import os\n'), ((453, 472), 'os.path.isdir', 'os.path.isdir', (['file'], {}), '(file)\n', (466, 472), False, 'import os\n'), ((490, 544), 'shutil.rmtree', 'shutil.rmtree', (['file'], {'ignore_errors': '(False)', 'onerror': 'None'}), '(file, ignore_errors=False, onerror=None)\n', (503, 544), False, 'import shutil\n')] |
import csv
import os
import json
from os import path
import cv2
import shutil
import numpy as np
import skimage.draw
import skimage.io
# Map "<folder>/<file>" annotation keys to their source path and subject id,
# as listed in the translation CSV (first row is the header and is skipped).
images = {}
with open('awe-translation.csv', 'r') as f:
    reader = csv.reader(f)
    c = [x for x in reader][1:]
    images = {x[1]: {"src": x[0], "subject": int(x[2])} for x in c}
# NOTE(review): `map` is never used and shadows the builtin.
map = {}
for x in os.listdir('AWEDataset'):
    if os.path.isdir(os.path.join('AWEDataset', x)):
        with open(os.path.join('AWEDataset', x, 'annotations.json'), 'r') as f:
            d = json.load(f)
            for k in d['data']:
                i = d['data'][k]
                # Key into `images` is "<subject folder>/<file name>".
                m = "{}/{}".format(x, i['file'])
                s = images[m]
                # Ear side: 'L' or 'R' (from the annotation's 'd' field).
                lr = i['d'].upper()
                # Train/test split is encoded in the source path.
                if 'test' in s['src']:
                    dir = 'test'
                else:
                    dir = 'train'
                fn = s['src'][-8:-4]
                target = os.path.join('converted', dir, x, lr, fn)
                # NOTE(review): rebinds the `path` imported via `from os import path`;
                # below it is used purely as a string.
                path = os.path.join('AWEForSegmentation', s['src'])
                if not os.path.exists(os.path.dirname(target)):
                    os.makedirs(os.path.dirname(target))
                img = cv2.imread(path)
                # Segmentation mask and bounding-box images live in sibling
                # '<split>annot' / '<split>annot_rect' directories.
                # NOTE(review): str.replace swaps the first occurrence of
                # 'train'/'test' anywhere in the path — fragile if a folder
                # name contains those substrings; verify the layout.
                mask = cv2.imread(path.replace(dir, '{}annot'.format(dir)))
                bb = cv2.imread(path.replace(dir, '{}annot_rect'.format(dir)))
                # NOTE(review): img.shape is (rows, cols, channels); the
                # names `w, h` are swapped, and `c` overwrites the CSV rows.
                w, h, c = img.shape
                gray = cv2.cvtColor(bb, cv2.COLOR_BGR2GRAY)
                thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
                contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                # cv2.findContours returns 2 or 3 values depending on the
                # OpenCV version; pick the contour list either way.
                contours = contours[0] if len(contours) == 2 else contours[1]
                mask_path = target + '.npy'
                c = contours[0]
                # With multiple boxes, keep the leftmost for left ears and
                # the rightmost for right ears (compared by min x-coordinate).
                if len(contours) > 1:
                    if lr == 'L':
                        for cc in contours[1:]:
                            x1 = min([p[0][0] for p in c if p[0][0] >= 0])
                            x2 = min([p[0][0] for p in cc if p[0][0] >= 0])
                            if x1 < x2:
                                c = cc
                    else:
                        for cc in contours[1:]:
                            x1 = min([p[0][0] for p in c if p[0][0] >= 0])
                            x2 = min([p[0][0] for p in cc if p[0][0] >= 0])
                            if x1 > x2:
                                c = cc
                # Boolean mask the size of the image; only the chosen box
                # region is filled from the annotation mask's green channel.
                m = np.full((w, h), False)
                y1 = min([p[0][1] for p in c if p[0][1] >= 0])
                y2 = max([p[0][1] for p in c if p[0][1] >= 0]) + 1
                x1 = min([p[0][0] for p in c if p[0][0] >= 0])
                x2 = max([p[0][0] for p in c if p[0][0] >= 0]) + 1
                s = mask[y1:y2, x1:x2, 1] > 0
                m[y1:y2, x1:x2] = s
                if dir == 'train':
                    # Train split: copy the full image and save the mask.
                    shutil.copy(path, target + '.png')
                    with open(mask_path, 'wb+') as file:
                        np.save(file, m)
                else:
                    # Test split: write only the masked crop of the ear.
                    mask_out = img * np.stack([m, m, m], axis=2)
                    out = mask_out[y1:y2, x1:x2]
                    cv2.imwrite(target + '.png', out)
| [
"cv2.imwrite",
"os.listdir",
"cv2.threshold",
"os.path.join",
"cv2.findContours",
"os.path.dirname",
"numpy.stack",
"cv2.cvtColor",
"shutil.copy",
"json.load",
"numpy.full",
"csv.reader",
"numpy.save",
"cv2.imread"
] | [((339, 363), 'os.listdir', 'os.listdir', (['"""AWEDataset"""'], {}), "('AWEDataset')\n", (349, 363), False, 'import os\n'), ((206, 219), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (216, 219), False, 'import csv\n'), ((386, 415), 'os.path.join', 'os.path.join', (['"""AWEDataset"""', 'x'], {}), "('AWEDataset', x)\n", (398, 415), False, 'import os\n'), ((514, 526), 'json.load', 'json.load', (['f'], {}), '(f)\n', (523, 526), False, 'import json\n'), ((436, 485), 'os.path.join', 'os.path.join', (['"""AWEDataset"""', 'x', '"""annotations.json"""'], {}), "('AWEDataset', x, 'annotations.json')\n", (448, 485), False, 'import os\n'), ((897, 938), 'os.path.join', 'os.path.join', (['"""converted"""', 'dir', 'x', 'lr', 'fn'], {}), "('converted', dir, x, lr, fn)\n", (909, 938), False, 'import os\n'), ((962, 1006), 'os.path.join', 'os.path.join', (['"""AWEForSegmentation"""', "s['src']"], {}), "('AWEForSegmentation', s['src'])\n", (974, 1006), False, 'import os\n'), ((1151, 1167), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1161, 1167), False, 'import cv2\n'), ((1383, 1419), 'cv2.cvtColor', 'cv2.cvtColor', (['bb', 'cv2.COLOR_BGR2GRAY'], {}), '(bb, cv2.COLOR_BGR2GRAY)\n', (1395, 1419), False, 'import cv2\n'), ((1522, 1586), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1538, 1586), False, 'import cv2\n'), ((2416, 2438), 'numpy.full', 'np.full', (['(w, h)', '(False)'], {}), '((w, h), False)\n', (2423, 2438), True, 'import numpy as np\n'), ((1445, 1491), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, 0, 255, cv2.THRESH_BINARY)\n', (1458, 1491), False, 'import cv2\n'), ((2836, 2870), 'shutil.copy', 'shutil.copy', (['path', "(target + '.png')"], {}), "(path, target + '.png')\n", (2847, 2870), False, 'import shutil\n'), ((3125, 3158), 'cv2.imwrite', 'cv2.imwrite', (["(target + '.png')", 'out'], {}), 
"(target + '.png', out)\n", (3136, 3158), False, 'import cv2\n'), ((1045, 1068), 'os.path.dirname', 'os.path.dirname', (['target'], {}), '(target)\n', (1060, 1068), False, 'import os\n'), ((1103, 1126), 'os.path.dirname', 'os.path.dirname', (['target'], {}), '(target)\n', (1118, 1126), False, 'import os\n'), ((2952, 2968), 'numpy.save', 'np.save', (['file', 'm'], {}), '(file, m)\n', (2959, 2968), True, 'import numpy as np\n'), ((3028, 3055), 'numpy.stack', 'np.stack', (['[m, m, m]'], {'axis': '(2)'}), '([m, m, m], axis=2)\n', (3036, 3055), True, 'import numpy as np\n')] |
"""
Deep CCA
===========================
This example demonstrates how to easily train Deep CCA models and variants
"""
import numpy as np
import pytorch_lightning as pl
from matplotlib import pyplot as plt
from torch.utils.data import Subset
# %%
from cca_zoo.data import Split_MNIST_Dataset
from cca_zoo.deepmodels import (
DCCA,
CCALightning,
get_dataloaders,
architectures,
DCCA_NOI,
DCCA_SDL,
BarlowTwins,
)
def plot_latent_label(model, dataloader, num_batches=100):
    """Scatter-plot the two views' latent coordinates against each other,
    one subplot per latent dimension, coloured by class label.

    NOTE(review): assumes model.latent_dims >= 2 — with a single latent
    dimension ``plt.subplots`` returns a bare Axes that is not indexable.
    """
    fig, ax = plt.subplots(ncols=model.latent_dims)
    for j in range(model.latent_dims):
        ax[j].set_title(f"Dimension {j}")
        ax[j].set_xlabel("View 1")
        ax[j].set_ylabel("View 2")
    for i, batch in enumerate(dataloader):
        # Forward both views through the model to get latent codes.
        z = model(*batch["views"])
        zx, zy = z
        zx = zx.to("cpu").detach().numpy()
        zy = zy.to("cpu").detach().numpy()
        for j in range(model.latent_dims):
            ax[j].scatter(zx[:, j], zy[:, j], c=batch["label"].numpy(), cmap="tab10")
        # Stop after num_batches batches; attach a colorbar for the labels.
        if i > num_batches:
            plt.colorbar()
            break
# Sizes of the training and validation subsets taken from split MNIST.
n_train = 500
n_val = 100
train_dataset = Split_MNIST_Dataset(mnist_type="MNIST", train=True)
# Validation samples are the ones immediately after the training samples.
val_dataset = Subset(train_dataset, np.arange(n_train, n_train + n_val))
train_dataset = Subset(train_dataset, np.arange(n_train))
train_loader, val_loader = get_dataloaders(train_dataset, val_dataset, batch_size=128)
# The number of latent dimensions across models
latent_dims = 2
# number of epochs for deep models
epochs = 20
# One encoder per view; feature_size=392 — presumably half of a flattened
# 28x28 MNIST image (784 / 2), one half per view. TODO confirm.
encoder_1 = architectures.Encoder(latent_dims=latent_dims, feature_size=392)
encoder_2 = architectures.Encoder(latent_dims=latent_dims, feature_size=392)
# %%
# Deep CCA: classic DCCA, wrapped in a PyTorch-Lightning module so a
# standard Lightning Trainer can drive the optimization loop.
dcca = DCCA(latent_dims=latent_dims, encoders=[encoder_1, encoder_2])
dcca = CCALightning(dcca)
trainer = pl.Trainer(max_epochs=epochs, enable_checkpointing=False)
trainer.fit(dcca, train_loader, val_loader)
# Visualize the learned latent space, coloured by digit label.
plot_latent_label(dcca.model, train_loader)
plt.suptitle("DCCA")
plt.show()
# %%
# Deep CCA by Non-Linear Orthogonal Iterations; this variant takes the
# dataset size N as a constructor argument.
dcca_noi = DCCA_NOI(
    latent_dims=latent_dims, N=len(train_dataset), encoders=[encoder_1, encoder_2]
)
dcca_noi = CCALightning(dcca_noi)
trainer = pl.Trainer(max_epochs=epochs, enable_checkpointing=False)
trainer.fit(dcca_noi, train_loader, val_loader)
plot_latent_label(dcca_noi.model, train_loader)
plt.suptitle("DCCA by Non-Linear Orthogonal Iterations")
plt.show()
# %%
# Deep CCA by Stochastic Decorrelation Loss; also takes the dataset size N.
dcca_sdl = DCCA_SDL(
    latent_dims=latent_dims, N=len(train_dataset), encoders=[encoder_1, encoder_2]
)
dcca_sdl = CCALightning(dcca_sdl)
trainer = pl.Trainer(max_epochs=epochs, enable_checkpointing=False)
trainer.fit(dcca_sdl, train_loader, val_loader)
plot_latent_label(dcca_sdl.model, train_loader)
plt.suptitle("DCCA by Stochastic Decorrelation")
plt.show()
# %%
# Deep CCA by <NAME>
barlowtwins = BarlowTwins(latent_dims=latent_dims, encoders=[encoder_1, encoder_2])
barlowtwins = CCALightning(barlowtwins)
trainer = pl.Trainer(max_epochs=epochs, enable_checkpointing=False)
# Fix: train and plot the Barlow-Twins model itself; the original
# copy-pasted `dcca` / `dcca_sdl.model` from the earlier sections, so the
# Barlow-Twins model was never actually fitted or visualized.
trainer.fit(barlowtwins, train_loader, val_loader)
plot_latent_label(barlowtwins.model, train_loader)
plt.suptitle("DCCA by <NAME>")
plt.show()
| [
"cca_zoo.deepmodels.CCALightning",
"cca_zoo.deepmodels.BarlowTwins",
"matplotlib.pyplot.colorbar",
"pytorch_lightning.Trainer",
"cca_zoo.deepmodels.DCCA",
"matplotlib.pyplot.subplots",
"cca_zoo.data.Split_MNIST_Dataset",
"cca_zoo.deepmodels.get_dataloaders",
"cca_zoo.deepmodels.architectures.Encoder... | [((1137, 1188), 'cca_zoo.data.Split_MNIST_Dataset', 'Split_MNIST_Dataset', ([], {'mnist_type': '"""MNIST"""', 'train': '(True)'}), "(mnist_type='MNIST', train=True)\n", (1156, 1188), False, 'from cca_zoo.data import Split_MNIST_Dataset\n'), ((1347, 1406), 'cca_zoo.deepmodels.get_dataloaders', 'get_dataloaders', (['train_dataset', 'val_dataset'], {'batch_size': '(128)'}), '(train_dataset, val_dataset, batch_size=128)\n', (1362, 1406), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((1532, 1596), 'cca_zoo.deepmodels.architectures.Encoder', 'architectures.Encoder', ([], {'latent_dims': 'latent_dims', 'feature_size': '(392)'}), '(latent_dims=latent_dims, feature_size=392)\n', (1553, 1596), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((1609, 1673), 'cca_zoo.deepmodels.architectures.Encoder', 'architectures.Encoder', ([], {'latent_dims': 'latent_dims', 'feature_size': '(392)'}), '(latent_dims=latent_dims, feature_size=392)\n', (1630, 1673), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((1698, 1760), 'cca_zoo.deepmodels.DCCA', 'DCCA', ([], {'latent_dims': 'latent_dims', 'encoders': '[encoder_1, encoder_2]'}), '(latent_dims=latent_dims, encoders=[encoder_1, encoder_2])\n', (1702, 1760), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((1768, 1786), 'cca_zoo.deepmodels.CCALightning', 'CCALightning', (['dcca'], {}), '(dcca)\n', (1780, 1786), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((1797, 1854), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_epochs': 'epochs', 'enable_checkpointing': '(False)'}), 
'(max_epochs=epochs, enable_checkpointing=False)\n', (1807, 1854), True, 'import pytorch_lightning as pl\n'), ((1943, 1963), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""DCCA"""'], {}), "('DCCA')\n", (1955, 1963), True, 'from matplotlib import pyplot as plt\n'), ((1964, 1974), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1972, 1974), True, 'from matplotlib import pyplot as plt\n'), ((2145, 2167), 'cca_zoo.deepmodels.CCALightning', 'CCALightning', (['dcca_noi'], {}), '(dcca_noi)\n', (2157, 2167), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((2178, 2235), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_epochs': 'epochs', 'enable_checkpointing': '(False)'}), '(max_epochs=epochs, enable_checkpointing=False)\n', (2188, 2235), True, 'import pytorch_lightning as pl\n'), ((2332, 2388), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""DCCA by Non-Linear Orthogonal Iterations"""'], {}), "('DCCA by Non-Linear Orthogonal Iterations')\n", (2344, 2388), True, 'from matplotlib import pyplot as plt\n'), ((2389, 2399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2397, 2399), True, 'from matplotlib import pyplot as plt\n'), ((2567, 2589), 'cca_zoo.deepmodels.CCALightning', 'CCALightning', (['dcca_sdl'], {}), '(dcca_sdl)\n', (2579, 2589), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((2600, 2657), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_epochs': 'epochs', 'enable_checkpointing': '(False)'}), '(max_epochs=epochs, enable_checkpointing=False)\n', (2610, 2657), True, 'import pytorch_lightning as pl\n'), ((2754, 2802), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""DCCA by Stochastic Decorrelation"""'], {}), "('DCCA by Stochastic Decorrelation')\n", (2766, 2802), True, 'from matplotlib import pyplot as plt\n'), ((2803, 2813), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (2811, 2813), True, 'from matplotlib import pyplot as plt\n'), ((2855, 2924), 'cca_zoo.deepmodels.BarlowTwins', 'BarlowTwins', ([], {'latent_dims': 'latent_dims', 'encoders': '[encoder_1, encoder_2]'}), '(latent_dims=latent_dims, encoders=[encoder_1, encoder_2])\n', (2866, 2924), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((2939, 2964), 'cca_zoo.deepmodels.CCALightning', 'CCALightning', (['barlowtwins'], {}), '(barlowtwins)\n', (2951, 2964), False, 'from cca_zoo.deepmodels import DCCA, CCALightning, get_dataloaders, architectures, DCCA_NOI, DCCA_SDL, BarlowTwins\n'), ((2975, 3032), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_epochs': 'epochs', 'enable_checkpointing': '(False)'}), '(max_epochs=epochs, enable_checkpointing=False)\n', (2985, 3032), True, 'import pytorch_lightning as pl\n'), ((3125, 3155), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""DCCA by <NAME>"""'], {}), "('DCCA by <NAME>')\n", (3137, 3155), True, 'from matplotlib import pyplot as plt\n'), ((3156, 3166), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3164, 3166), True, 'from matplotlib import pyplot as plt\n'), ((519, 556), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'model.latent_dims'}), '(ncols=model.latent_dims)\n', (531, 556), True, 'from matplotlib import pyplot as plt\n'), ((1225, 1260), 'numpy.arange', 'np.arange', (['n_train', '(n_train + n_val)'], {}), '(n_train, n_train + n_val)\n', (1234, 1260), True, 'import numpy as np\n'), ((1300, 1318), 'numpy.arange', 'np.arange', (['n_train'], {}), '(n_train)\n', (1309, 1318), True, 'import numpy as np\n'), ((1060, 1074), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1072, 1074), True, 'from matplotlib import pyplot as plt\n')] |
import torch
import numpy as np
import math
from scipy.stats import norm
import matplotlib.pyplot as plt
def plot_gaussian_mixture_1d(var, weights, mu=None):
    """Plot the density of a 1-D Gaussian mixture on [-10, 10].

    Args:
        var: 1-D tensor/array of component variances.
        weights: 1-D tensor/array of component mixture weights.
        mu: optional 1-D tensor/array of component means; defaults to all
            zeros when omitted.
    """
    if mu is None:
        mu = np.zeros_like(var)
    x = np.linspace(start = -10, stop = 10, num = 2000)
    y_cum = np.zeros_like(x)
    for ii in range(var.shape[0]):
        # Fix: center each component at its mean; the original hard-coded
        # a mean of 0 and silently ignored the `mu` argument.
        y = norm(mu[ii].item(), np.sqrt(var[ii].item())).pdf(x)
        y_cum = y * weights[ii].item() + y_cum
    plt.plot(x, y_cum)
def standardize(data_train, *args):
    """Standardize datasets with statistics computed from ``data_train``.

    Columns with zero variance are left unscaled (their std is clamped
    to 1 to avoid division by zero).

    :param data_train: 2-D array whose columns define mean and std.
    :param args: any number of extra 2-D arrays to transform the same way.
    :return: list ``[standardized train, *standardized extras, mean, std]``.
    """
    col_std = np.std(data_train, 0, keepdims=True)
    col_std[col_std == 0] = 1  # constant columns: divide by 1 instead of 0
    col_mean = np.mean(data_train, 0, keepdims=True)
    transformed = [(d - col_mean) / col_std for d in (data_train,) + args]
    return transformed + [col_mean, col_std]
def GP_noise(y1, K11, K12, K22, epsilon_noise, device):
    """Posterior mean and covariance of a GP at test points given noisy
    observations.

    Args:
        y1: (N1, 1) observed targets.
        K11: (N1, N1) kernel matrix of the observed inputs.
        K12: (N1, N2) cross-kernel between observed and test inputs.
        K22: (N2, N2) kernel matrix of the test inputs.
        epsilon_noise: observation-noise variance added to K11's diagonal.
        device: torch device for the identity matrix.

    Returns:
        (mu_2, var_2): posterior mean and posterior covariance.
    """
    # Kernel of the noisy observations.
    K11 = K11 + epsilon_noise * torch.eye(K11.shape[0]).to(device)
    # Fix: torch.solve was removed in PyTorch 1.13; torch.linalg.solve(A, B)
    # solves A X = B, matching the old torch.solve(B, A) solution tensor.
    solved = torch.linalg.solve(K11, K12)
    # Compute posterior mean
    mu_2 = torch.matmul(solved.T, y1)
    var_2 = K22 - torch.matmul(solved.T, K12)
    return mu_2, var_2  # mean, covariance
def cal_marg_likelihood_single(K, f, epsilon, device):
    """Average marginal log-likelihood of targets ``f`` under a zero-mean
    GP with kernel matrix ``K`` (jittered by ``epsilon`` on the diagonal).

    Args:
        K: (N, N) kernel matrix.
        f: (N, 1) target vector.
        epsilon: diagonal jitter for numerical stability.
        device: torch device for the identity matrix.

    Returns:
        Tensor holding the marginal log-likelihood averaged over N points.
    """
    N = f.shape[0]
    K_jit = K + epsilon * torch.eye(N).to(device)
    # Fix: torch.cholesky is deprecated/removed in favor of
    # torch.linalg.cholesky (same lower-triangular factor).
    L = torch.linalg.cholesky(K_jit)
    # log|K| = 2 * sum(log(diag(L)))
    singular_values = L.diagonal(offset=0)
    logdet = torch.sum(torch.log(singular_values) * 2)
    # Quadratic data-fit term: -f^T K^{-1} f (K_jit computed once above).
    data_fit = -(f.transpose(-1, -2)).matmul(torch.inverse(K_jit)).matmul(f).squeeze(-1)
    AvgMLL = (0.5 * data_fit - 0.5 * logdet) / N - 0.5 * math.log(2 * math.pi)
    return AvgMLL
def cal_marg_likelihood_single_L(f, L):
    """Average marginal log-likelihood given a precomputed Cholesky factor.

    Uses log|K| = 2 * sum(log(diag(L))) and the whitened residual
    f_bar = L^{-1} f, so that f^T K^{-1} f = f_bar^T f_bar.
    """
    n_points = f.shape[0]
    diag_L = L.diagonal(offset=0)
    log_det = torch.sum(torch.log(diag_L) * 2)
    whitened = torch.inverse(L).matmul(f)
    quad_term = -whitened.transpose(-1, -2).matmul(whitened).squeeze(-1)
    return (0.5 * quad_term - 0.5 * log_det) / n_points - 0.5 * math.log(2 * math.pi)
def cal_marg_likelihood(K, f, epsilon, kernel_mask, diagonal_mask, N, device):
    """Batched average marginal log-likelihood with padding masks.

    Args:
        K: (B, D, D) batched kernel matrices, zero-padded past each
            sequence's true length.
        f: (B, D, 1) batched targets, zero-filled in the padded region.
        epsilon: diagonal jitter for numerical stability.
        kernel_mask: (B, D, D) — 1 over the valid kernel block, 0 elsewhere.
        diagonal_mask: (B, D) — 1 on padded diagonal entries so their
            artificial log-determinant contribution is dropped.
        N: true lengths used to average the likelihood.
        device: torch device for identity matrices.
    """
    # K: B X N X N
    # f: B X N X 1 (filled with zeros)
    diag_size = f.shape[1]
    # Jitter the valid block, then put ones on the padded diagonal so the
    # whole matrix stays positive definite and Cholesky succeeds.
    K = (K + epsilon * torch.eye(diag_size).to(device).unsqueeze(0)) * kernel_mask
    K = K + torch.eye(diag_size).to(device).unsqueeze(0) * (1 - kernel_mask)
    # Fix: torch.cholesky is deprecated/removed in favor of
    # torch.linalg.cholesky (same lower-triangular factor, batched).
    L = torch.linalg.cholesky(K)
    singular_values = L.diagonal(offset=0, dim1=1, dim2=2)
    # Masked log|K|; diagonal_mask zeroes out padded entries' contribution.
    logdet = torch.sum(torch.log(singular_values) * 2 * (1 - diagonal_mask), 1)
    data_fit = -(f.transpose(-1, -2)).matmul(torch.inverse(K)).matmul(f).squeeze(1).squeeze(1)
    AvgMLL = (0.5 * data_fit - 0.5 * logdet) / N - 0.5 * math.log(2 * math.pi)
    return AvgMLL
def cal_kern_per(X1, X2, period, lengthscale):
    """Periodic kernel, multiplied over the feature dimension.

    k(x, x') = prod_d exp(-2 sin^2(pi |x_d - x'_d| / p_d) / l_d^2)

    Shapes: X1 (B?, N1, D), X2 (B?, N2, D); period/lengthscale (B?, D).
    Returns (B?, N1, N2).
    """
    p = period.unsqueeze(-2)                     # (B?, 1, D) for broadcasting
    scaled1 = (X1 / p).unsqueeze(-2)             # (B?, N1, 1, D)
    scaled2 = (X2 / p).unsqueeze(-3)             # (B?, 1, N2, D)
    abs_diff = (scaled1 - scaled2).abs()         # (B?, N1, N2, D)
    sq_ls = (lengthscale ** 2).unsqueeze(-2).unsqueeze(-2)   # (B?, 1, 1, D)
    per_dim = torch.exp(-2 * torch.sin(math.pi * abs_diff) ** 2 / sq_ls)
    return per_dim.prod(-1)                      # product over features
def cal_kern_rbf(X1, X2, lengthscale):
    """RBF (squared-exponential) kernel with per-dimension lengthscales.

    Computes exp(-||x/l - x'/l||^2) — note: no factor of 1/2 in the
    exponent.

    Shapes: X1 (B?, N1, D), X2 (B?, N2, D), lengthscale (B?, D).
    Returns (B?, N1, N2).
    """
    ls = lengthscale.unsqueeze(-2)                   # (B?, 1, D)
    A = X1 / ls
    B = X2 / ls
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, clamped against tiny
    # negative values from floating-point cancellation.
    sq_norm_a = torch.sum(A ** 2, dim=-1).unsqueeze(-1)   # (B?, N1, 1)
    sq_norm_b = torch.sum(B ** 2, dim=-1).unsqueeze(-2)   # (B?, 1, N2)
    cross = torch.matmul(A, B.transpose(-1, -2))
    dist_sq = (sq_norm_a + sq_norm_b - 2 * cross).clamp_min_(0)
    return torch.exp(-dist_sq)
def cal_kern_matern(X1, X2, lengthscale, nu=0.5):
    """Matern kernel for nu in {0.5, 1.5, 2.5}.

    Shapes: X1 (B?, N1, D), X2 (B?, N2, D), lengthscale (B?, D).
    Returns (B?, N1, N2).

    Raises:
        ValueError: if nu is not one of 0.5, 1.5, 2.5 (the original code
        failed later with a confusing NameError instead).
    """
    lengthscale = lengthscale.unsqueeze(-2)  # (B?, 1, D)
    X1 = X1.div(lengthscale)
    X2 = X2.div(lengthscale)
    X1_norm = torch.sum(X1 ** 2, dim=-1).unsqueeze(-1)  # (B?, N1, 1)
    X2_norm = torch.sum(X2 ** 2, dim=-1).unsqueeze(-2)  # (B?, 1, N2)
    # Clamp avoids sqrt of tiny negatives from floating-point cancellation.
    Distance_squared = (X1_norm + X2_norm - 2 * torch.matmul(X1, X2.transpose(-1, -2))).clamp_min_(1e-30)
    Distance = torch.sqrt(Distance_squared)
    exp_component = torch.exp(-math.sqrt(nu * 2) * Distance)
    if nu == 0.5:
        constant_component = 1
    elif nu == 1.5:
        constant_component = (math.sqrt(3) * Distance).add(1)
    elif nu == 2.5:
        constant_component = (math.sqrt(5) * Distance).add(1).add(5.0 / 3.0 * Distance ** 2)
    else:
        raise ValueError("nu must be one of 0.5, 1.5 or 2.5, got {}".format(nu))
    return torch.mul(constant_component, exp_component)  # shape: (B?, N1, N2)
def cal_kern_spec_mix_sep(X1, X2, mu, var, weights):
    """Separable spectral-mixture kernel: the mixture is summed per
    dimension first, then multiplied across dimensions.

    Shapes: X1 (B, N1, D), X2 (B, N2, D); mu/var/weights (B, M, D or 1).
    Returns (B, N1, N2).
    """
    diff = (X1.unsqueeze(-2) - X2.unsqueeze(-3)).unsqueeze(-4)  # (B, 1, N1, N2, D)
    diff_sq = diff ** 2
    var_b = var.unsqueeze(-2).unsqueeze(-2)                     # (B, M, 1, 1, D|1)
    mu_b = mu.unsqueeze(-2).unsqueeze(-2)                       # (B, M, 1, 1, D|1)
    w_b = weights.unsqueeze(-2).unsqueeze(-2)                   # (B, M, 1, 1, D|1)
    envelope = torch.exp(-2 * math.pi ** 2 * diff_sq * var_b)  # Gaussian envelope
    carrier = torch.cos(2 * math.pi * diff * mu_b)             # cosine carrier
    per_dim = torch.sum(w_b * envelope * carrier, -4)          # mixture sum over M
    return torch.prod(per_dim, -1)                              # product over D
def cal_kern_spec_mix_nomu_sep(X1, X2, var, weights):
    """Separable spectral-mixture kernel with all spectral means fixed at
    zero (no cosine carrier): per-dimension mixture sum, then product
    across dimensions.

    Shapes: X1 (B, N1, D), X2 (B, N2, D); var/weights (B, M, D or 1).
    Returns (B, N1, N2).
    """
    diff_sq = ((X1.unsqueeze(-2) - X2.unsqueeze(-3)).unsqueeze(-4)) ** 2  # (B, 1, N1, N2, D)
    var_b = var.unsqueeze(-2).unsqueeze(-2)                               # (B, M, 1, 1, D|1)
    w_b = weights.unsqueeze(-2).unsqueeze(-2)                             # (B, M, 1, 1, D|1)
    per_dim = torch.sum(w_b * torch.exp(-2 * math.pi ** 2 * diff_sq * var_b), -4)
    return torch.prod(per_dim, -1)
def cal_kern_spec_mix(X1, X2, mu, var, weights):
    """Spectral-mixture kernel: each component is a Gaussian envelope times
    a cosine carrier, combined over dimensions before the mixture sum.

    Shapes: X1 (B, N1, D), X2 (B, N2, D); mu/var (B, M, D or 1);
    weights (B, M). Returns (B, N1, N2).
    """
    diff = (X1.unsqueeze(-2) - X2.unsqueeze(-3)).unsqueeze(-4)  # (B, 1, N1, N2, D)
    var_b = var.unsqueeze(-2).unsqueeze(-2)                     # (B, M, 1, 1, D|1)
    mu_b = mu.unsqueeze(-2).unsqueeze(-2)                       # (B, M, 1, 1, D|1)
    # Sum the per-dimension log-envelopes, then exponentiate once.
    envelope = torch.exp(torch.sum(-2 * math.pi ** 2 * diff ** 2 * var_b, -1))  # (B, M, N1, N2)
    carrier = torch.prod(torch.cos(2 * math.pi * diff * mu_b), -1)            # (B, M, N1, N2)
    w_b = weights.unsqueeze(-1).unsqueeze(-1)                                    # (B, M, 1, 1)
    return torch.sum(w_b * envelope * carrier, -3)  # mixture sum over M
def cal_kern_spec_mix_nomu(X1, X2, var, weights):
    """Spectral-mixture kernel with zero spectral means (pure Gaussian
    envelopes, no cosine carrier), mixed over M components.

    Shapes: X1 (B, N1, D), X2 (B, N2, D); var (B, M, D or 1);
    weights (B, M). Returns (B, N1, N2).
    """
    diff_sq = ((X1.unsqueeze(-2) - X2.unsqueeze(-3)).unsqueeze(-4)) ** 2  # (B, 1, N1, N2, D)
    var_b = var.unsqueeze(-2).unsqueeze(-2)                               # (B, M, 1, 1, D|1)
    envelope = torch.exp(torch.sum(-2 * math.pi ** 2 * diff_sq * var_b, -1))  # (B, M, N1, N2)
    w_b = weights.unsqueeze(-1).unsqueeze(-1)                                   # (B, M, 1, 1)
    return torch.sum(w_b * envelope, -3)  # mixture sum over M
"torch.mul",
"math.sqrt",
"torch.sqrt",
"torch.exp",
"math.log",
"torch.sin",
"torch.cos",
"torch.sum",
"numpy.mean",
"torch.eye",
"matplotlib.pyplot.plot",
"torch.prod",
"numpy.linspace",
"torch.matmul",
"torch.abs",
"torch.cholesky",
"torch.solve",
"numpy.std",
"torch.log",
"... | [((254, 295), 'numpy.linspace', 'np.linspace', ([], {'start': '(-10)', 'stop': '(10)', 'num': '(2000)'}), '(start=-10, stop=10, num=2000)\n', (265, 295), True, 'import numpy as np\n'), ((312, 328), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (325, 328), True, 'import numpy as np\n'), ((454, 472), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_cum'], {}), '(x, y_cum)\n', (462, 472), True, 'import matplotlib.pyplot as plt\n'), ((834, 870), 'numpy.std', 'np.std', (['data_train', '(0)'], {'keepdims': '(True)'}), '(data_train, 0, keepdims=True)\n', (840, 870), True, 'import numpy as np\n'), ((900, 937), 'numpy.mean', 'np.mean', (['data_train', '(0)'], {'keepdims': '(True)'}), '(data_train, 0, keepdims=True)\n', (907, 937), True, 'import numpy as np\n'), ((1461, 1482), 'torch.solve', 'torch.solve', (['K12', 'K11'], {}), '(K12, K11)\n', (1472, 1482), False, 'import torch\n'), ((1519, 1545), 'torch.matmul', 'torch.matmul', (['solved.T', 'y1'], {}), '(solved.T, y1)\n', (1531, 1545), False, 'import torch\n'), ((2209, 2225), 'torch.inverse', 'torch.inverse', (['L'], {}), '(L)\n', (2222, 2225), False, 'import torch\n'), ((2765, 2782), 'torch.cholesky', 'torch.cholesky', (['K'], {}), '(K)\n', (2779, 2782), False, 'import torch\n'), ((3450, 3468), 'torch.abs', 'torch.abs', (['(X1 - X2)'], {}), '(X1 - X2)\n', (3459, 3468), False, 'import torch\n'), ((3654, 3671), 'torch.prod', 'torch.prod', (['K', '(-1)'], {}), '(K, -1)\n', (3664, 3671), False, 'import torch\n'), ((4155, 4183), 'torch.exp', 'torch.exp', (['(-Distance_squared)'], {}), '(-Distance_squared)\n', (4164, 4183), False, 'import torch\n'), ((4673, 4701), 'torch.sqrt', 'torch.sqrt', (['Distance_squared'], {}), '(Distance_squared)\n', (4683, 4701), False, 'import torch\n'), ((4995, 5039), 'torch.mul', 'torch.mul', (['constant_component', 'exp_component'], {}), '(constant_component, exp_component)\n', (5004, 5039), False, 'import torch\n'), ((5766, 5789), 'torch.sum', 'torch.sum', (['kern_all', 
'(-4)'], {}), '(kern_all, -4)\n', (5775, 5789), False, 'import torch\n'), ((5869, 5893), 'torch.prod', 'torch.prod', (['kern_all', '(-1)'], {}), '(kern_all, -1)\n', (5879, 5893), False, 'import torch\n'), ((6401, 6424), 'torch.sum', 'torch.sum', (['kern_all', '(-4)'], {}), '(kern_all, -4)\n', (6410, 6424), False, 'import torch\n'), ((6504, 6528), 'torch.prod', 'torch.prod', (['kern_all', '(-1)'], {}), '(kern_all, -1)\n', (6514, 6528), False, 'import torch\n'), ((7465, 7488), 'torch.sum', 'torch.sum', (['kern_all', '(-3)'], {}), '(kern_all, -3)\n', (7474, 7488), False, 'import torch\n'), ((8245, 8268), 'torch.sum', 'torch.sum', (['kern_all', '(-3)'], {}), '(kern_all, -3)\n', (8254, 8268), False, 'import torch\n'), ((229, 247), 'numpy.zeros_like', 'np.zeros_like', (['var'], {}), '(var)\n', (242, 247), True, 'import numpy as np\n'), ((1562, 1589), 'torch.matmul', 'torch.matmul', (['solved.T', 'K12'], {}), '(solved.T, K12)\n', (1574, 1589), False, 'import torch\n'), ((5692, 5728), 'torch.cos', 'torch.cos', (['(2 * math.pi * X_diff * mu)'], {}), '(2 * math.pi * X_diff * mu)\n', (5701, 5728), False, 'import torch\n'), ((6312, 6363), 'torch.exp', 'torch.exp', (['(-2 * math.pi ** 2 * X_diff_squared * var)'], {}), '(-2 * math.pi ** 2 * X_diff_squared * var)\n', (6321, 6363), False, 'import torch\n'), ((7152, 7184), 'torch.sum', 'torch.sum', (['log_exp_component', '(-1)'], {}), '(log_exp_component, -1)\n', (7161, 7184), False, 'import torch\n'), ((7241, 7277), 'torch.cos', 'torch.cos', (['(2 * math.pi * X_diff * mu)'], {}), '(2 * math.pi * X_diff * mu)\n', (7250, 7277), False, 'import torch\n'), ((8039, 8071), 'torch.sum', 'torch.sum', (['log_exp_component', '(-1)'], {}), '(log_exp_component, -1)\n', (8048, 8071), False, 'import torch\n'), ((1822, 1848), 'torch.log', 'torch.log', (['singular_values'], {}), '(singular_values)\n', (1831, 1848), False, 'import torch\n'), ((2013, 2034), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (2021, 2034), False, 
'import math\n'), ((2169, 2195), 'torch.log', 'torch.log', (['singular_values'], {}), '(singular_values)\n', (2178, 2195), False, 'import torch\n'), ((2364, 2385), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (2372, 2385), False, 'import math\n'), ((3050, 3071), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (3058, 3071), False, 'import math\n'), ((3930, 3956), 'torch.sum', 'torch.sum', (['(X1 ** 2)'], {'dim': '(-1)'}), '(X1 ** 2, dim=-1)\n', (3939, 3956), False, 'import torch\n'), ((3996, 4022), 'torch.sum', 'torch.sum', (['(X2 ** 2)'], {'dim': '(-1)'}), '(X2 ** 2, dim=-1)\n', (4005, 4022), False, 'import torch\n'), ((4437, 4463), 'torch.sum', 'torch.sum', (['(X1 ** 2)'], {'dim': '(-1)'}), '(X1 ** 2, dim=-1)\n', (4446, 4463), False, 'import torch\n'), ((4503, 4529), 'torch.sum', 'torch.sum', (['(X2 ** 2)'], {'dim': '(-1)'}), '(X2 ** 2, dim=-1)\n', (4512, 4529), False, 'import torch\n'), ((5646, 5697), 'torch.exp', 'torch.exp', (['(-2 * math.pi ** 2 * X_diff_squared * var)'], {}), '(-2 * math.pi ** 2 * X_diff_squared * var)\n', (5655, 5697), False, 'import torch\n'), ((2861, 2887), 'torch.log', 'torch.log', (['singular_values'], {}), '(singular_values)\n', (2870, 2887), False, 'import torch\n'), ((4731, 4748), 'math.sqrt', 'math.sqrt', (['(nu * 2)'], {}), '(nu * 2)\n', (4740, 4748), False, 'import math\n'), ((1412, 1435), 'torch.eye', 'torch.eye', (['K11.shape[0]'], {}), '(K11.shape[0])\n', (1421, 1435), False, 'import torch\n'), ((1735, 1747), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (1744, 1747), False, 'import torch\n'), ((3582, 3609), 'torch.sin', 'torch.sin', (['(math.pi * X_diff)'], {}), '(math.pi * X_diff)\n', (3591, 3609), False, 'import torch\n'), ((4848, 4860), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (4857, 4860), False, 'import math\n'), ((2671, 2691), 'torch.eye', 'torch.eye', (['diag_size'], {}), '(diag_size)\n', (2680, 2691), False, 'import torch\n'), ((2578, 2598), 'torch.eye', 'torch.eye', 
(['diag_size'], {}), '(diag_size)\n', (2587, 2598), False, 'import torch\n'), ((2953, 2969), 'torch.inverse', 'torch.inverse', (['K'], {}), '(K)\n', (2966, 2969), False, 'import torch\n'), ((4924, 4936), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (4933, 4936), False, 'import math\n'), ((1918, 1930), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (1927, 1930), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import bs4 as bs
import requests
import yfinance as yf
#import fix_yahoo_finance as yf
import datetime
import io
import cv2
import skimage
import datetime
from PIL import Image
from pandas_datareader import data as pdr
from skimage import measure
from skimage.measure import block_reduce
from datetime import datetime
'''
Functions to be used for data generation
'''
def get_img_from_fig(fig, dpi=180):
    """Rasterise a matplotlib figure into an RGB numpy uint8 array.

    The figure is saved as PNG into an in-memory buffer, decoded with
    OpenCV (which returns BGR) and converted to RGB before returning.
    """
    png_buffer = io.BytesIO()
    fig.savefig(png_buffer, format="png", dpi=dpi)
    png_buffer.seek(0)
    raw_bytes = np.frombuffer(png_buffer.getvalue(), dtype=np.uint8)
    png_buffer.close()
    decoded_bgr = cv2.imdecode(raw_bytes, 1)
    return cv2.cvtColor(decoded_bgr, cv2.COLOR_BGR2RGB)
def build_image(stockindex, idate=10, pastlag=10, futlag=3,nb_dates=1000):
    """Render the last `pastlag` prices ending at `idate` as a (32, 32, 3) image.

    Plots stockindex[(idate-pastlag):idate] with matplotlib, rasterises the
    figure, crops the plot area, block-averages it down to (32, 32, 3) and
    scales pixel values to [0, 1].

    NOTE(review): `futlag` and `nb_dates` are accepted but never used here,
    and this definition is shadowed later in the file by another
    `build_image` with a different signature — confirm which one is wanted.
    """
    sp500close=stockindex
    nb_days=nb_dates
    x_datas=[]
    x_datas=np.zeros((32,32,3))
    i=idate
    fig=plt.figure()
    ax=fig.add_subplot(111)
    ax.plot(sp500close[(i-pastlag):i])
    plot_img_np = get_img_from_fig(fig)
    # crop to the axes region and average over (18, 28) pixel blocks per
    # channel; crop indices assume matplotlib's default figure size/DPI —
    # TODO confirm
    x_tmp= skimage.measure.block_reduce(plot_img_np[90:620,140:970], (18,28,1), np.mean)
    # paste the reduced image into the interior, leaving a 1-pixel zero border
    (x_datas[1:-1])[:,1:-1][:]=x_tmp
    fig.clear()
    plt.close(fig)
    x_datas=x_datas/255
    return x_datas
'''
MAIN FUNCTION OF CLASSIFICATION
build y state y fut
and x
'''
def class_shortterm_returnfut(x, yfut, indexforpast,tpastlag):
    """Classify the future value `yfut` against the past price window.

    The window is x[indexforpast-tpastlag : indexforpast]; its range and
    the open/close "box" (first and last value of the window span) define
    five ordered zones:

        0: below the window minimum
        1: between window min and the lower of open/close
        2: inside the open/close box
        3: between the upper of open/close and the window maximum
        4: above the window maximum
       -1: none of the above (yfut exactly equals the window maximum)
    """
    window = x[(indexforpast - tpastlag):indexforpast]
    past_low = np.min(window)
    past_high = np.max(window)
    open_px = x[int(indexforpast - tpastlag)]
    close_px = x[indexforpast]
    box_low = min(open_px, close_px)
    box_high = max(open_px, close_px)
    if yfut < past_low:
        return 0
    if yfut < box_low:
        return 1
    if yfut < box_high:
        return 2
    if yfut < past_high:
        return 3
    if yfut > past_high:
        return 4
    return -1
def main_class_shortterm_returnfut(iterable):
    # map()-style wrapper: binds the module-level globals sp500close,
    # pastlag and futlag so only the varying argument has to be passed.
    # NOTE(review): `iterable` is forwarded as the `yfut` argument and
    # `pastlag`/`futlag` land in `indexforpast`/`tpastlag` — the parameter
    # names do not line up, confirm this mapping is intended.
    return class_shortterm_returnfut(sp500close, iterable, pastlag,futlag)
def normalise_df_image(xdf):
    """Scale the series/array by its maximum value.

    Returns:
        tuple: (xdf / max(xdf), max(xdf)).  Values land in [0, 1] only
        when the input is non-negative.
    """
    peak = np.max(xdf)
    rescaled = xdf / peak
    return rescaled, peak
def build_image(stockindex, idate=10, pastlag=10, futlag=3):
    """Render the last `pastlag` prices ending at `idate` as a (32, 32, 3) image.

    Same pipeline as the earlier `build_image` definition (which this one
    shadows at module level): plot the window, rasterise, crop,
    block-reduce to (32, 32, 3), scale pixels to [0, 1].
    `futlag` is accepted but unused.
    """
    sp500close=stockindex
    x_datas=[]
    x_datas=np.zeros((32,32,3))
    i=idate
    fig=plt.figure()
    ax=fig.add_subplot(111)
    ax.plot(sp500close[(i-pastlag):i])
    plot_img_np = get_img_from_fig(fig)
    # crop to the plot area (indices assume the default figure size — TODO
    # confirm) and average over (18, 28) pixel blocks per channel
    x_tmp= skimage.measure.block_reduce(plot_img_np[90:620,140:970], (18,28,1), np.mean)
    # paste into the interior, leaving a 1-pixel zero border
    (x_datas[1:-1])[:,1:-1][:]=x_tmp
    fig.clear()
    plt.close(fig)
    x_datas=x_datas/255
    return x_datas
def build_image_optimfig(fig, stockindex, idate=10, pastlag=10, futlag=3):
    """RAM-friendly variant of `build_image` that reuses a caller-owned figure.

    Plots onto the current pyplot figure (the caller passes `fig` so it can
    be rasterised) instead of allocating a new figure per call, then crops
    and block-reduces to a (32, 32, 3) array scaled to [0, 1].
    `futlag` is accepted but unused.

    NOTE(review): the figure is never cleared here, so repeated calls keep
    adding lines to the same axes — verify the caller clears `fig` between
    calls if a single-series image is expected.
    """
    sp500close=stockindex
    x_datas=[]
    x_datas=np.zeros((32,32,3))
    i=idate
    plt.plot(sp500close[(i-pastlag):i])
    plot_img_np = get_img_from_fig(fig)
    x_tmp= skimage.measure.block_reduce(plot_img_np[90:620,140:970], (18,28,1), np.mean)
    (x_datas[1:-1])[:,1:-1][:]=x_tmp
    x_datas=x_datas/255
    return x_datas
def build_image_df(xdf, past_step, fut_step):
    '''
    Build a dictionary of aligned time-series dataframes from the index
    price series, later consumed by setup_input_NN_image to assemble
    (X, Y_StateClass, Y_FutPredict).

    past_step is the lookback window used both for the rolling statistics
    and for each rendered image; fut_step is the prediction horizon.

    Returned keys:
        'stock_value': the index series normalised over the whole period
        'moving_average': rolling mean over past_step
        'max' / 'min': rolling max / min over past_step
        'volatility': annualised rolling volatility of past_step returns
        'future_volatility': 'volatility' shifted fut_step into the future
        'df_x_image': per-date images of shape (32, 32, 3) flattened to
            1-D rows (pandas cannot hold 3-D cells directly)
        'market_state': future market state label (class_shortterm_returnfut)
        'future_value': stock value shifted fut_step into the future
    '''
    df_stockvaluecorrected = xdf
    df_stockvaluecorrected, _ = normalise_df_image(df_stockvaluecorrected)
    df_pctchge = df_stockvaluecorrected.pct_change(periods=past_step)
    df_movave = df_stockvaluecorrected.rolling(window=past_step).mean()
    # sqrt(252) annualises the rolling standard deviation of returns
    df_volaty = np.sqrt(252)*df_pctchge.rolling(window=past_step).std()
    df_max = df_stockvaluecorrected.rolling(window=past_step).max()
    df_min = df_stockvaluecorrected.rolling(window=past_step).min()
    df_Fut_value = df_stockvaluecorrected.shift(periods=-fut_step)
    df_Fut_value.name = 'future_value'
    df_Fut_volaty = df_volaty.shift(periods=-fut_step)
    df_market_state = pd.DataFrame(index=df_stockvaluecorrected.index, columns=['market_state'], dtype=np.float64)
    # render one sample image to size the flattened-image table
    tmpimage = build_image(df_stockvaluecorrected, past_step+1, pastlag=past_step, futlag=fut_step)
    flatten_image = np.reshape(tmpimage, (1, -1))
    colname_d_x_image_flattened = ['Image Col'+str(j) for j in range(flatten_image.shape[1])]
    np_x_image = np.zeros((len(df_stockvaluecorrected.index), flatten_image.shape[1]))
    # pass 1: future market-state label for every date
    for i in range(len(df_stockvaluecorrected.index)):
        yfut = df_Fut_value.iloc[i]
        df_market_state.iloc[i] = class_shortterm_returnfut(df_stockvaluecorrected, yfut, i, tpastlag=past_step)
        print("loop 1 market state :", "step ", i, "market state fut", df_market_state.iloc[i], " future value", df_Fut_value.iloc[i])
    df_market_state.index = df_Fut_value.index
    # pass 2: one image per date, reusing a single figure to limit RAM use
    fig = plt.figure()
    for i in range(len(df_stockvaluecorrected.index)):
        try:
            tmpimage = build_image_optimfig(fig, df_stockvaluecorrected, i, pastlag=past_step, futlag=fut_step)
            np_x_image[i, :] = np.reshape(tmpimage, (1, -1))
            print("loop 2 image :", "step ", i, "market state fut", df_market_state.iloc[i], " future value", df_Fut_value.iloc[i])
        except Exception:
            # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate, while per-date failures stay best-effort
            print("error at index", i)
    df_x_image = pd.DataFrame(data=np_x_image, columns=colname_d_x_image_flattened, index=df_stockvaluecorrected.index)
    fig.clear()  # BUG FIX: was `fig.clear` (attribute access, never called)
    plt.close(fig)
    df_data = {
        'stock_value': df_stockvaluecorrected,
        'moving_average': df_movave,
        "max": df_max,
        "min": df_min,  # BUG FIX: was df_max, so 'min' duplicated 'max'
        'volatility': df_volaty,
        'future_volatility': df_Fut_volaty,
        'df_x_image': df_x_image,
        'market_state': df_market_state,
        'future_value': df_Fut_value,
    }
    return df_data
def build_image_clean(stockindex_ohlcv, ret_image_size=(32,32,3), idate=10, pastlag=32):
    """Placeholder OHLCV-to-image encoder — TO BE COMPLETED, NOT USED NOW.

    Intended design: encode one date as a (32, 32, 3) array where each
    abscissa pixel is one day; channel 1 marks open (127) / close (255)
    positions, channel 2 marks high (64) / low (32), and channel 3 scales
    columns by the day's volume relative to the period maximum, with prices
    scaled so the window's OHLC min maps to 0 and its max to 255.

    Current behaviour: slices the lookback window, computes the price
    extrema and the volume column (all currently unused), and returns an
    all-zero image of shape `ret_image_size`.
    """
    window = stockindex_ohlcv.iloc[(idate - pastlag):idate]
    price_columns = window.columns[:-1]          # all but the last column = OHLC prices
    price_values = np.array(window[price_columns])
    valmax = np.max(price_values)                # currently unused
    valmin = np.min(price_values)                # currently unused
    vol = window[window.columns[-1]]             # volume column, currently unused
    return np.zeros(ret_image_size)
def setup_input_NN_image(xdf, past_step=25,fut_step=5, split=0.8):
    '''
    Build shuffled train/test splits for the image-based NN from a price series.

    X holds the per-date flattened images, Y_StateClass the future market
    state label (see class_shortterm_returnfut) and Y_FutPredict the stock
    value shifted fut_step steps ahead.  Dates are shuffled before being
    split at the `split` fraction.

    Returns:
        ((X_train, Y_train_StateClass, Y_train_FutPredict),
         (X_test, Y_test_StateClass, Y_test_FutPredict))
    '''
    xdf_data=build_image_df(xdf,past_step,fut_step)
    # stack labels and flattened images side by side, then drop the rows
    # made incomplete by the rolling windows / future shift
    tmp_data=pd.concat([xdf_data['market_state'],xdf_data['future_value'],xdf_data['df_x_image']],axis=1)
    tmp_data=tmp_data.dropna()
    Y_StateClass= tmp_data['market_state']
    Y_FutPredict= tmp_data['future_value']
    X=tmp_data.drop(columns=['market_state','future_value'])
    nb_dates=len(Y_StateClass.index)
    rng = np.random.default_rng()
    list_shuffle = np.arange(nb_dates)
    rng.shuffle(list_shuffle)
    split_index=int(split*nb_dates)
    train_split=list_shuffle[:split_index]
    # NOTE(review): the `+1` skips the sample at `split_index`, so one row
    # lands in neither split — confirm this is intentional.
    test_split=list_shuffle[(split_index+1):]
    X_train=(X.iloc[train_split])
    Y_train_StateClass=(Y_StateClass.iloc[train_split])
    Y_train_FutPredict=(Y_FutPredict.iloc[train_split])
    X_test=(X.iloc[test_split])
    Y_test_StateClass=(Y_StateClass.iloc[test_split])
    Y_test_FutPredict=(Y_FutPredict.iloc[test_split])
    return (X_train, Y_train_StateClass, Y_train_FutPredict), (X_test, Y_test_StateClass, Y_test_FutPredict)
def change_X_df__nparray_image(df_X_train_image_flattened, image_shape=(32, 32, 3)):
    """Convert a dataframe of flattened images back to a 4-D numpy array.

    setup_input_NN_image returns flattened-image dataframes for X_train /
    X_test; this restores each row to an image.

    Args:
        df_X_train_image_flattened: dataframe where each row is one image
            flattened to 1-D.
        image_shape: target (height, width, channels) per image; defaults
            to the (32, 32, 3) layout used throughout this script, so
            existing callers are unaffected.

    Returns:
        np.ndarray of shape (n_rows, *image_shape), dtype float64.
    """
    n_samples = len(df_X_train_image_flattened.index)
    # a single vectorised reshape replaces the original per-row loop;
    # row-major order matches the per-row reshape it replaces
    return np.asarray(df_X_train_image_flattened, dtype=float).reshape(
        (n_samples,) + tuple(image_shape))
'''
COMMAND NOW FOR THE DATSET GENERATION
'''
# Fetch a long S&P 500 history from Yahoo Finance via pandas_datareader.
start = datetime(1920,1,1)
end = datetime(2020,7,31)
yf.pdr_override() # route pandas_datareader through yfinance
sp500 = pdr.get_data_yahoo('^GSPC',
                         start,
                         end)
# Generate the dataset on a 1000-day slice of closing prices.
# WARNING: image rendering is slow — this can take 6-8 hours and
# needs optimisation when more time is available.
testsp500=(sp500['Close'])[1000:2000]
(X_train_image, Y_train_StateClass_image, Y_train_FutPredict_image) , (X_test_image, Y_test_StateClass_image, Y_test_FutPredict_image) = setup_input_NN_image(testsp500)
# Persist the dataframes as CSV for later reuse (the 'datas/' directory
# must already exist — to_csv does not create it).
#dateTimeObj = datetime.now()
#timeStr = dateTimeObj.strftime("%Y_%m_%d_%H_%M_%S_%f")
X_train_image.to_csv('datas/X_train_image.csv')
Y_train_StateClass_image.to_csv('datas/Y_train_StateClass_image.csv')
Y_train_FutPredict_image.to_csv('datas/Y_train_FutPredict_image.csv')
X_test_image.to_csv('datas/X_test_image.csv')
Y_test_StateClass_image.to_csv('datas/Y_test_StateClass_image.csv')
Y_test_FutPredict_image.to_csv('datas/Y_test_FutPredict_image.csv')
| [
"numpy.sqrt",
"numpy.random.default_rng",
"io.BytesIO",
"yfinance.pdr_override",
"numpy.array",
"cv2.imdecode",
"numpy.arange",
"pandas_datareader.data.get_data_yahoo",
"datetime.datetime",
"numpy.reshape",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
"p... | [((10861, 10881), 'datetime.datetime', 'datetime', (['(1920)', '(1)', '(1)'], {}), '(1920, 1, 1)\n', (10869, 10881), False, 'from datetime import datetime\n'), ((10886, 10907), 'datetime.datetime', 'datetime', (['(2020)', '(7)', '(31)'], {}), '(2020, 7, 31)\n', (10894, 10907), False, 'from datetime import datetime\n'), ((10906, 10923), 'yfinance.pdr_override', 'yf.pdr_override', ([], {}), '()\n', (10921, 10923), True, 'import yfinance as yf\n'), ((10962, 11001), 'pandas_datareader.data.get_data_yahoo', 'pdr.get_data_yahoo', (['"""^GSPC"""', 'start', 'end'], {}), "('^GSPC', start, end)\n", (10980, 11001), True, 'from pandas_datareader import data as pdr\n'), ((568, 580), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (578, 580), False, 'import io\n'), ((727, 751), 'cv2.imdecode', 'cv2.imdecode', (['img_arr', '(1)'], {}), '(img_arr, 1)\n', (739, 751), False, 'import cv2\n'), ((762, 798), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (774, 798), False, 'import cv2\n'), ((1158, 1179), 'numpy.zeros', 'np.zeros', (['(32, 32, 3)'], {}), '((32, 32, 3))\n', (1166, 1179), True, 'import numpy as np\n'), ((1197, 1209), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1207, 1209), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1405), 'skimage.measure.block_reduce', 'skimage.measure.block_reduce', (['plot_img_np[90:620, 140:970]', '(18, 28, 1)', 'np.mean'], {}), '(plot_img_np[90:620, 140:970], (18, 28, 1), np.mean\n )\n', (1348, 1405), False, 'import skimage\n'), ((1449, 1463), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1458, 1463), True, 'import matplotlib.pyplot as plt\n'), ((2013, 2060), 'numpy.min', 'np.min', (['x[indexforpast - tpastlag:indexforpast]'], {}), '(x[indexforpast - tpastlag:indexforpast])\n', (2019, 2060), True, 'import numpy as np\n'), ((2073, 2120), 'numpy.max', 'np.max', (['x[indexforpast - tpastlag:indexforpast]'], {}), '(x[indexforpast - 
tpastlag:indexforpast])\n', (2079, 2120), True, 'import numpy as np\n'), ((2634, 2648), 'numpy.max', 'np.max', (['df_tmp'], {}), '(df_tmp)\n', (2640, 2648), True, 'import numpy as np\n'), ((3014, 3035), 'numpy.zeros', 'np.zeros', (['(32, 32, 3)'], {}), '((32, 32, 3))\n', (3022, 3035), True, 'import numpy as np\n'), ((3053, 3065), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3063, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3176, 3261), 'skimage.measure.block_reduce', 'skimage.measure.block_reduce', (['plot_img_np[90:620, 140:970]', '(18, 28, 1)', 'np.mean'], {}), '(plot_img_np[90:620, 140:970], (18, 28, 1), np.mean\n )\n', (3204, 3261), False, 'import skimage\n'), ((3305, 3319), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3314, 3319), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3748), 'numpy.zeros', 'np.zeros', (['(32, 32, 3)'], {}), '((32, 32, 3))\n', (3735, 3748), True, 'import numpy as np\n'), ((3762, 3797), 'matplotlib.pyplot.plot', 'plt.plot', (['sp500close[i - pastlag:i]'], {}), '(sp500close[i - pastlag:i])\n', (3770, 3797), True, 'import matplotlib.pyplot as plt\n'), ((3845, 3930), 'skimage.measure.block_reduce', 'skimage.measure.block_reduce', (['plot_img_np[90:620, 140:970]', '(18, 28, 1)', 'np.mean'], {}), '(plot_img_np[90:620, 140:970], (18, 28, 1), np.mean\n )\n', (3873, 3930), False, 'import skimage\n'), ((5858, 5954), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df_stockvaluecorrected.index', 'columns': "['market_state']", 'dtype': 'np.float64'}), "(index=df_stockvaluecorrected.index, columns=['market_state'],\n dtype=np.float64)\n", (5870, 5954), True, 'import pandas as pd\n'), ((6061, 6090), 'numpy.reshape', 'np.reshape', (['tmpimage', '(1, -1)'], {}), '(tmpimage, (1, -1))\n', (6071, 6090), True, 'import numpy as np\n'), ((6645, 6657), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6655, 6657), True, 'import matplotlib.pyplot as plt\n'), ((7090, 7197), 'pandas.DataFrame', 
'pd.DataFrame', ([], {'data': 'np_x_image', 'columns': 'colname_d_x_image_flattened', 'index': 'df_stockvaluecorrected.index'}), '(data=np_x_image, columns=colname_d_x_image_flattened, index=\n df_stockvaluecorrected.index)\n', (7102, 7197), True, 'import pandas as pd\n'), ((7206, 7220), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7215, 7220), True, 'import matplotlib.pyplot as plt\n'), ((8717, 8741), 'numpy.zeros', 'np.zeros', (['ret_image_size'], {}), '(ret_image_size)\n', (8725, 8741), True, 'import numpy as np\n'), ((9346, 9446), 'pandas.concat', 'pd.concat', (["[xdf_data['market_state'], xdf_data['future_value'], xdf_data['df_x_image']]"], {'axis': '(1)'}), "([xdf_data['market_state'], xdf_data['future_value'], xdf_data[\n 'df_x_image']], axis=1)\n", (9355, 9446), True, 'import pandas as pd\n'), ((9656, 9679), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (9677, 9679), True, 'import numpy as np\n'), ((9697, 9716), 'numpy.arange', 'np.arange', (['nb_dates'], {}), '(nb_dates)\n', (9706, 9716), True, 'import numpy as np\n'), ((10594, 10625), 'numpy.zeros', 'np.zeros', (['(nb_train, 32, 32, 3)'], {}), '((nb_train, 32, 32, 3))\n', (10602, 10625), True, 'import numpy as np\n'), ((5500, 5512), 'numpy.sqrt', 'np.sqrt', (['(252)'], {}), '(252)\n', (5507, 5512), True, 'import numpy as np\n'), ((8541, 8590), 'numpy.array', 'np.array', (['tsindexstock[tsindexstock.columns[:-1]]'], {}), '(tsindexstock[tsindexstock.columns[:-1]])\n', (8549, 8590), True, 'import numpy as np\n'), ((8608, 8657), 'numpy.array', 'np.array', (['tsindexstock[tsindexstock.columns[:-1]]'], {}), '(tsindexstock[tsindexstock.columns[:-1]])\n', (8616, 8657), True, 'import numpy as np\n'), ((10659, 10690), 'numpy.array', 'np.array', (['X_train_image.iloc[i]'], {}), '(X_train_image.iloc[i])\n', (10667, 10690), True, 'import numpy as np\n'), ((6855, 6884), 'numpy.reshape', 'np.reshape', (['tmpimage', '(1, -1)'], {}), '(tmpimage, (1, -1))\n', (6865, 6884), 
True, 'import numpy as np\n')] |
import librosa
import numpy as np
import os
from librosa.display import specshow
import matplotlib.pyplot as plt
import IPython.display as ipd
from alcokit import HOP_LENGTH, SR, N_FFT
import pickle
def save_pickle(obj, path):
    """Serialise `obj` with pickle and write it to `path`."""
    payload = pickle.dumps(obj)
    with open(path, "wb") as out_file:
        out_file.write(payload)
    return None
def load_pickle(path):
    """Read `path` and return its unpickled contents."""
    with open(path, "rb") as in_file:
        payload = in_file.read()
    return pickle.loads(payload)
# OS
def is_audio_file(file):
    """True for recognised audio extensions, excluding macOS '._' resource forks."""
    extension = file.split(".")[-1]
    has_audio_ext = extension in {"wav", "aif", "aiff", "mp3", "m4a", "mp4"}
    return has_audio_ext and "._" not in file
def flat_dir(directory, ext_filter=is_audio_file):
    """Recursively collect files under `directory` that pass `ext_filter`, sorted."""
    collected = []
    for root, _subdirs, names in os.walk(directory):
        collected.extend(os.path.join(root, name) for name in names if ext_filter(name))
    return sorted(collected)
def fs_dict(root, extension_filter=is_audio_file):
    """Map each directory under `root` to its files matching `extension_filter`.

    Returns (basename of root, {dirpath: [filenames...]}) keeping only
    directories with at least one match; raises ValueError when walking
    `root` yields nothing at all.
    """
    root_name = os.path.split(root.strip("/"))[-1]
    per_dir = []
    for dirpath, _subdirs, filenames in os.walk(root):
        matching = [name for name in filenames if extension_filter(name)]
        per_dir.append((dirpath, matching))
    if not per_dir:
        raise ValueError("no audio files found on path %s" % root)
    kept = {dirpath: names for dirpath, names in per_dir if len(names) > 0}
    return root_name, kept
# Conversion shorthands: terse aliases for librosa's unit conversions used
# throughout this module (s=samples, f=frames, t=time, hz=Hertz, m=midi;
# e.g. s2f converts samples to frames).
normalize = librosa.util.normalize
# amplitude (magnitude) spectrogram to dB, referenced to its own maximum
a2db = lambda S: librosa.amplitude_to_db(abs(S), ref=S.max())
s2f = librosa.samples_to_frames
s2t = librosa.samples_to_time
f2s = librosa.frames_to_samples
f2t = librosa.frames_to_time
t2f = librosa.time_to_frames
t2s = librosa.time_to_samples
hz2m = librosa.hz_to_midi
m2hz = librosa.midi_to_hz
def m2b(m, sr=SR, n_fft=N_FFT):
    """Convert a midi note number to a (fractional) FFT bin index."""
    hz_per_bin = (sr / 2) / (n_fft // 2)
    return m2hz(m) / hz_per_bin
def b2m(b, sr=SR, n_fft=N_FFT):
    """Convert an FFT bin index to a (fractional) midi note number."""
    hz_per_bin = (sr / 2) / (n_fft // 2)
    return hz2m(b * hz_per_bin)
def delta_b(b, delta_m=1, sr=SR, n_fft=N_FFT):
    """Size, in bins, of a `delta_m`-semitone interval ending at bin `b`."""
    midi_at_b = b2m(b, sr=sr, n_fft=n_fft)
    lower_bin = m2b(midi_at_b - delta_m, sr=sr, n_fft=n_fft)
    return b - lower_bin
def unit_scale(x):
    """Linearly rescale `x` so its minimum maps to 0 and its maximum to 1."""
    lo, hi = x.min(), x.max()
    return (x - lo) / (hi - lo)
# Debugging utils
def db(S):
    """Best-effort conversion of a spectrogram to dB for display/debugging.

    - complex64 input: assumed to be an STFT-like object exposing `.abs`
      (NOTE(review): plain numpy arrays have no `.abs` attribute — this
      branch seems to expect a project-specific spectrogram type; confirm).
    - non-negative float input: treated as magnitudes, shifted by +40 dB.
    - anything else: converted as-is.
    """
    if S.dtype == np.complex64:
        S_hat = a2db(S.abs) + 40
    elif S.min() >= 0 and S.dtype in (float, np.float32, np.float64):
        # BUG FIX: the tuple used np.float / np.float_, which were removed
        # in NumPy 1.24 / 2.0 (AttributeError at runtime).  The builtin
        # `float` compares equal to the float64 dtype, so coverage of the
        # original (np.float, np.float32, np.float64, np.float_) is kept.
        S_hat = a2db(S) + 40
    else:
        S_hat = a2db(S)
    return S_hat
def signal(S, hop_length=HOP_LENGTH):
    """Invert a spectrogram to a waveform.

    Complex input is inverted exactly with the inverse STFT; real-valued
    (magnitude) input falls back to Griffin-Lim phase reconstruction.
    """
    is_complex = S.dtype in (np.complex64, np.complex128)
    if is_complex:
        return librosa.istft(S, hop_length=hop_length)
    return librosa.griffinlim(S, hop_length=hop_length, n_iter=32)
def audio(S, hop_length=HOP_LENGTH, sr=SR):
    """Display an IPython audio widget for a waveform or a spectrogram.

    2-D input is first inverted to a waveform via `signal`; an empty
    inversion result is replaced by a short block of silence so the widget
    still renders.  1-D input is played back directly.
    """
    if len(S.shape) <= 1:
        return ipd.display(ipd.Audio(S, rate=sr))
    y = signal(S, hop_length)
    if y.size == 0:
        y = np.zeros(hop_length * 2)
    return ipd.display(ipd.Audio(y, rate=sr))
def playlist(iterable):
    """Display one audio widget per segment in `iterable`."""
    for segment in iterable:
        audio(segment)
    return
def playthrough(iterable, axis=1):
    """Concatenate all segments along `axis` and play them as one clip."""
    joined = np.concatenate(iterable, axis=axis)
    return audio(joined)
def show(S, figsize=(), to_db=True, y_axis="linear", x_axis='frames', title=""):
    """Plot a spectrogram with librosa's specshow and return the axes."""
    if to_db:
        display_data = db(S)
    else:
        display_data = S
    if figsize:
        # only open a fresh figure when an explicit size is requested
        plt.figure(figsize=figsize)
    ax = specshow(display_data, x_axis=x_axis, y_axis=y_axis, sr=SR)
    plt.colorbar()
    plt.tight_layout()
    plt.title(title)
    return ax
| [
"librosa.istft",
"pickle.dumps",
"matplotlib.pyplot.colorbar",
"os.path.join",
"librosa.display.specshow",
"matplotlib.pyplot.figure",
"IPython.display.Audio",
"numpy.zeros",
"numpy.concatenate",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"os.walk",
"librosa.griffinlim"
] | [((656, 674), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (663, 674), False, 'import os\n'), ((2933, 2968), 'numpy.concatenate', 'np.concatenate', (['iterable'], {'axis': 'axis'}), '(iterable, axis=axis)\n', (2947, 2968), True, 'import numpy as np\n'), ((3168, 3220), 'librosa.display.specshow', 'specshow', (['S_hat'], {'x_axis': 'x_axis', 'y_axis': 'y_axis', 'sr': 'SR'}), '(S_hat, x_axis=x_axis, y_axis=y_axis, sr=SR)\n', (3176, 3220), False, 'from librosa.display import specshow\n'), ((3225, 3239), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3237, 3239), True, 'import matplotlib.pyplot as plt\n'), ((3244, 3262), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3260, 3262), True, 'import matplotlib.pyplot as plt\n'), ((3267, 3283), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3276, 3283), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2393), 'librosa.istft', 'librosa.istft', (['S'], {'hop_length': 'hop_length'}), '(S, hop_length=hop_length)\n', (2367, 2393), False, 'import librosa\n'), ((2419, 2474), 'librosa.griffinlim', 'librosa.griffinlim', (['S'], {'hop_length': 'hop_length', 'n_iter': '(32)'}), '(S, hop_length=hop_length, n_iter=32)\n', (2437, 2474), False, 'import librosa\n'), ((3131, 3158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3141, 3158), True, 'import matplotlib.pyplot as plt\n'), ((277, 294), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (289, 294), False, 'import pickle\n'), ((979, 992), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (986, 992), False, 'import os\n'), ((2783, 2804), 'IPython.display.Audio', 'ipd.Audio', (['S'], {'rate': 'sr'}), '(S, rate=sr)\n', (2792, 2804), True, 'import IPython.display as ipd\n'), ((2634, 2655), 'IPython.display.Audio', 'ipd.Audio', (['y'], {'rate': 'sr'}), '(y, rate=sr)\n', (2643, 2655), True, 'import IPython.display as ipd\n'), ((760, 781), 
'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (772, 781), False, 'import os\n'), ((2712, 2736), 'numpy.zeros', 'np.zeros', (['(hop_length * 2)'], {}), '(hop_length * 2)\n', (2720, 2736), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
def run_jit(fapi, check):
    """Build `fapi` for every enabled target and run `check` on the module."""
    for target in ("llvm", "stackvm"):
        if tvm.runtime.enabled(target):
            built = tvm.driver.build(fapi, target=target)
            built.get_source()  # exercised for coverage; result unused
            check(built)
def MakeAPILegacy(stmt, name, args, num_unpacked_args, noalias):
    """Legacy adapter: wrap a TIR statement into a packed-API IRModule.

    `num_unpacked_args` is accepted for signature compatibility but unused.
    """
    func = tvm.tir.PrimFunc(args, stmt)
    func = func.with_attr("global_symbol", tvm.runtime.String(name))
    func = func.with_attr("tir.is_entry_func", True)
    if noalias:
        func = func.with_attr("tir.noalias", True)
    module = tvm.IRModule.from_expr(func)
    return tvm.tir.transform.MakePackedAPI()(module)
def test_stack_vm_basic():
    """Smoke test: generated code performs a packed call back into Python.

    Registers a global callback, builds a module whose body just calls it
    with the buffer's symbolic shape, and runs it on every enabled target.
    """
    a = tvm.nd.array(np.zeros(10, dtype='float32'))
    @tvm.register_func
    def tvm_call_back_get_shape(shape0):
        # invoked from the generated code; verifies the runtime shape
        print(shape0)
        assert shape0 == a.shape[0]
    n = te.size_var('n')
    Ab = tvm.tir.decl_buffer((n, ), "float32")
    stmt = tvm.tir.Evaluate(tvm.tir.call_packed("tvm_call_back_get_shape", Ab.shape[0]))
    fapi = tvm.testing.MakeAPILegacy(stmt, "print_shape", [Ab], 0, True)
    run_jit(fapi, lambda f: f(a))
@tvm.register_func
def tvm_stack_vm_print(*x):
    # Registered as a global packed function so generated code can call it
    # by name ("tvm_stack_vm_print"); it just echoes its arguments.
    print(x)
def test_stack_vm_loop():
    """Sequential loop: A[i+1] = A[i] + 1 yields 0..n-1, with a print callback."""
    dtype = 'int64'
    n = te.size_var('n')
    Ab = tvm.tir.decl_buffer((n, ), dtype)
    i = te.size_var('i')
    ib = tvm.tir.ir_builder.create()
    A = ib.buffer_ptr(Ab)
    with ib.for_range(0, n - 1, "i") as i:
        A[i + 1] = A[i] + 1
        # call the globally registered print function from generated code
        ib.emit(tvm.tir.call_packed("tvm_stack_vm_print", i))
    stmt = ib.get()
    fapi = tvm.testing.MakeAPILegacy(stmt, "ramp", [Ab], 0, True)
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    def check(f):
        # starting from zeros, the recurrence fills the buffer with 0..9
        f(a)
        np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0]))
    run_jit(fapi, check)
def test_stack_vm_cond():
    """If/else in a loop: step is +1 only at i == 4, +2 everywhere else."""
    dtype = 'int64'
    n = te.size_var('n')
    Ab = tvm.tir.decl_buffer((n, ), dtype)
    ib = tvm.tir.ir_builder.create()
    A = ib.buffer_ptr(Ab)
    with ib.for_range(0, n - 1, "i") as i:
        with ib.if_scope(tvm.tir.EQ(i, 4)):
            A[i + 1] = A[i] + 1
        with ib.else_scope():
            A[i + 1] = A[i] + 2
    stmt = ib.get()
    fapi = tvm.testing.MakeAPILegacy(stmt, "test", [Ab], 0, True)
    def check(f):
        a = tvm.nd.array(np.zeros(10, dtype=dtype))
        f(a)
        # expected: 0,2,4,... with every entry after index 4 reduced by 1
        y = np.arange(a.shape[0]) * 2
        y[5:] -= 1
        np.testing.assert_equal(a.asnumpy(), y)
    run_jit(fapi, check)
def test_vm_parallel():
    """Parallel for over the buffer: every element is incremented exactly once."""
    dtype = 'int64'
    n = te.size_var('n')
    Ab = tvm.tir.decl_buffer((n, ), dtype)
    i = te.size_var('i')
    ib = tvm.tir.ir_builder.create()
    A = ib.buffer_ptr(Ab)
    # for_type="parallel" asks the backend to lower this loop as parallel
    with ib.for_range(0, n, "i", for_type="parallel") as i:
        A[i] = A[i] + 1
    stmt = ib.get()
    fapi = tvm.testing.MakeAPILegacy(stmt, "ramp", [Ab], 0, True)
    def check(f):
        a = tvm.nd.array(np.zeros(10, dtype=dtype))
        f(a)
        np.testing.assert_equal(a.asnumpy(), np.ones(a.shape[0]))
    run_jit(fapi, check)
if __name__ == "__main__":
    # Run the whole suite when executed directly (pytest also discovers these).
    test_vm_parallel()
    test_stack_vm_loop()
    test_stack_vm_basic()
    test_stack_vm_cond()
| [
"tvm.tir.EQ",
"tvm.tir.ir_builder.create",
"tvm.tir.PrimFunc",
"tvm.te.size_var",
"numpy.ones",
"tvm.tir.transform.MakePackedAPI",
"tvm.runtime.String",
"numpy.zeros",
"tvm.tir.call_packed",
"tvm.testing.MakeAPILegacy",
"tvm.driver.build",
"tvm.runtime.enabled",
"tvm.IRModule.from_expr",
"... | [((1384, 1409), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['f'], {}), '(f)\n', (1406, 1409), False, 'import tvm\n'), ((1672, 1688), 'tvm.te.size_var', 'te.size_var', (['"""n"""'], {}), "('n')\n", (1683, 1688), False, 'from tvm import te\n'), ((1698, 1734), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['(n,)', '"""float32"""'], {}), "((n,), 'float32')\n", (1717, 1734), False, 'import tvm\n'), ((1836, 1897), 'tvm.testing.MakeAPILegacy', 'tvm.testing.MakeAPILegacy', (['stmt', '"""print_shape"""', '[Ab]', '(0)', '(True)'], {}), "(stmt, 'print_shape', [Ab], 0, True)\n", (1861, 1897), False, 'import tvm\n'), ((2049, 2065), 'tvm.te.size_var', 'te.size_var', (['"""n"""'], {}), "('n')\n", (2060, 2065), False, 'from tvm import te\n'), ((2075, 2107), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['(n,)', 'dtype'], {}), '((n,), dtype)\n', (2094, 2107), False, 'import tvm\n'), ((2117, 2133), 'tvm.te.size_var', 'te.size_var', (['"""i"""'], {}), "('i')\n", (2128, 2133), False, 'from tvm import te\n'), ((2144, 2171), 'tvm.tir.ir_builder.create', 'tvm.tir.ir_builder.create', ([], {}), '()\n', (2169, 2171), False, 'import tvm\n'), ((2363, 2417), 'tvm.testing.MakeAPILegacy', 'tvm.testing.MakeAPILegacy', (['stmt', '"""ramp"""', '[Ab]', '(0)', '(True)'], {}), "(stmt, 'ramp', [Ab], 0, True)\n", (2388, 2417), False, 'import tvm\n'), ((2646, 2662), 'tvm.te.size_var', 'te.size_var', (['"""n"""'], {}), "('n')\n", (2657, 2662), False, 'from tvm import te\n'), ((2672, 2704), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['(n,)', 'dtype'], {}), '((n,), dtype)\n', (2691, 2704), False, 'import tvm\n'), ((2716, 2743), 'tvm.tir.ir_builder.create', 'tvm.tir.ir_builder.create', ([], {}), '()\n', (2741, 2743), False, 'import tvm\n'), ((2984, 3038), 'tvm.testing.MakeAPILegacy', 'tvm.testing.MakeAPILegacy', (['stmt', '"""test"""', '[Ab]', '(0)', '(True)'], {}), "(stmt, 'test', [Ab], 0, True)\n", (3009, 3038), False, 'import tvm\n'), ((3305, 3321), 'tvm.te.size_var', 'te.size_var', 
(['"""n"""'], {}), "('n')\n", (3316, 3321), False, 'from tvm import te\n'), ((3331, 3363), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['(n,)', 'dtype'], {}), '((n,), dtype)\n', (3350, 3363), False, 'import tvm\n'), ((3373, 3389), 'tvm.te.size_var', 'te.size_var', (['"""i"""'], {}), "('i')\n", (3384, 3389), False, 'from tvm import te\n'), ((3399, 3426), 'tvm.tir.ir_builder.create', 'tvm.tir.ir_builder.create', ([], {}), '()\n', (3424, 3426), False, 'import tvm\n'), ((3568, 3622), 'tvm.testing.MakeAPILegacy', 'tvm.testing.MakeAPILegacy', (['stmt', '"""ramp"""', '[Ab]', '(0)', '(True)'], {}), "(stmt, 'ramp', [Ab], 0, True)\n", (3593, 3622), False, 'import tvm\n'), ((977, 1014), 'tvm.driver.build', 'tvm.driver.build', (['fapi'], {'target': 'target'}), '(fapi, target=target)\n', (993, 1014), False, 'import tvm\n'), ((1240, 1264), 'tvm.runtime.String', 'tvm.runtime.String', (['name'], {}), '(name)\n', (1258, 1264), False, 'import tvm\n'), ((1421, 1454), 'tvm.tir.transform.MakePackedAPI', 'tvm.tir.transform.MakePackedAPI', ([], {}), '()\n', (1452, 1454), False, 'import tvm\n'), ((1510, 1539), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': '"""float32"""'}), "(10, dtype='float32')\n", (1518, 1539), True, 'import numpy as np\n'), ((1764, 1823), 'tvm.tir.call_packed', 'tvm.tir.call_packed', (['"""tvm_call_back_get_shape"""', 'Ab.shape[0]'], {}), "('tvm_call_back_get_shape', Ab.shape[0])\n", (1783, 1823), False, 'import tvm\n'), ((2439, 2464), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'dtype'}), '(10, dtype=dtype)\n', (2447, 2464), True, 'import numpy as np\n'), ((915, 942), 'tvm.runtime.enabled', 'tvm.runtime.enabled', (['target'], {}), '(target)\n', (934, 942), False, 'import tvm\n'), ((1175, 1203), 'tvm.tir.PrimFunc', 'tvm.tir.PrimFunc', (['args', 'stmt'], {}), '(args, stmt)\n', (1191, 1203), False, 'import tvm\n'), ((2285, 2329), 'tvm.tir.call_packed', 'tvm.tir.call_packed', (['"""tvm_stack_vm_print"""', 'i'], {}), "('tvm_stack_vm_print', i)\n", (2304, 2329), 
False, 'import tvm\n'), ((2542, 2563), 'numpy.arange', 'np.arange', (['a.shape[0]'], {}), '(a.shape[0])\n', (2551, 2563), True, 'import numpy as np\n'), ((3082, 3107), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'dtype'}), '(10, dtype=dtype)\n', (3090, 3107), True, 'import numpy as np\n'), ((3134, 3155), 'numpy.arange', 'np.arange', (['a.shape[0]'], {}), '(a.shape[0])\n', (3143, 3155), True, 'import numpy as np\n'), ((3666, 3691), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'dtype'}), '(10, dtype=dtype)\n', (3674, 3691), True, 'import numpy as np\n'), ((3751, 3770), 'numpy.ones', 'np.ones', (['a.shape[0]'], {}), '(a.shape[0])\n', (3758, 3770), True, 'import numpy as np\n'), ((2838, 2854), 'tvm.tir.EQ', 'tvm.tir.EQ', (['i', '(4)'], {}), '(i, 4)\n', (2848, 2854), False, 'import tvm\n')] |
import numpy as np
from scipy import optimize
#%matplotlib inline
import matplotlib.pyplot as plt
def keynesian_cross(T, I, G, NX, a, b):
""" Draws the Keynesian cross with the 45-degree line and
the planned total spending as a function of total production.
Args:
T (float): Taxs
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
Return: Figure
"""
# The data vector to be plotted for production and aggregate expenditure:
Y_arrey = np.linspace(0,300)
AD_arrey = (a + b * (Y_arrey - T) + I + G + NX)
degree = Y_arrey
# The figure
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(1,1,1)
ax.plot(Y_arrey, degree, label="45-degree line", color='lightblue',linewidth=3)
ax.plot(Y_arrey, AD_arrey, label="AD=C+I+G+NX", color='darkorange',linewidth=3)
ax.set_xlabel("Y")
ax.set_ylabel("AD")
ax.legend(loc="upper left")
ax.grid()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return
def cross_equalibrium(T, I, G, NX, a, b):
""" The equalibrium for the Keynesian cross where aggregate expenditure equals total production
Args:
T (float): Tax
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
Returns:
Result: Production in equalibrium, Y (float)
"""
return 1/(1-b) * (I + G + NX + a - b*T)
def keynesian_cross_NXshift(T, I, G, NX, a, b, delta_NX):
""" Steady state for the Keynesian cross where aggregate expenditure equals total production
Args:
AD (float): Aggregate expenditure
Y (float): Total production
T (float): Tax
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
delta_NX (float): Net export shift in
Returns:
Result: Figure
"""
# The equation setup
NX2 = NX + delta_NX
Y_arrey = np.linspace(0,300)
AD_arrey = (a + b * (Y_arrey - T) + I + G + NX)
AD2_arrey = (a + b * (Y_arrey - T) + I + G + NX2)
degree = Y_arrey
# The figure
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(1,1,1)
ax.plot(Y_arrey, degree, label="45-degree line", color='lightblue')
ax.plot(Y_arrey, AD_arrey, label="AD=C+I+G+NX", color='orange')
ax.plot(Y_arrey, AD2_arrey, label="AD'=C+I+G+NX'", color='red')
ax.set_xlabel("Y")
ax.set_ylabel("AD")
ax.legend(loc="upper left")
ax.grid()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return
def num_opt(Y_goal,T,I,G,a,b):
""" Numerical optimazation to calculate value of NX to optain production goal
Args:
Y_goal (float): Production goal
T (float): Tax
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
Returns:
Result: NX (float)
"""
# Object function to be optimized:
obj = lambda NX: (cross_equalibrium(T, I, G, NX, a, b) - Y_goal)**2
# Initial guess
x0 = 10
return optimize.minimize(obj,x0)
def keynesian_cross_NXshift_t(k, t, I, G, NX, a, b, delta_NX):
""" Steady state for the Keynesian cross where aggregate expenditure equals total production
Args:
AD (float): Aggregate expenditure
Y (float): Total production
k (float): Base tax
t (float): Marginal tax rate
a (float): Constant consumption, a>0
b (float): Marginal consumption rate, 0<b<1
I (float): Investment
G (float): Public expenditure
NX (float): Net export
delta_NX (float): Net export shift in
Returns:
Result: Figure
"""
# The equation setup and generating of data arreys:
NX2 = NX + delta_NX
Y_arrey = np.linspace(0,300)
AD_arrey = (a + b * (Y_arrey - (k + b*t)) + I + G + NX)
AD2_arrey = (a + b * (Y_arrey - (k + b*t)) + I + G + NX2)
degree = Y_arrey
# The figure:
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(1,1,1)
ax.plot(Y_arrey, degree, label="45-degree line", color='lightblue')
ax.plot(Y_arrey, AD_arrey, label="AD=C+I+G+NX", color='orange')
ax.plot(Y_arrey, AD2_arrey, label="AD'=C+I+G+NX'", color='red')
ax.set_xlabel("Y")
ax.set_ylabel("AD")
ax.legend(loc="upper left")
ax.grid()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return | [
"numpy.linspace",
"scipy.optimize.minimize",
"matplotlib.pyplot.figure"
] | [((624, 643), 'numpy.linspace', 'np.linspace', (['(0)', '(300)'], {}), '(0, 300)\n', (635, 643), True, 'import numpy as np\n'), ((744, 771), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (754, 771), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2374), 'numpy.linspace', 'np.linspace', (['(0)', '(300)'], {}), '(0, 300)\n', (2366, 2374), True, 'import numpy as np\n'), ((2529, 2556), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2539, 2556), True, 'import matplotlib.pyplot as plt\n'), ((3627, 3653), 'scipy.optimize.minimize', 'optimize.minimize', (['obj', 'x0'], {}), '(obj, x0)\n', (3644, 3653), False, 'from scipy import optimize\n'), ((4349, 4368), 'numpy.linspace', 'np.linspace', (['(0)', '(300)'], {}), '(0, 300)\n', (4360, 4368), True, 'import numpy as np\n'), ((4540, 4567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4550, 4567), True, 'import matplotlib.pyplot as plt\n')] |
"""
act.retrievals.stability_indices
--------------------------------
Module that adds stability indicies to a dataset.
"""
import warnings
import numpy as np
try:
from pkg_resources import DistributionNotFound
import metpy.calc as mpcalc
METPY_AVAILABLE = True
except ImportError:
METPY_AVAILABLE = False
except (ModuleNotFoundError, DistributionNotFound):
warnings.warn("MetPy is installed but could not be imported. " +
"Please check your MetPy installation. Some features " +
"will be disabled.", ImportWarning)
METPY_AVAILABLE = False
if METPY_AVAILABLE:
from metpy.units import units
def calculate_stability_indicies(ds, temp_name="temperature",
td_name="dewpoint_temperature",
p_name="pressure",
moving_ave_window=0):
"""
Function for calculating stability indices from sounding data.
Parameters
----------
ds : ACT dataset
The dataset to compute the stability indicies of. Must have
temperature, dewpoint, and pressure in vertical coordinates.
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
moving_ave_window : int
Number of points to do a moving average on sounding data to reduce
noise. This is useful if noise in the sounding is preventing parcel
ascent.
Returns
-------
ds : ACT dataset
An ACT dataset with additional stability indicies added.
"""
if not METPY_AVAILABLE:
raise ImportError("MetPy need to be installed on your system to " +
"calculate stability indices")
t = ds[temp_name]
td = ds[td_name]
p = ds[p_name]
if not hasattr(t, "units"):
raise AttributeError("Temperature field must have units" +
" for ACT to discern!")
if not hasattr(td, "units"):
raise AttributeError("Dewpoint field must have units" +
" for ACT to discern!")
if not hasattr(p, "units"):
raise AttributeError("Pressure field must have units" +
" for ACT to discern!")
if t.units == "C":
t_units = units.degC
else:
t_units = getattr(units, t.units)
if td.units == "C":
td_units = units.degC
else:
td_units = getattr(units, td.units)
p_units = getattr(units, p.units)
# Sort all values by decreasing pressure
t_sorted = np.array(t.values)
td_sorted = np.array(td.values)
p_sorted = np.array(p.values)
ind_sort = np.argsort(p_sorted)
t_sorted = t_sorted[ind_sort[-1:0:-1]]
td_sorted = td_sorted[ind_sort[-1:0:-1]]
p_sorted = p_sorted[ind_sort[-1:0:-1]]
if moving_ave_window > 0:
t_sorted = np.convolve(
t_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
td_sorted = np.convolve(
td_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
p_sorted = np.convolve(
p_sorted, np.ones((moving_ave_window,)) / moving_ave_window)
t_sorted = t_sorted * t_units
td_sorted = td_sorted * td_units
p_sorted = p_sorted * p_units
t_profile = mpcalc.parcel_profile(
p_sorted, t_sorted[0], td_sorted[0])
# Calculate parcel trajectory
ds["parcel_temperature"] = t_profile.magnitude
ds["parcel_temperature"].attrs['units'] = t_profile.units
# Calculate CAPE, CIN, LCL
sbcape, sbcin = mpcalc.surface_based_cape_cin(
p_sorted, t_sorted, td_sorted)
lcl = mpcalc.lcl(
p_sorted[0], t_sorted[0], td_sorted[0])
try:
lfc = mpcalc.lfc(
p_sorted[0], t_sorted[0], td_sorted[0])
except IndexError:
lfc = np.nan * p_sorted.units
mucape, mucin = mpcalc.most_unstable_cape_cin(
p_sorted, t_sorted, td_sorted)
where_500 = np.argmin(np.abs(p_sorted - 500 * units.hPa))
li = t_sorted[where_500] - t_profile[where_500]
ds["surface_based_cape"] = sbcape.magnitude
ds["surface_based_cape"].attrs['units'] = "J/kg"
ds["surface_based_cape"].attrs['long_name'] = "Surface-based CAPE"
ds["surface_based_cin"] = sbcin.magnitude
ds["surface_based_cin"].attrs['units'] = "J/kg"
ds["surface_based_cin"].attrs['long_name'] = "Surface-based CIN"
ds["most_unstable_cape"] = mucape.magnitude
ds["most_unstable_cape"].attrs['units'] = "J/kg"
ds["most_unstable_cape"].attrs['long_name'] = "Most unstable CAPE"
ds["most_unstable_cin"] = mucin.magnitude
ds["most_unstable_cin"].attrs['units'] = "J/kg"
ds["most_unstable_cin"].attrs['long_name'] = "Most unstable CIN"
ds["lifted_index"] = li.magnitude
ds["lifted_index"].attrs['units'] = t_profile.units
ds["lifted_index"].attrs['long_name'] = "Lifted index"
ds["level_of_free_convection"] = lfc.magnitude
ds["level_of_free_convection"].attrs['units'] = lfc.units
ds["level_of_free_convection"].attrs['long_name'] = "Level of free convection"
ds["lifted_condensation_level_temperature"] = lcl[1].magnitude
ds["lifted_condensation_level_temperature"].attrs['units'] = lcl[1].units
ds["lifted_condensation_level_temperature"].attrs['long_name'] = "Lifted condensation level temperature"
ds["lifted_condensation_level_pressure"] = lcl[0].magnitude
ds["lifted_condensation_level_pressure"].attrs['units'] = lcl[0].units
ds["lifted_condensation_level_pressure"].attrs['long_name'] = "Lifted condensation level pressure"
return ds
| [
"numpy.abs",
"numpy.ones",
"metpy.calc.lcl",
"metpy.calc.most_unstable_cape_cin",
"numpy.argsort",
"numpy.array",
"metpy.calc.lfc",
"warnings.warn",
"metpy.calc.parcel_profile",
"metpy.calc.surface_based_cape_cin"
] | [((2642, 2660), 'numpy.array', 'np.array', (['t.values'], {}), '(t.values)\n', (2650, 2660), True, 'import numpy as np\n'), ((2677, 2696), 'numpy.array', 'np.array', (['td.values'], {}), '(td.values)\n', (2685, 2696), True, 'import numpy as np\n'), ((2712, 2730), 'numpy.array', 'np.array', (['p.values'], {}), '(p.values)\n', (2720, 2730), True, 'import numpy as np\n'), ((2746, 2766), 'numpy.argsort', 'np.argsort', (['p_sorted'], {}), '(p_sorted)\n', (2756, 2766), True, 'import numpy as np\n'), ((3369, 3427), 'metpy.calc.parcel_profile', 'mpcalc.parcel_profile', (['p_sorted', 't_sorted[0]', 'td_sorted[0]'], {}), '(p_sorted, t_sorted[0], td_sorted[0])\n', (3390, 3427), True, 'import metpy.calc as mpcalc\n'), ((3637, 3697), 'metpy.calc.surface_based_cape_cin', 'mpcalc.surface_based_cape_cin', (['p_sorted', 't_sorted', 'td_sorted'], {}), '(p_sorted, t_sorted, td_sorted)\n', (3666, 3697), True, 'import metpy.calc as mpcalc\n'), ((3717, 3767), 'metpy.calc.lcl', 'mpcalc.lcl', (['p_sorted[0]', 't_sorted[0]', 'td_sorted[0]'], {}), '(p_sorted[0], t_sorted[0], td_sorted[0])\n', (3727, 3767), True, 'import metpy.calc as mpcalc\n'), ((3946, 4006), 'metpy.calc.most_unstable_cape_cin', 'mpcalc.most_unstable_cape_cin', (['p_sorted', 't_sorted', 'td_sorted'], {}), '(p_sorted, t_sorted, td_sorted)\n', (3975, 4006), True, 'import metpy.calc as mpcalc\n'), ((381, 546), 'warnings.warn', 'warnings.warn', (["('MetPy is installed but could not be imported. ' +\n 'Please check your MetPy installation. Some features ' +\n 'will be disabled.')", 'ImportWarning'], {}), "('MetPy is installed but could not be imported. ' +\n 'Please check your MetPy installation. 
Some features ' +\n 'will be disabled.', ImportWarning)\n", (394, 546), False, 'import warnings\n'), ((3800, 3850), 'metpy.calc.lfc', 'mpcalc.lfc', (['p_sorted[0]', 't_sorted[0]', 'td_sorted[0]'], {}), '(p_sorted[0], t_sorted[0], td_sorted[0])\n', (3810, 3850), True, 'import metpy.calc as mpcalc\n'), ((4043, 4077), 'numpy.abs', 'np.abs', (['(p_sorted - 500 * units.hPa)'], {}), '(p_sorted - 500 * units.hPa)\n', (4049, 4077), True, 'import numpy as np\n'), ((2983, 3012), 'numpy.ones', 'np.ones', (['(moving_ave_window,)'], {}), '((moving_ave_window,))\n', (2990, 3012), True, 'import numpy as np\n'), ((3090, 3119), 'numpy.ones', 'np.ones', (['(moving_ave_window,)'], {}), '((moving_ave_window,))\n', (3097, 3119), True, 'import numpy as np\n'), ((3195, 3224), 'numpy.ones', 'np.ones', (['(moving_ave_window,)'], {}), '((moving_ave_window,))\n', (3202, 3224), True, 'import numpy as np\n')] |
#
# tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow
# Copyright 2018 <NAME>, <NAME>, <NAME>, <NAME>
#
# Plume data generation, 2D
#
from manta import *
import os, shutil, math, sys
import numpy as np
sys.path.append("../tools")
import paramhelpers as ph
simId = 2006
simPath = '../2ddata_sim/'
simPath,simId = ph.getNextSimPath(simId, simPath)
# how much to reduce target sim size
targetFac = 0.25
savenpz = 1
# source solver params
dim = 2
#res = 128
res = 512
gs = vec3(res,int(1.0*res),res)
if (dim==2): gs.z = 1 # 2D
sm = Solver(name='main', gridSize = gs, dim=dim)
sm.timestep = 1.5
sm.timestep = 0.75
# inflow noise field
noise = NoiseField( parent=sm, fixedSeed=265, loadFromFile=True)
noise.posScale = vec3(24)
noise.clamp = True
noise.clampNeg = 0
noise.clampPos = 2
noise.valScale = 1
noise.valOffset = 0.075
noise.timeAnim = 0.3
noise.timeAnim = 0.5
cylWidth = 0.13
source = Cylinder(parent=sm, center=gs*vec3(0.5,0.1,0.5), radius=res*cylWidth, z=gs*vec3(0, 0.04, 0))
# target solver, recompute sizes...
target_gs = vec3(targetFac*gs.x,targetFac*gs.y,targetFac*gs.z)
if (dim==2): target_gs.z = 1 # 2D
targs = Solver(name='target', gridSize = target_gs, dim=dim)
targs.timestep = sm.timestep
dummy = targs.create(MACGrid)
target_flags = targs.create(FlagGrid)
target_vel = targs.create(MACGrid)
target_density = targs.create(RealGrid)
target_flags.initDomain()
target_flags.fillGrid()
target_source = Cylinder(parent=targs, center=target_gs*vec3(0.5,0.1,0.5), radius=res*targetFac*cylWidth, z=target_gs*vec3(0, 0.04, 0))
if savenpz:
arR = np.zeros([int(gs.z), int(gs.y), int(gs.x), 1])
arV = np.zeros([int(gs.z), int(gs.y), int(gs.x), 3])
target_arR = np.zeros([int(target_gs.z), int(target_gs.y), int(target_gs.x), 1])
target_arV = np.zeros([int(target_gs.z), int(target_gs.y), int(target_gs.x), 3])
# allocate other grids
flags = sm.create(FlagGrid)
vel = sm.create(MACGrid)
density = sm.create(RealGrid)
pressure = sm.create(RealGrid)
blurden = sm.create(RealGrid)
blurvel = sm.create(MACGrid)
bWidth=0
flags.initDomain(boundaryWidth=bWidth)
flags.fillGrid()
setOpenBound(flags,bWidth,'yY',FlagOutflow|FlagEmpty)
if (GUI):
gui = Gui()
gui.setCamPos(0., 0., -1.3)
gui.show()
# main loop
for t in range(400):
mantaMsg('\nFrame %i, simulation time %f' % (sm.frame, sm.timeTotal))
advectSemiLagrange(flags=flags, vel=vel, grid=density, order=2, clampMode=2 )
advectSemiLagrange(flags=flags, vel=vel, grid=vel, order=2, clampMode=2 , openBounds=True, boundaryWidth=bWidth )
applyInflow=False
if (sm.timeTotal>=0 and sm.timeTotal<150.):
densityInflow( flags=flags, density=density, noise=noise, shape=source, scale=1, sigma=0.5 )
applyInflow=True
setWallBcs(flags=flags, vel=vel)
#addBuoyancy(density=density, vel=vel, gravity=vec3(0,-1e-3,0), flags=flags)
addBuoyancy(density=density, vel=vel, gravity=vec3(0,-2e-4,0), flags=flags)
#vorticityConfinement( vel=vel, flags=flags, strength=0.1 )
solvePressure(flags=flags, vel=vel, pressure=pressure , cgMaxIterFac=1.0, cgAccuracy=0.01 )
setWallBcs(flags=flags, vel=vel)
sm.step()
# copy to target
if 1:
blurSig = float(1./targetFac) / 3.544908 # 3.544908 = 2 * sqrt( PI )
blurRealGrid( density, blurden, blurSig)
interpolateGrid( target=target_density, source=blurden )
blurMacGrid( vel, blurvel, blurSig)
interpolateMACGrid( target=target_vel, source=blurvel )
target_vel.multConst( vec3(targetFac) )
# save uni files here, 1 means saveuni
if 1 and t%2==0:
frameNr = t / 2
target_vel.save("../2ddata_sim/sim_%s/velocity_low_%04d.uni" % (simId,frameNr) )
target_density.save("../2ddata_sim/sim_%s/density_low_%04d.uni" % (simId,frameNr) )
density.save("../2ddata_sim/sim_%s/density_high_%04d.uni" % (simId,frameNr) )
#gui.screenshot( 'plume_%04d.png' % frameNr );
if savenpz and t%2==0:
tf = t / 2
print("Writing NPZs for frame %d"%tf)
copyGridToArrayReal( target=target_arR, source=target_density )
np.savez_compressed( simPath + 'density_low_%04d.npz' % (tf), target_arR )
copyGridToArrayVec3( target=target_arV, source=target_vel )
np.savez_compressed( simPath + 'velocity_low_%04d.npz' % (tf), target_arV )
copyGridToArrayReal( target=arR, source=density )
np.savez_compressed( simPath + 'density_high_%04d.npz' % (tf), arR )
copyGridToArrayVec3( target=arV, source=vel )
np.savez_compressed( simPath + 'velocity_high_%04d.npz' % (tf), arV )
targs.step()
| [
"paramhelpers.getNextSimPath",
"sys.path.append",
"numpy.savez_compressed"
] | [((233, 260), 'sys.path.append', 'sys.path.append', (['"""../tools"""'], {}), "('../tools')\n", (248, 260), False, 'import os, shutil, math, sys\n'), ((344, 377), 'paramhelpers.getNextSimPath', 'ph.getNextSimPath', (['simId', 'simPath'], {}), '(simId, simPath)\n', (361, 377), True, 'import paramhelpers as ph\n'), ((4026, 4096), 'numpy.savez_compressed', 'np.savez_compressed', (["(simPath + 'density_low_%04d.npz' % tf)", 'target_arR'], {}), "(simPath + 'density_low_%04d.npz' % tf, target_arR)\n", (4045, 4096), True, 'import numpy as np\n'), ((4165, 4236), 'numpy.savez_compressed', 'np.savez_compressed', (["(simPath + 'velocity_low_%04d.npz' % tf)", 'target_arV'], {}), "(simPath + 'velocity_low_%04d.npz' % tf, target_arV)\n", (4184, 4236), True, 'import numpy as np\n'), ((4295, 4359), 'numpy.savez_compressed', 'np.savez_compressed', (["(simPath + 'density_high_%04d.npz' % tf)", 'arR'], {}), "(simPath + 'density_high_%04d.npz' % tf, arR)\n", (4314, 4359), True, 'import numpy as np\n'), ((4414, 4479), 'numpy.savez_compressed', 'np.savez_compressed', (["(simPath + 'velocity_high_%04d.npz' % tf)", 'arV'], {}), "(simPath + 'velocity_high_%04d.npz' % tf, arV)\n", (4433, 4479), True, 'import numpy as np\n')] |
"""
### BEGIN NODE INFO
[info]
name = ARTIQ Server
version = 1.0
description = Pulser using the ARTIQ box. Backwards compatible with old pulse sequences and experiments.
instancename = ARTIQ Server
[startup]
cmdline = %PYTHON% %FILE%
timeout = 20
[shutdown]
message = 987654321
timeout = 20
### END NODE INFO
"""
# labrad imports
from labrad.server import LabradServer, setting, Signal
from twisted.internet.threads import deferToThread
from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue
# artiq imports
from artiq_api import ARTIQ_api
from artiq.experiment import *
from artiq.master.databases import DeviceDB
from artiq.master.worker_db import DeviceManager
from sipyco.pc_rpc import Client
# device imports
from artiq.coredevice.ad9910 import _AD9910_REG_FTW, _AD9910_REG_ASF, _AD9910_REG_POW
from artiq.coredevice.comm_moninj import CommMonInj, TTLProbe, TTLOverride
from artiq.coredevice.ad53xx import AD53XX_READ_X1A, AD53XX_READ_X1B, AD53XX_READ_OFFSET,\
AD53XX_READ_GAIN, AD53XX_READ_OFS0, AD53XX_READ_OFS1,\
AD53XX_READ_AB0, AD53XX_READ_AB1, AD53XX_READ_AB2, AD53XX_READ_AB3
from artiq.coredevice.ad9910 import _AD9910_REG_FTW, _AD9910_REG_POW, _AD9910_REG_ASF
AD53XX_REGISTERS = {'X1A': AD53XX_READ_X1A, 'X1B': AD53XX_READ_X1B, 'OFF': AD53XX_READ_OFFSET,
'GAIN': AD53XX_READ_GAIN, 'OFS0': AD53XX_READ_OFS1, 'OFS1': AD53XX_READ_OFS1,
'AB0': AD53XX_READ_AB0, 'AB1': AD53XX_READ_AB1, 'AB2': AD53XX_READ_AB2,
'AB3': AD53XX_READ_AB3}
# th1.inject(8, TTLOverride.level.value, 1)
# th1.inject(8, TTLOverride.oe.value, 1)
# th1.inject(8, TTLOverride.en.value, 1)
# function imports
import numpy as np
import asyncio
import time
TTLSIGNAL_ID = 828176
DACSIGNAL_ID = 828175
class ARTIQ_Server(LabradServer):
"""ARTIQ server."""
name = 'ARTIQ Server'
regKey = 'ARTIQ_Server'
# SIGNALS
ttlChanged = Signal(TTLSIGNAL_ID, 'signal: ttl changed', '(sib)')
dacChanged = Signal(DACSIGNAL_ID, 'signal: dac changed', '(ssv)')
# STARTUP
def __init__(self):
self.api = ARTIQ_api()
self.ddb_filepath = 'C:\\Users\\EGGS1\\Documents\\ARTIQ\\artiq-master\\device_db.py'
self.devices = DeviceDB(self.ddb_filepath)
self.device_manager = DeviceManager(self.devices)
LabradServer.__init__(self)
@inlineCallbacks
def initServer(self):
self.listeners = set()
yield self._setClients()
yield self._setVariables()
yield self._setDevices()
self.ttlChanged(('ttl99', 0, True))
#@inlineCallbacks
def _setClients(self):
"""
Create clients to ARTIQ master.
Used to get datasets, submit experiments, and monitor devices.
"""
self.scheduler = Client('::1', 3251, 'master_schedule')
self.datasets = Client('::1', 3251, 'master_dataset_db')
def _setVariables(self):
"""
Sets ARTIQ-related variables.
"""
# used to ensure atomicity
self.inCommunication = DeferredLock()
# pulse sequencer variables
self.ps_filename = 'C:\\Users\\EGGS1\\Documents\\Code\\EGGS_labrad\\lib\\servers\\pulser\\run_ps.py'
self.ps_rid = None
# conversions
# dds
dds_tmp = list(self.api.dds_list.values())[0]
self.seconds_to_mu = self.api.core.seconds_to_mu
self.amplitude_to_asf = dds_tmp.amplitude_to_asf
self.frequency_to_ftw = dds_tmp.frequency_to_ftw
self.turns_to_pow = dds_tmp.turns_to_pow
self.dbm_to_fampl = lambda dbm: 10**(float(dbm/10))
#dac
from artiq.coredevice.ad53xx import voltage_to_mu #, ad53xx_cmd_read_ch
self.voltage_to_mu = voltage_to_mu
# self.dac_read_code = ad53xx_cmd_read_ch
# sampler
from artiq.coredevice.sampler import adc_mu_to_volt
self.adc_mu_to_volt = adc_mu_to_volt
#@inlineCallbacks
def _setDevices(self):
"""
Get the list of devices in the ARTIQ box.
"""
self.device_db = self.devices.get_device_db()
self.ttlout_list = list(self.api.ttlout_list.keys())
self.ttlin_list = list(self.api.ttlin_list.keys())
self.dds_list = list(self.api.dds_list.keys())
# needed for moninj
ttl_all_list = self.ttlout_list + self.ttlin_list
self.ttl_channel_to_name = {self.device_db[ttl_name]['arguments']['channel']: ttl_name for ttl_name in ttl_all_list}
self.dac_channel = self.device_db['spi_zotino0']['arguments']['channel']
# CORE
@setting(21, "Get Devices", returns='*s')
def getDevices(self, c):
"""
Returns a list of ARTIQ devices.
"""
self.ttlChanged(('ttl99', 0, True))
return list(self.device_db.keys())
# PULSE SEQUENCING
@setting(111, "Run Experiment", path='s', maxruns='i', returns='')
def runExperiment(self, c, path, maxruns = 1):
"""
Run the experiment a given number of times.
Argument:
path (string): the filepath to the ARTIQ experiment.
maxruns (int) : the number of times to run the experiment
"""
# set pipeline, priority, and expid
ps_pipeline = 'PS'
ps_priority = 1
ps_expid = {'log_level': 30,
'file': path,
'class_name': None,
'arguments': {'maxRuns': maxruns,
'linetrigger_enabled': self.linetrigger_enabled,
'linetrigger_delay_us': self.linetrigger_delay,
'linetrigger_ttl_name': self.linetrigger_ttl}}
# run sequence then wait for experiment to submit
yield self.inCommunication.acquire()
self.ps_rid = yield deferToThread(self.scheduler.submit, pipeline_name = ps_pipeline, expid = ps_expid, priority = ps_priority)
self.inCommunication.release()
@setting(112, "Stop Experiment", returns='')
def stopSequence(self, c):
"""
Stops any currently running sequence.
"""
# check that an experiment is currently running
if self.ps_rid not in self.scheduler.get_status().keys():
raise Exception('Error: no experiment currently running')
yield self.inCommunication.acquire()
yield deferToThread(self.scheduler.delete, self.ps_rid)
self.ps_rid = None
#todo: make resetting of ps_rid contingent on defertothread completion
self.inCommunication.release()
@setting(113, "Runs Completed", returns='i')
def runsCompleted(self, c):
"""
Check how many iterations of the experiment have been completed.
"""
completed_runs = yield self.datasets.get('numRuns')
returnValue(completed_runs)
# TTL
@setting(211, 'TTL Get', returns='*s')
def getTTL(self, c):
"""
Returns all available TTL channels
"""
return self.ttlout_list
@setting(221, "TTL Set", ttl_name='s', state='b', returns='')
def setTTL(self, c, ttl_name, state):
"""
Manually set a TTL to the given state.
Arguments:
ttl_name (str) : name of the ttl
state (bool) : ttl power state
"""
if ttl_name not in self.ttlout_list:
raise Exception('Error: device does not exist.')
yield self.api.setTTL(ttl_name, state)
# DDS
@setting(311, "DDS Get", returns='*s')
def getDDS(self, c):
"""
Get the list of available DDS (AD5372) channels.
Returns:
(*str) : the list of dds names
"""
dds_list = yield self.api.dds_list.keys()
returnValue(list(dds_list))
@setting(321, "DDS Initialize", dds_name='s', returns='')
def initializeDDS(self, c, dds_name):
"""
Resets/initializes the DDSs.
Arguments:
dds_name (str) : the name of the dds
"""
if dds_name not in self.dds_list:
raise Exception('Error: device does not exist.')
yield self.api.initializeDDS(dds_name)
@setting(322, "DDS Toggle", dds_name='s', state='b', returns='')
def toggleDDS(self, c, dds_name, state):
"""
Manually toggle a DDS via the RF switch
Arguments:
dds_name (str) : the name of the dds
state (bool) : power state
"""
if dds_name not in self.dds_list:
raise Exception('Error: device does not exist.')
yield self.api.toggleDDS(dds_name, state)
@setting(323, "DDS Waveform", dds_name='s', param='s', param_val='v', returns='')
def setDDSWav(self, c, dds_name, param, param_val):
"""
Manually set a DDS to the given parameters.
Arguments:
dds_name (str) : the name of the dds
param (str) : the parameter to set
param_val (float) : the value of the parameter
"""
#todo: check input
if dds_name not in self.dds_list:
raise Exception('Error: device does not exist.')
if param.lower() in ('frequency', 'f'):
ftw = yield self.frequency_to_ftw(param_val)
yield self.api.setDDS(dds_name, 0, ftw)
elif param.lower() in ('amplitude', 'a'):
asf = yield self.amplitude_to_asf(param_val)
yield self.api.setDDS(dds_name, 1, asf)
elif param.lower() in ('phase', 'p'):
if param_val >= 1 or pow < 0:
raise Exception('Error: phase outside bounds of [0,1]')
pow = yield self.turns_to_pow(param_val)
yield self.api.setDDS(dds_name, 2, pow)
@setting(326, "DDS Attenuation", dds_name='s', att='v', units='s', returns='')
def setDDSAtt(self, c, dds_name, att, units):
"""
Manually set a DDS to the given parameters.
Arguments:
dds_name (str) : the name of the dds
att (float) : attenuation (in dBm)
"""
if dds_name not in self.dds_list:
raise Exception('Error: device does not exist.')
#todo: check input
#todo: sort out units
att_mu = att
yield self.api.setDDSAtt(dds_name, att_mu)
@setting(331, "DDS Read", dds_name='s', addr='i', length='i', returns='w')
def readDDS(self, c, dds_name, addr, length):
"""
Read the value of a DDS register.
Arguments:
dds_name (str) : the name of the dds
addr (int) : the address to read from
length (int) : how many bits to read
Returns:
(word) : the register value
"""
if dds_name not in self.dds_list:
raise Exception('Error: device does not exist.')
elif length not in (16, 32):
raise Exception('Error: invalid read length. Must be one of (16, 32).')
reg_val = yield self.api.readDDS(dds_name, addr, length)
returnValue(reg_val)
# DAC
@setting(411, "DAC Initialize", returns='')
def initializeDAC(self, c):
"""
Manually initialize the DAC.
"""
yield self.api.initializeDAC()
@setting(421, "DAC Set", dac_num='i', value='v', units='s', returns='')
def setDAC(self, c, dac_num, value, units):
"""
Manually set the voltage of a DAC channel.
Arguments:
dac_num (int) : the DAC channel number
value (float) : the value to write to the DAC register
units (str) : the voltage units, either 'mu' or 'v'
"""
voltage_mu = None
# check that dac channel is valid
if (dac_num > 31) or (dac_num < 0):
raise Exception('Error: device does not exist.')
if units == 'v':
voltage_mu = yield self.voltage_to_mu(value)
elif units == 'mu':
if (value < 0) or (value > 0xffff):
raise Exception('Error: invalid DAC Voltage!')
voltage_mu = int(value)
yield self.api.setDAC(dac_num, voltage_mu)
@setting(422, "DAC Gain", dac_num='i', gain='v', units='s', returns='')
def setDACGain(self, c, dac_num, gain, units):
"""
Manually set the gain of a DAC channel.
Arguments:
dac_num (int) : the DAC channel number
gain (float) : the DAC channel gain
units (str) : the gain units, either 'mu' or 'dB'
"""
gain_mu = None
# only 32 channels per DAC
if (dac_num > 31) or (dac_num < 0):
raise Exception('Error: device does not exist.')
if units == 'todo':
gain_mu = int(gain * 0xffff) - 1
elif units == 'mu':
gain_mu = int(gain)
# check that gain is valid
if gain < 0 or gain > 0xffff:
raise Exception('Error: gain outside bounds of [0,1]')
yield self.api.setDACGain(dac_num, gain_mu)
@setting(423, "DAC Offset", dac_num='i', value='v', units='s', returns='')
def setDACOffset(self, c, dac_num, value, units):
"""
Manually set the offset voltage of a DAC channel.
Arguments:
dac_num (int) : the DAC channel number
value (float) : the value to write to the DAC offset register
units (str) : the voltage units, either 'mu' or 'v'
"""
voltage_mu = None
# check that dac channel is valid
if (dac_num > 31) or (dac_num < 0):
raise Exception('Error: device does not exist.')
if units == 'v':
voltage_mu = yield self.voltage_to_mu(value)
elif units == 'mu':
if (value < 0) or (value > 0xffff):
raise Exception('Error: invalid DAC Voltage!')
voltage_mu = int(value)
yield self.api.setDACOffset(dac_num, voltage_mu)
@setting(424, "DAC OFS", value='v', units='s', returns='')
def setDACglobal(self, c, value, units):
"""
Write to the OFSx registers of the DAC.
Arguments:
value (float) : the value to write to the DAC OFSx register
units (str) : the voltage units, either 'mu' or 'v'
"""
voltage_mu = None
if units == 'v':
voltage_mu = yield self.voltage_to_mu(value)
elif units == 'mu':
if (value < 0) or (value > 0x2fff):
raise Exception('Error: invalid DAC Voltage!')
voltage_mu = int(value)
yield self.api.setDACGlobal(voltage_mu)
@setting(431, "DAC Read", dac_num='i', reg='s', returns='i')
def readDAC(self, c, dac_num, reg):
"""
Read the value of a DAC register.
Arguments:
dac_num (int) : the dac channel number
param (float) : the register to read from
"""
if (dac_num > 31) or (dac_num < 0):
raise Exception('Error: device does not exist.')
elif reg.upper() not in AD53XX_REGISTERS.keys():
raise Exception('Error: invalid register. Must be one of ' + str(tuple(AD53XX_REGISTERS.keys())))
reg_val = yield self.api.readDAC(dac_num, AD53XX_REGISTERS[reg])
returnValue(reg_val)
# SAMPLER
@setting(511, "Sampler Initialize", returns='')
def initializeSampler(self, c):
"""
Initialize the Sampler.
"""
yield self.api.initializeSampler()
@setting(512, "Sampler Gain", channel='i', gain='i', returns='')
def setSamplerGain(self, c, channel, gain):
"""
Set the gain of a sampler channel.
Arguments:
channel (int) : the dac channel number
gain (int) : the channel gain
"""
if gain not in (1, 10, 100, 1000):
raise Exception('Error: invalid gain. Must be one of (1, 10, 100, 1000).')
yield self.api.setSamplerGain(channel, int(np.log10(gain)))
@setting(521, "Sampler Read", samples='i', returns='*v')
def readSampler(self, c, samples=None):
"""
Acquire samples.
Arguments:
samples (int) : the number of samples to read
Returns:
(*float): the samples
"""
if samples is None:
samples = 8
elif samples % 2 == 1:
raise Exception('Error: number of samples must be even')
# get channel gains
gains = yield self.api.getSamplerGains()
gains = [(gains >> 2 * i) & 0b11 for i in range(8)]
# acquire samples
sampleArr = [0] * samples
yield self.api.readSampler(sampleArr)
# convert mu to gain
for i in range(len(sampleArr)):
self.adc_mu_to_volt(sampleArr[i], gains[i % 8])
returnValue(sampleArr)
# CONTEXT
def notifyOtherListeners(self, context, message, f):
"""
Notifies all listeners except the one in the given context, executing function f
"""
notified = self.listeners.copy()
notified.remove(context.ID)
f(message, notified)
    def initContext(self, c):
        """Initialize a new context object and register it as a signal listener."""
        self.listeners.add(c.ID)
    def expireContext(self, c):
        """Remove an expired context from the set of signal listeners."""
        self.listeners.remove(c.ID)
    def stopServer(self):
        """Clean up on server shutdown: cancel the core MonInj background task."""
        self.core_moninj_task.cancel()
# Run the LabRAD server when this module is executed as a script.
if __name__ == '__main__':
    from labrad import util
    util.runServer(ARTIQ_Server())
#todo: check that device exists
#todo: block during exp run | [
"sipyco.pc_rpc.Client",
"numpy.log10",
"twisted.internet.threads.deferToThread",
"twisted.internet.defer.DeferredLock",
"artiq.master.databases.DeviceDB",
"labrad.server.LabradServer.__init__",
"twisted.internet.defer.returnValue",
"artiq_api.ARTIQ_api",
"labrad.server.setting",
"labrad.server.Sig... | [((2001, 2053), 'labrad.server.Signal', 'Signal', (['TTLSIGNAL_ID', '"""signal: ttl changed"""', '"""(sib)"""'], {}), "(TTLSIGNAL_ID, 'signal: ttl changed', '(sib)')\n", (2007, 2053), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((2071, 2123), 'labrad.server.Signal', 'Signal', (['DACSIGNAL_ID', '"""signal: dac changed"""', '"""(ssv)"""'], {}), "(DACSIGNAL_ID, 'signal: dac changed', '(ssv)')\n", (2077, 2123), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((4671, 4711), 'labrad.server.setting', 'setting', (['(21)', '"""Get Devices"""'], {'returns': '"""*s"""'}), "(21, 'Get Devices', returns='*s')\n", (4678, 4711), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((4922, 4987), 'labrad.server.setting', 'setting', (['(111)', '"""Run Experiment"""'], {'path': '"""s"""', 'maxruns': '"""i"""', 'returns': '""""""'}), "(111, 'Run Experiment', path='s', maxruns='i', returns='')\n", (4929, 4987), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((6064, 6107), 'labrad.server.setting', 'setting', (['(112)', '"""Stop Experiment"""'], {'returns': '""""""'}), "(112, 'Stop Experiment', returns='')\n", (6071, 6107), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((6661, 6704), 'labrad.server.setting', 'setting', (['(113)', '"""Runs Completed"""'], {'returns': '"""i"""'}), "(113, 'Runs Completed', returns='i')\n", (6668, 6704), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((6947, 6984), 'labrad.server.setting', 'setting', (['(211)', '"""TTL Get"""'], {'returns': '"""*s"""'}), "(211, 'TTL Get', returns='*s')\n", (6954, 6984), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((7115, 7175), 'labrad.server.setting', 'setting', (['(221)', '"""TTL Set"""'], {'ttl_name': '"""s"""', 'state': '"""b"""', 'returns': '""""""'}), "(221, 'TTL Set', ttl_name='s', state='b', returns='')\n", (7122, 7175), False, 'from 
labrad.server import LabradServer, setting, Signal\n'), ((7570, 7607), 'labrad.server.setting', 'setting', (['(311)', '"""DDS Get"""'], {'returns': '"""*s"""'}), "(311, 'DDS Get', returns='*s')\n", (7577, 7607), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((7867, 7923), 'labrad.server.setting', 'setting', (['(321)', '"""DDS Initialize"""'], {'dds_name': '"""s"""', 'returns': '""""""'}), "(321, 'DDS Initialize', dds_name='s', returns='')\n", (7874, 7923), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((8256, 8319), 'labrad.server.setting', 'setting', (['(322)', '"""DDS Toggle"""'], {'dds_name': '"""s"""', 'state': '"""b"""', 'returns': '""""""'}), "(322, 'DDS Toggle', dds_name='s', state='b', returns='')\n", (8263, 8319), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((8715, 8800), 'labrad.server.setting', 'setting', (['(323)', '"""DDS Waveform"""'], {'dds_name': '"""s"""', 'param': '"""s"""', 'param_val': '"""v"""', 'returns': '""""""'}), "(323, 'DDS Waveform', dds_name='s', param='s', param_val='v', returns=''\n )\n", (8722, 8800), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((9834, 9911), 'labrad.server.setting', 'setting', (['(326)', '"""DDS Attenuation"""'], {'dds_name': '"""s"""', 'att': '"""v"""', 'units': '"""s"""', 'returns': '""""""'}), "(326, 'DDS Attenuation', dds_name='s', att='v', units='s', returns='')\n", (9841, 9911), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((10396, 10469), 'labrad.server.setting', 'setting', (['(331)', '"""DDS Read"""'], {'dds_name': '"""s"""', 'addr': '"""i"""', 'length': '"""i"""', 'returns': '"""w"""'}), "(331, 'DDS Read', dds_name='s', addr='i', length='i', returns='w')\n", (10403, 10469), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((11167, 11209), 'labrad.server.setting', 'setting', (['(411)', '"""DAC Initialize"""'], {'returns': '""""""'}), "(411, 'DAC Initialize', 
returns='')\n", (11174, 11209), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((11348, 11418), 'labrad.server.setting', 'setting', (['(421)', '"""DAC Set"""'], {'dac_num': '"""i"""', 'value': '"""v"""', 'units': '"""s"""', 'returns': '""""""'}), "(421, 'DAC Set', dac_num='i', value='v', units='s', returns='')\n", (11355, 11418), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((12238, 12308), 'labrad.server.setting', 'setting', (['(422)', '"""DAC Gain"""'], {'dac_num': '"""i"""', 'gain': '"""v"""', 'units': '"""s"""', 'returns': '""""""'}), "(422, 'DAC Gain', dac_num='i', gain='v', units='s', returns='')\n", (12245, 12308), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((13115, 13188), 'labrad.server.setting', 'setting', (['(423)', '"""DAC Offset"""'], {'dac_num': '"""i"""', 'value': '"""v"""', 'units': '"""s"""', 'returns': '""""""'}), "(423, 'DAC Offset', dac_num='i', value='v', units='s', returns='')\n", (13122, 13188), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((14034, 14091), 'labrad.server.setting', 'setting', (['(424)', '"""DAC OFS"""'], {'value': '"""v"""', 'units': '"""s"""', 'returns': '""""""'}), "(424, 'DAC OFS', value='v', units='s', returns='')\n", (14041, 14091), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((14707, 14766), 'labrad.server.setting', 'setting', (['(431)', '"""DAC Read"""'], {'dac_num': '"""i"""', 'reg': '"""s"""', 'returns': '"""i"""'}), "(431, 'DAC Read', dac_num='i', reg='s', returns='i')\n", (14714, 14766), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((15396, 15442), 'labrad.server.setting', 'setting', (['(511)', '"""Sampler Initialize"""'], {'returns': '""""""'}), "(511, 'Sampler Initialize', returns='')\n", (15403, 15442), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((15584, 15647), 'labrad.server.setting', 'setting', (['(512)', '"""Sampler Gain"""'], {'channel': 
'"""i"""', 'gain': '"""i"""', 'returns': '""""""'}), "(512, 'Sampler Gain', channel='i', gain='i', returns='')\n", (15591, 15647), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((16086, 16141), 'labrad.server.setting', 'setting', (['(521)', '"""Sampler Read"""'], {'samples': '"""i"""', 'returns': '"""*v"""'}), "(521, 'Sampler Read', samples='i', returns='*v')\n", (16093, 16141), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((2183, 2194), 'artiq_api.ARTIQ_api', 'ARTIQ_api', ([], {}), '()\n', (2192, 2194), False, 'from artiq_api import ARTIQ_api\n'), ((2311, 2338), 'artiq.master.databases.DeviceDB', 'DeviceDB', (['self.ddb_filepath'], {}), '(self.ddb_filepath)\n', (2319, 2338), False, 'from artiq.master.databases import DeviceDB\n'), ((2369, 2396), 'artiq.master.worker_db.DeviceManager', 'DeviceManager', (['self.devices'], {}), '(self.devices)\n', (2382, 2396), False, 'from artiq.master.worker_db import DeviceManager\n'), ((2405, 2432), 'labrad.server.LabradServer.__init__', 'LabradServer.__init__', (['self'], {}), '(self)\n', (2426, 2432), False, 'from labrad.server import LabradServer, setting, Signal\n'), ((2867, 2905), 'sipyco.pc_rpc.Client', 'Client', (['"""::1"""', '(3251)', '"""master_schedule"""'], {}), "('::1', 3251, 'master_schedule')\n", (2873, 2905), False, 'from sipyco.pc_rpc import Client\n'), ((2930, 2970), 'sipyco.pc_rpc.Client', 'Client', (['"""::1"""', '(3251)', '"""master_dataset_db"""'], {}), "('::1', 3251, 'master_dataset_db')\n", (2936, 2970), False, 'from sipyco.pc_rpc import Client\n'), ((3129, 3143), 'twisted.internet.defer.DeferredLock', 'DeferredLock', ([], {}), '()\n', (3141, 3143), False, 'from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue\n'), ((6902, 6929), 'twisted.internet.defer.returnValue', 'returnValue', (['completed_runs'], {}), '(completed_runs)\n', (6913, 6929), False, 'from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue\n'), 
((11129, 11149), 'twisted.internet.defer.returnValue', 'returnValue', (['reg_val'], {}), '(reg_val)\n', (11140, 11149), False, 'from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue\n'), ((15354, 15374), 'twisted.internet.defer.returnValue', 'returnValue', (['reg_val'], {}), '(reg_val)\n', (15365, 15374), False, 'from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue\n'), ((16905, 16927), 'twisted.internet.defer.returnValue', 'returnValue', (['sampleArr'], {}), '(sampleArr)\n', (16916, 16927), False, 'from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue\n'), ((5911, 6017), 'twisted.internet.threads.deferToThread', 'deferToThread', (['self.scheduler.submit'], {'pipeline_name': 'ps_pipeline', 'expid': 'ps_expid', 'priority': 'ps_priority'}), '(self.scheduler.submit, pipeline_name=ps_pipeline, expid=\n ps_expid, priority=ps_priority)\n', (5924, 6017), False, 'from twisted.internet.threads import deferToThread\n'), ((6460, 6509), 'twisted.internet.threads.deferToThread', 'deferToThread', (['self.scheduler.delete', 'self.ps_rid'], {}), '(self.scheduler.delete, self.ps_rid)\n', (6473, 6509), False, 'from twisted.internet.threads import deferToThread\n'), ((16063, 16077), 'numpy.log10', 'np.log10', (['gain'], {}), '(gain)\n', (16071, 16077), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import inspect
import time
# Resolve the directory containing this file and prepend its parent's parent
# to sys.path, so sibling packages (e.g. motion_imitation) are importable
# when this module is run directly.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
"""Pybullet simulation of a bittle robot."""
import math
import os
import re
import numpy as np
import pybullet as pyb # pytype: disable=import-error
from motion_imitation.robots import bittle_pose_utils
from motion_imitation.robots import bittle_constants
from motion_imitation.robots import bittle_motor
from motion_imitation.robots import minitaur
from motion_imitation.robots import robot_config
from motion_imitation.envs import locomotion_gym_config
NUM_MOTORS = 8
NUM_LEGS = 4
# Motor joint names in action order: FR, FL, RR, RL; hip ("upper_leg") motor
# first, then knee ("lower_leg") motor, for each leg.
MOTOR_NAMES = [
    "FR_upper_leg_2_hip_motor_joint",
    "FR_lower_leg_2_upper_leg_joint",
    "FL_upper_leg_2_hip_motor_joint",
    "FL_lower_leg_2_upper_leg_joint",
    "RR_upper_leg_2_hip_motor_joint",
    "RR_lower_leg_2_upper_leg_joint",
    "RL_upper_leg_2_hip_motor_joint",
    "RL_lower_leg_2_upper_leg_joint"
]
# Initial base positions (x, y, z) in meters; the rack position suspends
# the robot in the air for debugging.
INIT_RACK_POSITION = [0, 0, 1]
INIT_POSITION = [0, 0, 0.90]
JOINT_DIRECTIONS = np.array([1, 1, 1, 1, 1, 1, 1, 1])
HIP_JOINT_OFFSET = 0.0
UPPER_LEG_JOINT_OFFSET = 0.0
KNEE_JOINT_OFFSET = 0.0
DOFS_PER_LEG = 2 #one for each servo motor (rotational DF)
JOINT_OFFSETS = np.array(
    [UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] * 4)
PI = math.pi
# Maximum per-step command change (rad) used by _ClipMotorCommands.
MAX_MOTOR_ANGLE_CHANGE_PER_STEP = 0.2
#XYZ is this abduction?
_DEFAULT_HIP_POSITIONS = (
    (0.21, -0.1157, 0),
    (0.21, 0.1157, 0),
    (-0.21, -0.1157, 0),
    (-0.21, 0.1157, 0),
)
# Earlier PD gain candidates kept for reference:
# ABDUCTION_P_GAIN = 220.0
# ABDUCTION_D_GAIN = 0.3
# HIP_P_GAIN = 220.0
# HIP_D_GAIN = 2.0
# KNEE_P_GAIN = 220.0
# KNEE_D_GAIN = 2.0
# HIP_P_GAIN = 22.00
# HIP_D_GAIN = .20
# KNEE_P_GAIN = 22.00
# KNEE_D_GAIN = .20
# HIP_P_GAIN = 55.00
# HIP_D_GAIN = .5
# KNEE_P_GAIN = 55.00
# KNEE_D_GAIN = .5
HIP_P_GAIN = 2.75
HIP_D_GAIN = .025
KNEE_P_GAIN = 2.75
KNEE_D_GAIN = .025
# Based on the readings from Bittle's default pose.
INIT_MOTOR_ANGLES = np.array([
    bittle_pose_utils.BITTLE_DEFAULT_HIP_ANGLE,
    bittle_pose_utils.BITTLE_DEFAULT_KNEE_ANGLE
] * NUM_LEGS)
# Patterns used by _BuildUrdfIds to classify joints from their URDF names.
_CHASSIS_NAME_PATTERN = re.compile(r"\w+_chassis_\w+")
_MOTOR_NAME_PATTERN = re.compile(r"\w+_hip_motor_\w+")
_KNEE_NAME_PATTERN = re.compile(r"\w+_lower_leg_\w+")
_TOE_NAME_PATTERN = re.compile(r"jtoe\d*")
URDF_FILENAME = "models/bittle.urdf"
# Tuple indices into pybullet getContactPoints() results.
_BODY_B_FIELD_NUMBER = 2
_LINK_A_FIELD_NUMBER = 3
#Change
# Joint angle limits in radians (~ +/- pi/2).
UPPER_BOUND = 1.5708
LOWER_BOUND = -1.5708
class Bittle(minitaur.Minitaur):
  """A simulation for the Bittle robot."""
  #CHANGE these values (not used)
  # MPC_BODY_MASS = 2.64/9.8 #.27kg
  # MPC_BODY_INERTIA = (0, 0, 0, 0, 0, 0, 0, 0)
  # MPC_BODY_HEIGHT = 0.42
  # One ScalarField per motor joint, in MOTOR_NAMES order, each limited to
  # +/- pi/2 rad.  Replaces eight copy-pasted entries with identical bounds
  # and reuses the module-level UPPER_BOUND/LOWER_BOUND constants.
  ACTION_CONFIG = [
      locomotion_gym_config.ScalarField(name=motor_name,
                                        upper_bound=UPPER_BOUND,
                                        lower_bound=LOWER_BOUND)
      for motor_name in MOTOR_NAMES
  ]
  def __init__(
      self,
      pybullet_client,
      motor_control_mode,
      urdf_filename=URDF_FILENAME,
      enable_clip_motor_commands=False,
      time_step=0.001,
      action_repeat=33,
      sensors=None,
      control_latency=0.002,
      on_rack=False,
      enable_action_interpolation=True,
      enable_action_filter=False,
      reset_time=-1,
      allow_knee_contact=False,
  ):
    """Construct the Bittle simulation wrapper.

    Args:
      pybullet_client: the pybullet client used for all simulation calls.
      motor_control_mode: a robot_config.MotorControlMode value, forwarded
        to the base class.
      urdf_filename: path of the robot's URDF model.
      enable_clip_motor_commands: if True, commands are clamped to within
        MAX_MOTOR_ANGLE_CHANGE_PER_STEP of the current angles (ApplyAction).
      time_step: simulation time step in seconds (forwarded to base class).
      action_repeat: number of simulation steps per action.
      sensors: optional sensors attached to the robot.
      control_latency: latency value forwarded to the base class.
      on_rack: if True, the robot starts suspended at INIT_RACK_POSITION.
      enable_action_interpolation: forwarded to the base class.
      enable_action_filter: forwarded to the base class.
      reset_time: settling time used during reset; <= 0 skips settling.
      allow_knee_contact: if True, knee links are also counted as feet
        when building link ids (_BuildUrdfIds).
    """
    self._urdf_filename = urdf_filename
    self._allow_knee_contact = allow_knee_contact
    self._enable_clip_motor_commands = enable_clip_motor_commands
    # Identical PD gains for the hip and knee motor of every leg.
    motor_kp = [
        HIP_P_GAIN, KNEE_P_GAIN,
        HIP_P_GAIN, KNEE_P_GAIN,
        HIP_P_GAIN, KNEE_P_GAIN,
        HIP_P_GAIN, KNEE_P_GAIN
    ]
    motor_kd = [
        HIP_D_GAIN, KNEE_D_GAIN,
        HIP_D_GAIN, KNEE_D_GAIN,
        HIP_D_GAIN, KNEE_D_GAIN,
        HIP_D_GAIN, KNEE_D_GAIN
    ]
    super(Bittle, self).__init__(
        pybullet_client=pybullet_client,
        time_step=time_step,
        action_repeat=action_repeat,
        num_motors=NUM_MOTORS,
        dofs_per_leg=DOFS_PER_LEG,
        motor_direction=JOINT_DIRECTIONS,
        motor_offset=JOINT_OFFSETS,
        motor_overheat_protection=False,
        motor_control_mode=motor_control_mode,
        motor_model_class=bittle_motor.BittleMotorModel,
        sensors=sensors,
        motor_kp=motor_kp,
        motor_kd=motor_kd,
        control_latency=control_latency,
        on_rack=on_rack,
        enable_action_interpolation=enable_action_interpolation,
        enable_action_filter=enable_action_filter,
        reset_time=reset_time)
def _LoadRobotURDF(self):
bittle_urdf_path = self.GetURDFFile()
if self._self_collision_enabled:
self.quadruped = self._pybullet_client.loadURDF(
bittle_urdf_path,
self._GetDefaultInitPosition(),
self._GetDefaultInitOrientation(),
flags=self._pybullet_client.URDF_USE_SELF_COLLISION)
else:
self.quadruped = self._pybullet_client.loadURDF(
bittle_urdf_path, self._GetDefaultInitPosition(),
self._GetDefaultInitOrientation())
  def _SettleDownForReset(self, default_motor_angles, reset_time):
    """Step the simulation so the robot settles into a reset pose.

    Args:
      default_motor_angles: pose to hold after the initial settling phase;
        if None, only the INIT_MOTOR_ANGLES phase runs.
      reset_time: settling duration in seconds; <= 0 skips settling.
    """
    self.ReceiveObservation()
    if reset_time <= 0:
      return
    # Hold the default pose for a fixed number of steps first.
    for _ in range(500):
      self._StepInternal(
          INIT_MOTOR_ANGLES,
          motor_control_mode=robot_config.MotorControlMode.POSITION)
    # Then hold the requested pose for reset_time seconds of sim time.
    if default_motor_angles is not None:
      num_steps_to_reset = int(reset_time / self.time_step)
      for _ in range(num_steps_to_reset):
        self._StepInternal(
            default_motor_angles,
            motor_control_mode=robot_config.MotorControlMode.POSITION)
  def GetHipPositionsInBaseFrame(self):
    """Return the default (x, y, z) hip positions, one tuple per leg."""
    return _DEFAULT_HIP_POSITIONS
def GetFootContacts(self):
all_contacts = self._pybullet_client.getContactPoints(bodyA=self.quadruped)
contacts = [False, False, False, False]
for contact in all_contacts:
# Ignore self contacts
if contact[_BODY_B_FIELD_NUMBER] == self.quadruped:
continue
try:
toe_link_index = self._foot_link_ids.index(
contact[_LINK_A_FIELD_NUMBER])
contacts[toe_link_index] = True
except ValueError:
continue
return contacts
  def ComputeJacobian(self, leg_id):
    """Compute the Jacobian for a given leg.

    Unlike Laikago, Bittle's URDF uses the standard orientation, so the
    base-class result is returned without reordering its rows.
    """
    # Laikago needed the rows reordered because of its URDF's default
    # rotation; that variant is kept below for reference.
    #return super(Bittle, self).ComputeJacobian(leg_id)[(2, 0, 1), :]
    #CHANGE rotation of bittle is normal
    return super(Bittle, self).ComputeJacobian(leg_id)
def ResetPose(self, add_constraint):
del add_constraint
for name in self._joint_name_to_id:
joint_id = self._joint_name_to_id[name]
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(joint_id),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=0)
for name, i in zip(MOTOR_NAMES, range(len(MOTOR_NAMES))):
if "hip_motor_2_chassis_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + HIP_JOINT_OFFSET
elif "upper_leg_2_hip_motor_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + UPPER_LEG_JOINT_OFFSET
elif "lower_leg_2_upper_leg_joint" in name:
angle = INIT_MOTOR_ANGLES[i] + KNEE_JOINT_OFFSET
else:
raise ValueError("The name %s is not recognized as a motor joint." %
name)
self._pybullet_client.resetJointState(self.quadruped,
self._joint_name_to_id[name],
angle,
targetVelocity=0)
  def GetURDFFile(self):
    """Return the path of the URDF file used to load this robot."""
    return self._urdf_filename
  def _BuildUrdfIds(self):
    """Build the link Ids from its name in the URDF file.

    Joints are classified by matching their names against the module-level
    patterns (chassis, hip motor, lower leg/knee, toe).  Note the original
    ValueError for unknown joints is commented out below, so unmatched
    joints are silently ignored.
    """
    num_joints = self._pybullet_client.getNumJoints(self.quadruped)
    self._chassis_link_ids = [-1]  # -1 refers to the base link
    self._leg_link_ids = []
    self._motor_link_ids = []
    self._knee_link_ids = []
    self._foot_link_ids = []
    for i in range(num_joints):
      joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
      joint_name = joint_info[1].decode("UTF-8")
      joint_id = self._joint_name_to_id[joint_name]
      if _CHASSIS_NAME_PATTERN.match(joint_name):
        self._chassis_link_ids.append(joint_id)
      elif _MOTOR_NAME_PATTERN.match(joint_name):
        self._motor_link_ids.append(joint_id)
      # We either treat the lower leg or the toe as the foot link, depending on
      # the urdf version used.
      elif _KNEE_NAME_PATTERN.match(joint_name):
        self._knee_link_ids.append(joint_id)
      elif _TOE_NAME_PATTERN.match(joint_name):
        self._foot_link_ids.append(joint_id)
      # else:
      #   raise ValueError("Unknown category of joint %s" % joint_name)
    self._leg_link_ids.extend(self._knee_link_ids)
    self._leg_link_ids.extend(self._foot_link_ids)
    # Optionally treat knee contacts as foot contacts.
    if self._allow_knee_contact:
      self._foot_link_ids.extend(self._knee_link_ids)
    self._chassis_link_ids.sort()
    self._motor_link_ids.sort()
    self._foot_link_ids.sort()
    self._leg_link_ids.sort()
  def _GetMotorNames(self):
    """Return the ordered list of motor joint names."""
    return MOTOR_NAMES
def _GetDefaultInitPosition(self):
if self._on_rack:
return INIT_RACK_POSITION
else:
return INIT_POSITION
  def _GetDefaultInitOrientation(self):
    # The Laikago URDF assumes the initial pose of heading towards z axis,
    # and belly towards y axis. The following transformation is to transform
    # the Laikago initial orientation to our commonly used orientation: heading
    # towards -x direction, and z axis is the up direction.
    # init_orientation = pyb.getQuaternionFromEuler(
    #     [math.pi / 2.0, 0, math.pi / 2.0])
    # Bittle heads in the y direction; yaw by -pi/2 so it heads in x instead.
    init_orientation= pyb.getQuaternionFromEuler([0,0, -1.5708])
    return init_orientation
  def GetDefaultInitPosition(self):
    """Get default initial base position (public wrapper)."""
    return self._GetDefaultInitPosition()
  def GetDefaultInitOrientation(self):
    """Get default initial base orientation (public wrapper)."""
    return self._GetDefaultInitOrientation()
def GetDefaultInitJointPose(self):
"""Get default initial joint pose."""
joint_pose = (INIT_MOTOR_ANGLES + JOINT_OFFSETS) * JOINT_DIRECTIONS
return joint_pose
def ApplyAction(self, motor_commands, motor_control_mode):
"""Clips and then apply the motor commands using the motor model.
Args:
motor_commands: np.array. Can be motor angles, torques, hybrid commands,
or motor pwms (for Minitaur only).N
motor_control_mode: A MotorControlMode enum.
"""
if self._enable_clip_motor_commands:
motor_commands = self._ClipMotorCommands(motor_commands)
super(Bittle, self).ApplyAction(motor_commands, motor_control_mode)
def _ClipMotorCommands(self, motor_commands):
"""Clips motor commands.
Args:
motor_commands: np.array. Can be motor angles, torques, hybrid commands,
or motor pwms (for Minitaur only).
Returns:
Clipped motor commands.
"""
# clamp the motor command by the joint limit, in case weired things happens
max_angle_change = MAX_MOTOR_ANGLE_CHANGE_PER_STEP
current_motor_angles = self.GetMotorAngles()
motor_commands = np.clip(motor_commands,
current_motor_angles - max_angle_change,
current_motor_angles + max_angle_change)
return motor_commands
  @classmethod
  def GetConstants(cls):
    """Return the module holding Bittle's shared constants."""
    del cls
    return bittle_constants
| [
"numpy.clip",
"re.compile",
"inspect.currentframe",
"os.sys.path.insert",
"os.path.dirname",
"numpy.array",
"pybullet.getQuaternionFromEuler",
"motion_imitation.envs.locomotion_gym_config.ScalarField"
] | [((789, 821), 'os.sys.path.insert', 'os.sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (807, 821), False, 'import os\n'), ((1712, 1746), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1])\n', (1720, 1746), True, 'import numpy as np\n'), ((1898, 1955), 'numpy.array', 'np.array', (['([UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] * 4)'], {}), '([UPPER_LEG_JOINT_OFFSET, KNEE_JOINT_OFFSET] * 4)\n', (1906, 1955), True, 'import numpy as np\n'), ((2610, 2725), 'numpy.array', 'np.array', (['([bittle_pose_utils.BITTLE_DEFAULT_HIP_ANGLE, bittle_pose_utils.\n BITTLE_DEFAULT_KNEE_ANGLE] * NUM_LEGS)'], {}), '([bittle_pose_utils.BITTLE_DEFAULT_HIP_ANGLE, bittle_pose_utils.\n BITTLE_DEFAULT_KNEE_ANGLE] * NUM_LEGS)\n', (2618, 2725), True, 'import numpy as np\n'), ((2756, 2787), 're.compile', 're.compile', (['"""\\\\w+_chassis_\\\\w+"""'], {}), "('\\\\w+_chassis_\\\\w+')\n", (2766, 2787), False, 'import re\n'), ((2809, 2842), 're.compile', 're.compile', (['"""\\\\w+_hip_motor_\\\\w+"""'], {}), "('\\\\w+_hip_motor_\\\\w+')\n", (2819, 2842), False, 'import re\n'), ((2863, 2896), 're.compile', 're.compile', (['"""\\\\w+_lower_leg_\\\\w+"""'], {}), "('\\\\w+_lower_leg_\\\\w+')\n", (2873, 2896), False, 'import re\n'), ((2916, 2938), 're.compile', 're.compile', (['"""jtoe\\\\d*"""'], {}), "('jtoe\\\\d*')\n", (2926, 2938), False, 'import re\n'), ((760, 787), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (775, 787), False, 'import os\n'), ((3330, 3447), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 'locomotion_gym_config.ScalarField', ([], {'name': '"""FR_upper_leg_2_hip_motor_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='FR_upper_leg_2_hip_motor_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (3363, 3447), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((3531, 3648), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 
'locomotion_gym_config.ScalarField', ([], {'name': '"""FR_lower_leg_2_upper_leg_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='FR_lower_leg_2_upper_leg_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (3564, 3648), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((3732, 3849), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 'locomotion_gym_config.ScalarField', ([], {'name': '"""FL_upper_leg_2_hip_motor_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='FL_upper_leg_2_hip_motor_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (3765, 3849), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((3933, 4050), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 'locomotion_gym_config.ScalarField', ([], {'name': '"""FL_lower_leg_2_upper_leg_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='FL_lower_leg_2_upper_leg_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (3966, 4050), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((4134, 4251), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 'locomotion_gym_config.ScalarField', ([], {'name': '"""RR_upper_leg_2_hip_motor_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='RR_upper_leg_2_hip_motor_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (4167, 4251), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((4335, 4452), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 'locomotion_gym_config.ScalarField', ([], {'name': '"""RR_lower_leg_2_upper_leg_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='RR_lower_leg_2_upper_leg_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (4368, 4452), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((4536, 4653), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 
'locomotion_gym_config.ScalarField', ([], {'name': '"""RL_upper_leg_2_hip_motor_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='RL_upper_leg_2_hip_motor_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (4569, 4653), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((4737, 4854), 'motion_imitation.envs.locomotion_gym_config.ScalarField', 'locomotion_gym_config.ScalarField', ([], {'name': '"""RL_lower_leg_2_upper_leg_joint"""', 'upper_bound': '(1.5708)', 'lower_bound': '(-1.5708)'}), "(name='RL_lower_leg_2_upper_leg_joint',\n upper_bound=1.5708, lower_bound=-1.5708)\n", (4770, 4854), False, 'from motion_imitation.envs import locomotion_gym_config\n'), ((11908, 11951), 'pybullet.getQuaternionFromEuler', 'pyb.getQuaternionFromEuler', (['[0, 0, -1.5708]'], {}), '([0, 0, -1.5708])\n', (11934, 11951), True, 'import pybullet as pyb\n'), ((13381, 13491), 'numpy.clip', 'np.clip', (['motor_commands', '(current_motor_angles - max_angle_change)', '(current_motor_angles + max_angle_change)'], {}), '(motor_commands, current_motor_angles - max_angle_change, \n current_motor_angles + max_angle_change)\n', (13388, 13491), True, 'import numpy as np\n'), ((706, 728), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (726, 728), False, 'import inspect\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, List
import numpy as np
import pandas as pd
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
logger = logging.getLogger(__name__)
DEFAULT_DS = "2019-01-01"
def _dense_to_sparse(dense: np.ndarray) -> List[Dict[str, float]]:
"""Convert dense array to sparse representation"""
assert len(dense.shape) == 2, f"dense shape is {dense.shape}"
# pyre-fixme[7]: Expected `List[Dict[str, float]]` but got `List[Dict[int,
# typing.Any]]`.
return [{i: v.item() for i, v in enumerate(elem)} for elem in dense]
def replay_buffer_to_pre_timeline_df(
    is_discrete_action: bool, replay_buffer: ReplayBuffer
) -> pd.DataFrame:
    """Format needed for uploading dataset to Hive, and then run timeline.

    Samples every transition in the buffer and assembles one row per
    transition with the columns the timeline pipeline expects (state
    features, action, mdp_id, sequence number, action probability, reward,
    metrics, and optionally possible actions/mask).
    """
    n = replay_buffer.size
    batch = replay_buffer.sample_transition_batch(batch_size=n)
    # actions is inconsistent between models, so let's infer them.
    possible_actions_mask = getattr(batch, "possible_actions_mask", None)
    possible_actions = getattr(batch, "possible_actions", None)
    terminal = batch.terminal.squeeze(1).tolist()
    assert len(batch.action.shape) == 2
    if is_discrete_action:
        assert (
            batch.action.shape[1] == 1
        ), f"discrete action batch with shape {batch.action.shape}"
        # Discrete action space, should be str
        action = [str(a.item()) for a in batch.action]
        # assuming we've explored the whole action space
        unique_actions = np.unique(batch.action)
        # Terminal transitions get empty possible-action lists.
        possible_actions_mask = [
            [1 for _ in range(len(unique_actions))] if not elem_terminal else []
            for elem_terminal in terminal
        ]
        possible_actions = [
            [str(a) for a in unique_actions] if not elem_terminal else []
            for elem_terminal in terminal
        ]
    else:
        # Box (parametric) action space, should be map<str, double>
        action = _dense_to_sparse(batch.action)
        # TODO: handle possible actions/mask here
    sequence_number = batch.sequence_number.squeeze(1).tolist()
    # log_prob is stored as a log; the pipeline expects a probability.
    action_probability = np.exp(batch.log_prob.squeeze(1)).tolist()
    reward = batch.reward.squeeze(1).tolist()
    rows = {
        "ds": [DEFAULT_DS for _ in range(n)],
        "state_features": _dense_to_sparse(batch.state),
        "action": action,
        "mdp_id": batch.mdp_id.tolist(),
        "sequence_number": sequence_number,
        "action_probability": action_probability,
        "reward": reward,
        "metrics": [{"reward": r} for r in reward],
    }
    if possible_actions_mask is not None:
        rows["possible_actions_mask"] = possible_actions_mask
    if possible_actions is not None:
        rows["possible_actions"] = possible_actions
    return pd.DataFrame.from_dict(rows)
| [
"logging.getLogger",
"numpy.unique",
"pandas.DataFrame.from_dict"
] | [((262, 289), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (279, 289), False, 'import logging\n'), ((2869, 2897), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['rows'], {}), '(rows)\n', (2891, 2897), True, 'import pandas as pd\n'), ((1599, 1622), 'numpy.unique', 'np.unique', (['batch.action'], {}), '(batch.action)\n', (1608, 1622), True, 'import numpy as np\n')] |
from __future__ import print_function
from os import path
import sys
import warnings
import numpy as np
# Python 2/3 compatibility: pick the right urllib names and define the
# tuple of download-related errors that callers can catch as `exceptions`.
if sys.version_info[0] > 2:
    from urllib.request import URLopener
    from urllib.error import HTTPError, URLError
    exceptions = (HTTPError, URLError, OSError)
else:
    from urllib import URLopener
    # Bug fix: "(IOError)" is just IOError, not a tuple; use a 1-tuple so
    # both branches expose the same type.
    exceptions = (IOError,)
from astropy.io import fits as pyfits
import MCPM
class TpfData(object):
    """
    Handles data read from a single K2 Target Pixel File (TPF).

    Note that there are no (x,y) coordinates! Instead there are (row, column)
    or (column, row) and, yes, I wish one convention was used consistently.

    The .jd_short property is time taken from TPF file, i.e., BJD TDB and
    corresponds to the center of the exposure.
    """
    # Class-level default directory where downloaded TPF files are stored.
    directory = path.join(MCPM.MODULE_PATH, 'data', 'K2C9', 'tpf')
def __init__(self, epic_id=None, campaign=None, file_name=None):
if (epic_id is None) != (campaign is None):
raise ValueError('wrong parameters epic_id and campaign in TpfData.__init__()')
if (file_name is not None) and (epic_id is not None):
raise ValueError('in TpfData.__init__() you cannot specify file_name and epic_id at the same time')
self.epic_id = epic_id
self.campaign = campaign
if file_name is None:
file_name = self._guess_file_name()
self.file_name = file_name
self._verify_and_download()
self._load_data(self._path)
self._column = None
self._row = None
self._pixel_list = None
def _guess_file_name(self):
"""guesses file name based on epic_id and campaign"""
fmt = 'ktwo{:}-c{:}_lpd-targ.fits.gz'
return fmt.format(self.epic_id, self.campaign)
def _load_data(self, file_name):
"""loads header information and data from given file"""
hdu_list = pyfits.open(file_name)
file_ok = True
try:
n_hdu = len(hdu_list)
except:
file_ok = False
else:
if n_hdu < 3:
file_ok = False
if not file_ok:
raise OSError('Error reading file:\n\n' + file_name +
'\n\nYou may want to remove it and rerun the code.')
hdu_2 = hdu_list[2]
self.ra_object = hdu_2.header['RA_OBJ']
self.dec_object = hdu_2.header['DEC_OBJ']
self.channel = hdu_list[0].header['CHANNEL']
self.reference_column = hdu_2.header['CRVAL1P']
self.reference_row = hdu_2.header['CRVAL2P']
self.mask = hdu_2.data
self.n_rows = self.mask.shape[0]
self.n_columns = self.mask.shape[1]
self.n_pixels = self.n_rows * self.n_columns
try:
data = hdu_list[1].data
except:
raise OSError('Error reading file:\n\n' + file_name +
'\n\nYou may want to remove it and rerun the code.')
self.jd_short = data["time"] + 4833. # it is BJD
self.quality_flags = data["quality"].astype(dtype=int)
flux = data["flux"]
pixel_mask = np.isfinite(flux) & (flux != 0)
pixel_mask[:, self.mask < 1] = False
self.pixel_mask = pixel_mask
quality_flags = data["quality"]
# TO_BE_DONE - can someone check if these are the only flags we should remove? Should we change it to a parameter?
quality_flags_ok = ((quality_flags == 0) | (quality_flags == 8192)
| (quality_flags == 16384) | (quality_flags == 24576))
foo = np.sum(np.sum((self.pixel_mask > 0), axis=2), axis=1) # Does anybody understand what is happening here?
self.epoch_mask = (foo > 0) & np.isfinite(self.jd_short) & quality_flags_ok
self.jd_short_masked = self.jd_short[self.epoch_mask]
flux = flux[:, self.mask>0]
if not np.isfinite(flux[self.epoch_mask]).all():
raise ValueError('non-finite value in flux table of {:} - feature not done yet'.format(file_name))
# TO_BE_DONE - code interpolation using e.g. k2_cpm.py lines: 89-92
# TO_BE_DONE - also checks on flux_err?
self.flux = flux
self.median_flux = np.median(flux[self.epoch_mask], axis=0)
flux_err = data["flux_err"]
flux_err = flux_err[:, self.mask>0]
self.flux_err = flux_err
hdu_list.close()
@property
def _path(self):
"""path to the TPF file"""
return path.join(TpfData.directory, self.file_name)
def _verify_and_download(self):
"""check if file is where it should and download if not"""
if path.isfile(self._path):
return
# File does not exist, so we have to download it.
epic_id = int(self.epic_id)
d1 = epic_id - epic_id % 100000
d2 = epic_id % 100000 - epic_id % 1000
url_template = 'https://archive.stsci.edu/missions/k2/target_pixel_files/c{0:d}/{1:d}/{2:05d}/{3}'
url_to_load = url_template.format(self.campaign, d1, d2, self.file_name)
fmt = "Downloading {:} ..... "
print(fmt.format(self.file_name), end='', file=sys.stderr, flush=True)
url_retriever = URLopener()
try:
url_retriever.retrieve(url_to_load, self._path)
except exceptions:
print("", file=sys.stderr, flush=True)
raise IOError(
"\n\nFailed to download file {:}\n\n".format(url_to_load))
if not path.isfile(self._path):
print("", file=sys.stderr, flush=True)
raise IOError(
'Download of\n' + url_to_load + '\nto\n' + self._path +
'somehow failed')
print(" done", file=sys.stderr, flush=True)
@property
def reference_pixel(self):
"""return array that gives reference pixel position"""
return np.array([self.reference_column, self.reference_row])
@property
def pixel_list(self):
"""return array with a list of all pixels"""
if self._pixel_list is None:
inside_1 = np.repeat(np.arange(self.n_columns), self.n_rows)
inside_2 = np.tile(np.arange(self.n_rows), self.n_columns)
inside_coords = np.array([inside_1, inside_2], dtype=int).T
self._pixel_list = inside_coords + self.reference_pixel
return self._pixel_list
def check_pixel_in_tpf(self, column, row):
"""check if given (column,row) pixel is inside the area covered by this TPF file"""
d_column = column - self.reference_column
d_row = row - self.reference_row
if (d_column < 0) or (d_column >= self.n_columns):
return False
if (d_row < 0) or (d_row >= self.n_rows):
return False
return True
def check_pixel_covered(self, column, row):
"""check if we have data for given (column,row) pixel"""
if (not isinstance(column, int) and
not isinstance(column, np.integer)) or (
not isinstance(row, int) and not isinstance(row, np.integer)):
raise TypeError('Pixel coordinates must be of int type\n' +
'got: {:} {:}, {:} {:}'.format(column, type(column), row,
type(row)))
if not self.check_pixel_in_tpf(column, row):
return False
mask_value = self.mask[row - self.reference_row, column - self.reference_column]
return (mask_value > 0)
def _make_column_row_vectors(self):
"""prepare vectors with some numbers"""
self._column = np.tile(np.arange(self.n_columns, dtype=int), self.n_rows)
self._column = self._column[self.mask.flatten()>0] + self.reference_column
self._row = np.repeat(np.arange(self.n_rows, dtype=int), self.n_columns)
self._row = self._row[self.mask.flatten()>0] + self.reference_row
@property
def rows(self):
"""gives array that translates index pixel into row number"""
if self._row is None:
self._make_column_row_vectors()
return self._row
@property
def columns(self):
"""gives array that translates index pixel into column number"""
if self._column is None:
self._make_column_row_vectors()
return self._column
def get_pixel_index(self, row, column):
"""finds index of given (row, column) pixel in given file -
information necessary to extract flux;
float input is rounded to nearest int"""
return self._get_pixel_index(row=int(row+.5), column=int(column+.5))
def _get_pixel_index(self, row, column):
"""finds index of given (row, column) pixel in given file -
information necessary to extract flux;
only int on input"""
if (self._row is None) or (self._column is None):
self._make_column_row_vectors()
index = np.arange(len(self._row))
index_mask = ((self._row == row) & (self._column == column))
try:
out = index[index_mask][0]
except IndexError:
out = None
return out
def get_flux_for_pixel(self, row, column, apply_epoch_mask=False):
"""extracts flux for a single pixel (all epochs) specified as row and column"""
if not self.check_pixel_covered(column=column, row=row):
return None
index = self._get_pixel_index(row=row, column=column)
if apply_epoch_mask:
return self.flux[:,index][self.epoch_mask]
else:
return self.flux[:,index]
def get_flux_err_for_pixel(self, row, column, apply_epoch_mask=False):
"""extracts flux_err for a single pixel (all epochs) specified as row and column"""
if not self.check_pixel_covered(column=column, row=row):
return None
index = self._get_pixel_index(row=row, column=column)
if apply_epoch_mask:
return self.flux_err[:,index][self.epoch_mask]
else:
return self.flux_err[:,index]
def get_fluxes_for_square(self, row_center, column_center, half_size, apply_epoch_mask=False):
"""get matrix that gives fluxes for pixels from (center-half_size) to
(center+half_size) in each axis and including both ends"""
full_size = 2 * half_size + 1
if apply_epoch_mask:
length = sum(self.epoch_mask)
else:
length = len(self.jd_short)
out = np.zeros((full_size, full_size, length))
for i_row in range(-half_size, half_size+1):
row = i_row + row_center
for i_column in range(-half_size, half_size+1):
column = i_column + column_center
flux = self.get_flux_for_pixel(row=row, column=column,
apply_epoch_mask=apply_epoch_mask)
out[i_row+half_size][i_column+half_size] = flux
return out
def get_fluxes_for_pixel_list(self, pixel_list, apply_epoch_mask=False):
"""for pixels from pixel_list get the flux and return it in
a list of pixels"""
out = []
for (x, y) in pixel_list:
out.append(self.get_flux_for_pixel(row=y, column=x))
return out
def save_pixel_curve(self, row, column, file_name, full_time=True):
"""saves the time vector and the flux for a single pixel into a file
"""
flux = self.get_flux_for_pixel(row=row, column=column)
if flux is None:
msg = "wrong call to save_pixel_curve():\nrow = {:}\ncolumn={:}"
warnings.warn(msg.format(row, column))
return
time = np.copy(self.jd_short)
if full_time:
time += 2450000.
np.savetxt(file_name, np.array([time, flux]).T, fmt="%.5f %.8f")
def save_pixel_curve_with_err(self, row, column, file_name,
full_time=True):
"""saves:
the time vector, flux vector, and flux_err vector
for a single pixel into a file
"""
flux = self.get_flux_for_pixel(row=row, column=column)
if flux is None:
msg = "\n\nwrong call to save_pixel_curve_with_err():\nrow = {:}\ncolumn = {:}\n"
warnings.warn(msg.format(row, column))
return
flux_err = self.get_flux_err_for_pixel(row=row, column=column)
time = np.copy(self.jd_short)
if full_time:
time += 2450000.
np.savetxt(file_name, np.array([time, flux, flux_err]).T,
fmt="%.5f %.8f %.8f")
| [
"numpy.copy",
"numpy.median",
"os.path.join",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"urllib.URLopener",
"numpy.isfinite",
"numpy.sum",
"astropy.io.fits.open",
"numpy.arange"
] | [((766, 816), 'os.path.join', 'path.join', (['MCPM.MODULE_PATH', '"""data"""', '"""K2C9"""', '"""tpf"""'], {}), "(MCPM.MODULE_PATH, 'data', 'K2C9', 'tpf')\n", (775, 816), False, 'from os import path\n'), ((1905, 1927), 'astropy.io.fits.open', 'pyfits.open', (['file_name'], {}), '(file_name)\n', (1916, 1927), True, 'from astropy.io import fits as pyfits\n'), ((4226, 4266), 'numpy.median', 'np.median', (['flux[self.epoch_mask]'], {'axis': '(0)'}), '(flux[self.epoch_mask], axis=0)\n', (4235, 4266), True, 'import numpy as np\n'), ((4493, 4537), 'os.path.join', 'path.join', (['TpfData.directory', 'self.file_name'], {}), '(TpfData.directory, self.file_name)\n', (4502, 4537), False, 'from os import path\n'), ((4653, 4676), 'os.path.isfile', 'path.isfile', (['self._path'], {}), '(self._path)\n', (4664, 4676), False, 'from os import path\n'), ((5225, 5236), 'urllib.URLopener', 'URLopener', ([], {}), '()\n', (5234, 5236), False, 'from urllib import URLopener\n'), ((5890, 5943), 'numpy.array', 'np.array', (['[self.reference_column, self.reference_row]'], {}), '([self.reference_column, self.reference_row])\n', (5898, 5943), True, 'import numpy as np\n'), ((10457, 10497), 'numpy.zeros', 'np.zeros', (['(full_size, full_size, length)'], {}), '((full_size, full_size, length))\n', (10465, 10497), True, 'import numpy as np\n'), ((11663, 11685), 'numpy.copy', 'np.copy', (['self.jd_short'], {}), '(self.jd_short)\n', (11670, 11685), True, 'import numpy as np\n'), ((12376, 12398), 'numpy.copy', 'np.copy', (['self.jd_short'], {}), '(self.jd_short)\n', (12383, 12398), True, 'import numpy as np\n'), ((3130, 3147), 'numpy.isfinite', 'np.isfinite', (['flux'], {}), '(flux)\n', (3141, 3147), True, 'import numpy as np\n'), ((3595, 3630), 'numpy.sum', 'np.sum', (['(self.pixel_mask > 0)'], {'axis': '(2)'}), '(self.pixel_mask > 0, axis=2)\n', (3601, 3630), True, 'import numpy as np\n'), ((5505, 5528), 'os.path.isfile', 'path.isfile', (['self._path'], {}), '(self._path)\n', (5516, 5528), False, 
'from os import path\n'), ((7597, 7633), 'numpy.arange', 'np.arange', (['self.n_columns'], {'dtype': 'int'}), '(self.n_columns, dtype=int)\n', (7606, 7633), True, 'import numpy as np\n'), ((7762, 7795), 'numpy.arange', 'np.arange', (['self.n_rows'], {'dtype': 'int'}), '(self.n_rows, dtype=int)\n', (7771, 7795), True, 'import numpy as np\n'), ((3730, 3756), 'numpy.isfinite', 'np.isfinite', (['self.jd_short'], {}), '(self.jd_short)\n', (3741, 3756), True, 'import numpy as np\n'), ((6108, 6133), 'numpy.arange', 'np.arange', (['self.n_columns'], {}), '(self.n_columns)\n', (6117, 6133), True, 'import numpy as np\n'), ((6179, 6201), 'numpy.arange', 'np.arange', (['self.n_rows'], {}), '(self.n_rows)\n', (6188, 6201), True, 'import numpy as np\n'), ((6247, 6288), 'numpy.array', 'np.array', (['[inside_1, inside_2]'], {'dtype': 'int'}), '([inside_1, inside_2], dtype=int)\n', (6255, 6288), True, 'import numpy as np\n'), ((11767, 11789), 'numpy.array', 'np.array', (['[time, flux]'], {}), '([time, flux])\n', (11775, 11789), True, 'import numpy as np\n'), ((12480, 12512), 'numpy.array', 'np.array', (['[time, flux, flux_err]'], {}), '([time, flux, flux_err])\n', (12488, 12512), True, 'import numpy as np\n'), ((3889, 3923), 'numpy.isfinite', 'np.isfinite', (['flux[self.epoch_mask]'], {}), '(flux[self.epoch_mask])\n', (3900, 3923), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import time
import matplotlib.pyplot as plt
import pandas as pd
from utils import *
def sliding_dot_product(q, t):
    """Sliding dot product of query q against series t, computed via FFT.

    Returns a complex array of length len(t) - len(q); entry i equals
    dot(q, t[i+1 : i+1+len(q)]) up to floating-point error (imaginary
    parts are numerical noise).
    """
    n, m = t.size, q.size
    # Zero-pad the series and the reversed query to a common length of 2n so
    # circular convolution produces the linear correlation terms we need.
    padded_t = np.append(t, np.zeros(n))
    padded_qr = np.append(np.flip(q, 0), np.zeros(2 * n - m))
    # Convolution theorem: element-wise product in the frequency domain.
    spectrum = np.fft.fft(padded_qr) * np.fft.fft(padded_t)
    return np.fft.ifft(spectrum)[m:n]
def sliding_dot_product_stomp(q, t):
    """Sliding dot product for STOMP: window i starts at t[i].

    Returns a complex array of length len(t) - len(q) + 1; entry i equals
    dot(q, t[i : i+len(q)]) up to floating-point error.
    """
    n, m = t.size, q.size
    # Zero-pad both operands to length 2n; reversing q turns the circular
    # convolution into a sliding correlation.
    padded_t = np.append(t, np.zeros(n))
    padded_qr = np.append(np.flip(q, 0), np.zeros(2 * n - m))
    spectrum = np.fft.fft(padded_qr) * np.fft.fft(padded_t)
    # Unlike sliding_dot_product(), keep index m-1 so window 0 is t[0:m].
    return np.fft.ifft(spectrum)[m - 1:n]
def calculate_distance_profile(q, t, qt, a, sum_q, sum_q2, mean_t, sigma_t):
    """Turn sliding dot products into a z-normalised distance profile.

    :param q: query, np.array of size m (only its size is used)
    :param t: time-series, np.array of size n (only its size is used)
    :param qt: sliding dot products of q against t (may be complex from FFT)
    :param a: precomputed per-window term (see stamp())
    :param sum_q: sum of the (normalised) query values
    :param sum_q2: sum of the squared query values
    :param mean_t: rolling means of t's length-m windows
    :param sigma_t: rolling standard deviations of t's length-m windows
    :return: np.array of n - m distances
    """
    n = t.size
    m = q.size
    k = n - m
    # Vectorised replacement of the original per-element Python loop --
    # identical arithmetic, single O(n) pass in numpy.  Only the real part
    # of the FFT product is meaningful.
    qt_real = np.real(np.asarray(qt)[:k])
    b = -2 * (qt_real - sum_q * np.asarray(mean_t)[:k]) / np.asarray(sigma_t)[:k]
    dist = np.asarray(a)[:k] + b + sum_q2
    # Clamp tiny negative values caused by round-off before the sqrt.
    return np.sqrt(np.abs(dist))
# The code below takes O(m) for each subsequence
# you should replace it for MASS
def compute_mean_std_for_query(Q):
    """Return (sum, sum of squares) of the query values -- O(m)."""
    total = np.sum(Q)
    total_sq = np.sum(Q * Q)
    return total, total_sq
def pre_compute_mean_std_for_TS(ta, m):
    """Rolling sums / means / standard deviations of length-m windows of ta.

    STAMP-flavoured indexing: entry i covers ta[i+1 : i+m+1], matching the
    offset of sliding_dot_product().

    :param ta: time-series, np.array of length na
    :param m: window length
    :return: (sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2),
             each an np.array of length na - m
    """
    na = len(ta)
    cumulative_sum_t = np.cumsum(ta)
    cumulative_sum_t2 = np.cumsum(np.power(ta, 2))
    # Vectorised window sums via cumulative-sum differences (replaces the
    # original per-index Python loop; identical values, single O(n) pass):
    # cum[i+m] - cum[i] for i in 0..na-m-1.
    sum_t = cumulative_sum_t[m:na] - cumulative_sum_t[0:na - m]
    sum_t2 = cumulative_sum_t2[m:na] - cumulative_sum_t2[0:na - m]
    mean_t = np.divide(sum_t, m)
    mean_t2 = np.divide(sum_t2, m)
    mean_t_p2 = np.power(mean_t, 2)
    # Var = E[x^2] - E[x]^2 (numerically fine for short windows).
    sigma_t2 = np.subtract(mean_t2, mean_t_p2)
    sigma_t = np.sqrt(sigma_t2)
    return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
def pre_compute_mean_std_for_TS_stomp(ta, m):
    """Rolling window statistics for STOMP: entry i covers ta[i : i+m].

    Returns (sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2),
    each of length len(ta) - m + 1.
    """
    na = len(ta)
    running = np.cumsum(ta)
    running_sq = np.cumsum(np.power(ta, 2))
    # Prepend a zero so the first window starts at the very first sample.
    sum_t = running[m - 1:na] - np.concatenate(([0], running[0:na - m]))
    sum_t2 = running_sq[m - 1:na] - np.concatenate(([0], running_sq[0:na - m]))
    mean_t = sum_t / m
    mean_t2 = sum_t2 / m
    mean_t_p2 = mean_t ** 2
    # Var = E[x^2] - E[x]^2.
    sigma_t2 = mean_t2 - mean_t_p2
    sigma_t = np.sqrt(sigma_t2)
    return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
# MUEEN’S ALGORITHM FOR SIMILARITY SEARCH (MASS)
def mass(Q, T, a, meanT, sigmaT):
    """MASS: distance profile of query Q against every subsequence of T.

    Q is z-normalised first (unless constant), so the profile is invariant
    to the query's scale and offset.
    """
    spread = np.std(Q)
    if spread != 0:
        # Z-normalise the query; a zero spread (constant query) is left as-is
        # to avoid dividing by zero.
        Q = (Q - np.mean(Q)) / spread
    dot_products = sliding_dot_product(Q, T)
    sumQ, sumQ2 = compute_mean_std_for_query(Q)
    return calculate_distance_profile(Q, T, dot_products, a, sumQ, sumQ2, meanT, sigmaT)
def element_wise_min(Pab, Iab, D, idx, ignore_trivial, m):
    """Fold distance profile D into running profile Pab / index vector Iab.

    Wherever D improves on Pab, record the new minimum and the query index
    idx that produced it.  In a self-join (ignore_trivial), positions within
    m/2 of idx are trivial matches and are skipped.
    """
    exclusion = m / 2.0
    for pos in range(len(D)):
        # Guard clause: skip the trivial-match exclusion zone [-m/2, m/2].
        if ignore_trivial and abs(idx - pos) <= exclusion:
            continue
        if D[pos] < Pab[pos]:
            Pab[pos] = D[pos]
            Iab[pos] = idx
    return Pab, Iab
def stamp(Ta, Tb, m):
    """
    Compute the Matrix Profile between time-series Ta and Tb.
    If Ta==Tb, the operation is a self-join and trivial matches are ignored.
    :param Ta: time-series, np.array
    :param Tb: time-series, np.array
    :param m: subsequence length
    :return: Matrix Profile, Nearest-Neighbor indexes
    """
    nb = len(Tb)
    na = len(Ta)
    Pab = np.ones(na - m) * np.inf
    Iab = np.zeros(na - m)
    idxes = np.arange(nb - m + 1)
    sumT, sumT2, meanT, meanT_2, meanTP2, sigmaT, sigmaT2 = pre_compute_mean_std_for_TS(Ta, m)
    # Per-window term of the squared-distance formula that does not depend
    # on the query; computed once, reused for every query window.
    a = np.zeros(na - m)
    for i in range(0, na - m):
        a[i] = (sumT2[i] - 2 * sumT[i] * meanT[i] + m * meanTP2[i]) / sigmaT2[i]
    ignore_trivial = np.atleast_1d(Ta == Tb).all()
    for idx in idxes:
        D = mass(Tb[idx: idx + m], Ta, a, meanT, sigmaT)
        if (ignore_trivial):
            # ignore trivial minimum and maximum
            minIdx = int(np.maximum(idx - m / 2.0, 0))
            maxIdx = int(np.minimum(idx + m / 2.0, len(D)))
            D[minIdx:maxIdx:1] = np.inf
        # BUG FIX: record the *current* query index.  The original wrote
        # `Iab[Pab > D] = i`, reusing the stale loop variable `i` from the
        # normalization loop above, so every nearest-neighbor index was
        # na-m-1.  The analogous line in stomp() correctly uses idx.
        Iab[Pab > D] = idx
        Pab = np.minimum(Pab, D)
    return Pab, Iab
def stomp(T, m):
    """
    Compute the Matrix Profile with self join for T
    :param T: time-series, np.array
    :param Tb: time-series, np.array
    :param m: subsequence length
    :return: Matrix Profile, Nearest-Neighbor indexes
    """
    epsilon = 1e-10
    n = len(T)
    seq_l = n - m
    _, _, meanT, _, _, sigmaT, _ = pre_compute_mean_std_for_TS_stomp(T, m)
    # Profile starts at +inf so any real distance replaces it.
    Pab = np.full(seq_l + 1, np.inf)
    Iab = np.zeros(n - m + 1)
    ignore_trivial = True
    for idx in range(0, seq_l):
        # Guard against a (near-)constant window: clamp the std to epsilon
        # so the normalisation below never divides by zero.
        Q_std = sigmaT[idx] if sigmaT[idx] > epsilon else epsilon
        if idx == 0:
            # First row: full FFT-based dot products; keep a copy so later
            # rows can seed their first element from it.
            QT = sliding_dot_product_stomp(T[0:m], T).real
            QT_first = np.copy(QT)
        else:
            # O(n) STOMP recurrence: each subsequent row of dot products is
            # obtained from the previous one by dropping the oldest sample
            # and adding the newest.  Order of these two statements matters.
            QT[1:] = QT[0:-1] - (T[0:seq_l] * T[idx - 1]) + (T[m:n] * T[idx + m - 1])
            QT[0] = QT_first[idx]
        # Calculate the *squared* z-normalised distance profile (the sqrt is
        # applied once, in place, after the loop).
        D = (2 * (m - (QT - m * meanT * meanT[idx]) / (Q_std * sigmaT)))
        # Round-off can produce tiny negatives; zero them before the sqrt.
        D[D < epsilon] = 0
        if (ignore_trivial):
            # ignore trivial minimum and maximum
            minIdx = int(np.maximum(idx - m / 2.0, 0))
            maxIdx = int(np.minimum(idx + m / 2.0, len(D)))
            D[minIdx:maxIdx:1] = np.inf
        Iab[Pab > D] = idx
        # In-place element-wise minimum keeps the running profile in Pab.
        np.minimum(Pab, D, Pab)
    np.sqrt(Pab, Pab)
    return Pab, Iab
# Quick Test
# def test_stomp(Ta, m):
# start_time = time.time()
#
# Pab, Iab = stomp(Ta, m)
# print("--- %s seconds ---" % (time.time() - start_time))
# plot_motif(Ta, Pab, Iab, m)
# return Pab, Iab
# Quick Test
# def test_stamp(Ta, Tb, m):
# start_time = time.time()
#
# Pab, Iab = stamp(Ta, Tb, m)
# print("--- %s seconds ---" % (time.time() - start_time))
#
# plot_discord(Ta, Pab, Iab, m, )
# return Pab, Iab
def plot_motif(Ta, values, indexes, m):
    """Plot the series with its top motif/discord and the matrix profile.

    Top panel: Ta with the minimum-profile subsequence (motif, green) and
    the maximum-profile subsequence (discord, red) highlighted.
    Bottom panel: the matrix profile `values` with both extrema marked.
    `indexes` is accepted for symmetry with the other plot helpers but is
    not used here.
    """
    from matplotlib import gridspec
    plt.figure(figsize=(8, 4))
    plt.subplot(211)
    plt.plot(Ta, linestyle='--', alpha=0.5)
    plt.xlim((0, len(Ta)))
    # Debug aid: index of the top discord.
    print(np.argmax(values))
    plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g',
             label='Top Motif')
    plt.plot(range(np.argmax(values), np.argmax(values) + m), Ta[np.argmax(values):np.argmax(values) + m], c='r',
             label='Top Discord')
    plt.legend(loc='best')
    plt.title('Time-Series')
    plt.subplot(212)
    plt.title('Matrix Profile')
    plt.plot(range(0, len(values)), values, '#ff5722')
    plt.plot(np.argmax(values), np.max(values), marker='x', c='r', ms=10)
    plt.plot(np.argmin(values), np.min(values), marker='^', c='g', ms=10)
    plt.xlim((0, len(Ta)))
    plt.xlabel('Index')
    plt.ylabel('Value')
    plt.show()
def plot_discord(Ta, Tb, values, indexes, m):
    """Plot series Ta with its best match to query Tb, plus the profile.

    First figure: Ta (wide panel, best-match window in green) next to the
    query Tb (narrow panel); panel widths are proportional to series length.
    Second figure: the matrix profile `values` with extrema marked.
    `indexes` is accepted for symmetry with the other plot helpers but is
    not used here.
    """
    from matplotlib import gridspec
    plt.figure(figsize=(8, 4))
    gs = gridspec.GridSpec(1, 2, width_ratios=[int(len(Ta) / len(Tb)), 1])
    plt.subplot(gs[0])
    plt.plot(Ta, linestyle='--')
    plt.xlim((0, len(Ta)))
    plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g',
             label='Best Match')
    plt.legend(loc='best')
    plt.title('Time-Series')
    # NOTE(review): fixed y-range assumes z-normalised data -- confirm.
    plt.ylim((-3, 3))
    plt.subplot(gs[1])
    plt.plot(Tb)
    plt.title('Query')
    plt.xlim((0, len(Tb)))
    plt.ylim((-3, 3))
    plt.figure()
    plt.title('Matrix Profile')
    plt.plot(range(0, len(values)), values, '#ff5722')
    plt.plot(np.argmax(values), np.max(values), marker='x', c='r', ms=10)
    plt.plot(np.argmin(values), np.min(values), marker='^', c='g', ms=10)
    plt.xlim((0, len(Ta)))
    plt.xlabel('Index')
    plt.ylabel('Value')
    plt.show()
def plot_match(Ta, Tb, values, indexes, m):
    """Plot the best match of query Tb inside Ta, plus the matrix profile.

    Near-duplicate of plot_discord() that additionally prints the index of
    the profile maximum.  `indexes` is accepted for symmetry with the other
    plot helpers but is not used here.
    """
    from matplotlib import gridspec
    plt.figure(figsize=(8, 4))
    gs = gridspec.GridSpec(1, 2, width_ratios=[int(len(Ta) / len(Tb)), 1])
    plt.subplot(gs[0])
    plt.plot(Ta, linestyle='--')
    plt.xlim((0, len(Ta)))
    # Debug aid: index of the profile maximum.
    print(np.argmax(values))
    plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g',
             label='Best Match')
    plt.legend(loc='best')
    plt.title('Time-Series')
    # NOTE(review): fixed y-range assumes z-normalised data -- confirm.
    plt.ylim((-3, 3))
    plt.subplot(gs[1])
    plt.plot(Tb)
    plt.title('Query')
    plt.xlim((0, len(Tb)))
    plt.ylim((-3, 3))
    plt.figure()
    plt.title('Matrix Profile')
    plt.plot(range(0, len(values)), values, '#ff5722')
    plt.plot(np.argmax(values), np.max(values), marker='x', c='r', ms=10)
    plt.plot(np.argmin(values), np.min(values), marker='^', c='g', ms=10)
    plt.xlim((0, len(Ta)))
    plt.xlabel('Index')
    plt.ylabel('Value')
    plt.show()
def RunModel(_file_name, _choice, _element_num=1):
    """Run the matrix-profile anomaly detector on one dataset file.

    :param _file_name: path of the dataset file (or folder, for UAH)
    :param _choice: dataset selector, 1..7 (GD, HSS, S5/YAHOO, NAB, 2D,
                    UAH, ECG); any other value leaves abnormal_data unset
                    and raises NameError below
    :param _element_num: number of features per time step; the flattened
                         profile is reshaped to (-1, _element_num) before
                         scoring.  BUG FIX: now defaults to 1 -- the
                         __main__ driver calls RunModel(file_name, dataset)
                         with only two arguments for datasets 1 and 2,
                         which used to raise TypeError.
    :return: (precision, recall, f1, roc_auc, average_precision, cks)
    """
    pattern_size = 5
    # Dispatch to the dataset-specific reader from utils.
    if _choice == 1:
        abnormal_data, abnormal_label = ReadGDDataset(_file_name)
    if _choice == 2:
        abnormal_data, abnormal_label = ReadHSSDataset(_file_name)
    if _choice == 3:
        abnormal_data, abnormal_label = ReadS5Dataset(_file_name)
    if _choice == 4:
        abnormal_data, abnormal_label = ReadNABDataset(_file_name)
    if _choice == 5:
        abnormal_data, abnormal_label = Read2DDataset(_file_name)
    if _choice == 6:
        abnormal_data, abnormal_label = ReadUAHDataset(_file_name)
    if _choice == 7:
        abnormal_data, abnormal_label = ReadECGDataset(_file_name)
    # Self-join: the series is matched against itself.
    ts = abnormal_data.flatten()
    query = abnormal_data.flatten()
    Pab, Iab = stamp(ts, query, pattern_size * _element_num)
    # plot_discord(ts, query, Pab, Iab, pattern_size * elem_num)
    # Per-timestep anomaly score: sum the profile over the feature axis,
    # then z-score it; label points beyond 3 sigma as anomalies.
    profile_per_step = np.sum(np.nan_to_num(Pab).reshape([-1, _element_num]), axis=1)
    final_zscore = Z_Score(profile_per_step)
    y_pred = CreateLabelBasedOnZscore(final_zscore, 3, True)
    # The profile is pattern_size shorter than the series, so trim labels.
    precision, recall, f1 = CalculatePrecisionRecallF1Metrics(abnormal_label[:-pattern_size], y_pred)
    # PrintPrecisionRecallF1Metrics(precision, recall, f1)
    fpr, tpr, roc_auc = CalculateROCAUCMetrics(abnormal_label[:-pattern_size], profile_per_step)
    # print('roc_auc=' + str(roc_auc))
    precision_curve, recall_curve, average_precision = CalculatePrecisionRecallCurve(abnormal_label[:-pattern_size], profile_per_step)
    # print('pr_auc=' + str(average_precision))
    cks = CalculateCohenKappaMetrics(abnormal_label[:-pattern_size], y_pred)
    # print('cohen_kappa=' + str(cks))
    return precision, recall, f1, roc_auc, average_precision, cks
if __name__ == '__main__':
try:
sys.argv[1]
except IndexError:
for n in range(1, 7):
dataset = n
if dataset == 1:
file_name = './GD/data/Genesis_AnomalyLabels.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 2:
file_name = './HSS/data/HRSS_anomalous_standard.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 4:
for root, dirs, _ in os.walk('./NAB/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 5:
for root, dirs, _ in os.walk('./2D/test'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=2)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 6:
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for root, dirs, files in os.walk('./UAH/'):
for dir in dirs:
folder_name = os.path.join(root, dir)
print(folder_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(folder_name, dataset, _element_num=4)
print('########################################')
print('precision=' + str(precision))
print('recall=' + str(recall))
print('f1=' + str(f1))
print('roc_auc=' + str(roc_auc))
print('pr_auc=' + str(pr_auc))
print('cks=' + str(cks))
print('########################################')
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 7:
for root, dirs, _ in os.walk('./ECG/'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=3)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
else:
dataset = int(sys.argv[1])
if dataset == 1:
file_name = './GD/data/Genesis_AnomalyLabels.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 2:
file_name = './HSS/data/HRSS_anomalous_standard.csv'
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset)
print('avg_precision=' + str(precision))
print('avg_recall=' + str(recall))
print('avg_f1=' + str(f1))
print('avg_roc_auc=' + str(roc_auc))
print('avg_pr_auc=' + str(pr_auc))
print('avg_cks=' + str(cks))
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 4:
for root, dirs, _ in os.walk('./NAB/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=1)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
        # Dataset 5: 2D test set. Same per-directory structure as dataset 4,
        # but with k_partition=3 and two elements per sample.
        if dataset == 5:
            for root, dirs, _ in os.walk('./2D/test'):
                for dir in dirs:
                    # k_partition is set per-dataset; presumably consumed by
                    # RunModel via an outer-scope variable -- TODO confirm
                    k_partition = 3
                    # per-directory accumulators, one score per file
                    s_precision = []
                    s_recall = []
                    s_f1 = []
                    s_roc_auc = []
                    s_pr_auc = []
                    s_cks = []
                    for _, _, files in os.walk(root + '/' + dir):
                        for file in files:
                            file_name = os.path.join(root, dir, file)
                            print(file_name)
                            # _element_num=2: looks like two values per time step -- verify against RunModel
                            precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=2)
                            s_precision.append(precision)
                            s_recall.append(recall)
                            s_f1.append(f1)
                            s_roc_auc.append(roc_auc)
                            s_pr_auc.append(pr_auc)
                            s_cks.append(cks)
                    # report the per-directory average of every metric
                    print('########################################')
                    avg_precision = CalculateAverageMetric(s_precision)
                    print('avg_precision=' + str(avg_precision))
                    avg_recall = CalculateAverageMetric(s_recall)
                    print('avg_recall=' + str(avg_recall))
                    avg_f1 = CalculateAverageMetric(s_f1)
                    print('avg_f1=' + str(avg_f1))
                    avg_roc_auc = CalculateAverageMetric(s_roc_auc)
                    print('avg_roc_auc=' + str(avg_roc_auc))
                    avg_pr_auc = CalculateAverageMetric(s_pr_auc)
                    print('avg_pr_auc=' + str(avg_pr_auc))
                    avg_cks = CalculateAverageMetric(s_cks)
                    print('avg_cks=' + str(avg_cks))
                    print('########################################')
        # Dataset 6: UAH. Unlike datasets 4/5/7, RunModel is given a whole
        # folder (not a file), per-folder metrics are printed immediately, and
        # the averages are taken across sub-directories rather than files.
        if dataset == 6:
            s_precision = []
            s_recall = []
            s_f1 = []
            s_roc_auc = []
            s_pr_auc = []
            s_cks = []
            for root, dirs, files in os.walk('./UAH/'):
                for dir in dirs:
                    folder_name = os.path.join(root, dir)
                    print(folder_name)
                    # _element_num=4: looks like four values per sample -- verify against RunModel
                    precision, recall, f1, roc_auc, pr_auc, cks = RunModel(folder_name, dataset, _element_num=4)
                    # print the per-folder scores as they are produced
                    print('########################################')
                    print('precision=' + str(precision))
                    print('recall=' + str(recall))
                    print('f1=' + str(f1))
                    print('roc_auc=' + str(roc_auc))
                    print('pr_auc=' + str(pr_auc))
                    print('cks=' + str(cks))
                    print('########################################')
                    s_precision.append(precision)
                    s_recall.append(recall)
                    s_f1.append(f1)
                    s_roc_auc.append(roc_auc)
                    s_pr_auc.append(pr_auc)
                    s_cks.append(cks)
            # averages across all UAH sub-directories
            print('########################################')
            avg_precision = CalculateAverageMetric(s_precision)
            print('avg_precision=' + str(avg_precision))
            avg_recall = CalculateAverageMetric(s_recall)
            print('avg_recall=' + str(avg_recall))
            avg_f1 = CalculateAverageMetric(s_f1)
            print('avg_f1=' + str(avg_f1))
            avg_roc_auc = CalculateAverageMetric(s_roc_auc)
            print('avg_roc_auc=' + str(avg_roc_auc))
            avg_pr_auc = CalculateAverageMetric(s_pr_auc)
            print('avg_pr_auc=' + str(avg_pr_auc))
            avg_cks = CalculateAverageMetric(s_cks)
            print('avg_cks=' + str(avg_cks))
            print('########################################')
        # Dataset 7: ECG. Same per-directory structure as datasets 4/5, with
        # k_partition=3 and three elements per sample.
        if dataset == 7:
            for root, dirs, _ in os.walk('./ECG/'):
                for dir in dirs:
                    # k_partition is set per-dataset; presumably consumed by
                    # RunModel via an outer-scope variable -- TODO confirm
                    k_partition = 3
                    # per-directory accumulators, one score per file
                    s_precision = []
                    s_recall = []
                    s_f1 = []
                    s_roc_auc = []
                    s_pr_auc = []
                    s_cks = []
                    for _, _, files in os.walk(root + '/' + dir):
                        for file in files:
                            file_name = os.path.join(root, dir, file)
                            print(file_name)
                            # _element_num=3: looks like three values per sample -- verify against RunModel
                            precision, recall, f1, roc_auc, pr_auc, cks = RunModel(file_name, dataset, _element_num=3)
                            s_precision.append(precision)
                            s_recall.append(recall)
                            s_f1.append(f1)
                            s_roc_auc.append(roc_auc)
                            s_pr_auc.append(pr_auc)
                            s_cks.append(cks)
                    # report the per-directory average of every metric
                    print('########################################')
                    avg_precision = CalculateAverageMetric(s_precision)
                    print('avg_precision=' + str(avg_precision))
                    avg_recall = CalculateAverageMetric(s_recall)
                    print('avg_recall=' + str(avg_recall))
                    avg_f1 = CalculateAverageMetric(s_f1)
                    print('avg_f1=' + str(avg_f1))
                    avg_roc_auc = CalculateAverageMetric(s_roc_auc)
                    print('avg_roc_auc=' + str(avg_roc_auc))
                    avg_pr_auc = CalculateAverageMetric(s_pr_auc)
                    print('avg_pr_auc=' + str(avg_pr_auc))
                    avg_cks = CalculateAverageMetric(s_cks)
                    print('avg_cks=' + str(avg_cks))
                    print('########################################')
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.cumsum",
"numpy.arange",
"numpy.divide",
"os.walk",
"numpy.flip",
"numpy.multiply",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"numpy.fft.fft",
"matplotlib.pyplot.plot",
"numpy.subtract",
"numpy.max",
"numpy.concatenate",
"numpy.min",
"... | [((278, 291), 'numpy.flip', 'np.flip', (['q', '(0)'], {}), '(q, 0)\n', (285, 291), True, 'import numpy as np\n'), ((386, 401), 'numpy.fft.fft', 'np.fft.fft', (['qra'], {}), '(qra)\n', (396, 401), True, 'import numpy as np\n'), ((412, 426), 'numpy.fft.fft', 'np.fft.fft', (['ta'], {}), '(ta)\n', (422, 426), True, 'import numpy as np\n'), ((732, 745), 'numpy.flip', 'np.flip', (['q', '(0)'], {}), '(q, 0)\n', (739, 745), True, 'import numpy as np\n'), ((840, 855), 'numpy.fft.fft', 'np.fft.fft', (['qra'], {}), '(qra)\n', (850, 855), True, 'import numpy as np\n'), ((866, 880), 'numpy.fft.fft', 'np.fft.fft', (['ta'], {}), '(ta)\n', (876, 880), True, 'import numpy as np\n'), ((1149, 1164), 'numpy.zeros', 'np.zeros', (['(n - m)'], {}), '(n - m)\n', (1157, 1164), True, 'import numpy as np\n'), ((1176, 1191), 'numpy.zeros', 'np.zeros', (['(n - m)'], {}), '(n - m)\n', (1184, 1191), True, 'import numpy as np\n'), ((1520, 1529), 'numpy.sum', 'np.sum', (['Q'], {}), '(Q)\n', (1526, 1529), True, 'import numpy as np\n'), ((1659, 1675), 'numpy.zeros', 'np.zeros', (['(na - m)'], {}), '(na - m)\n', (1667, 1675), True, 'import numpy as np\n'), ((1689, 1705), 'numpy.zeros', 'np.zeros', (['(na - m)'], {}), '(na - m)\n', (1697, 1705), True, 'import numpy as np\n'), ((1760, 1773), 'numpy.cumsum', 'np.cumsum', (['ta'], {}), '(ta)\n', (1769, 1773), True, 'import numpy as np\n'), ((1999, 2018), 'numpy.divide', 'np.divide', (['sum_t', 'm'], {}), '(sum_t, m)\n', (2008, 2018), True, 'import numpy as np\n'), ((2033, 2053), 'numpy.divide', 'np.divide', (['sum_t2', 'm'], {}), '(sum_t2, m)\n', (2042, 2053), True, 'import numpy as np\n'), ((2070, 2089), 'numpy.power', 'np.power', (['mean_t', '(2)'], {}), '(mean_t, 2)\n', (2078, 2089), True, 'import numpy as np\n'), ((2105, 2136), 'numpy.subtract', 'np.subtract', (['mean_t2', 'mean_t_p2'], {}), '(mean_t2, mean_t_p2)\n', (2116, 2136), True, 'import numpy as np\n'), ((2151, 2168), 'numpy.sqrt', 'np.sqrt', (['sigma_t2'], {}), '(sigma_t2)\n', (2158, 
2168), True, 'import numpy as np\n'), ((2359, 2372), 'numpy.cumsum', 'np.cumsum', (['ta'], {}), '(ta)\n', (2368, 2372), True, 'import numpy as np\n'), ((2626, 2645), 'numpy.divide', 'np.divide', (['sum_t', 'm'], {}), '(sum_t, m)\n', (2635, 2645), True, 'import numpy as np\n'), ((2660, 2680), 'numpy.divide', 'np.divide', (['sum_t2', 'm'], {}), '(sum_t2, m)\n', (2669, 2680), True, 'import numpy as np\n'), ((2697, 2716), 'numpy.power', 'np.power', (['mean_t', '(2)'], {}), '(mean_t, 2)\n', (2705, 2716), True, 'import numpy as np\n'), ((2732, 2763), 'numpy.subtract', 'np.subtract', (['mean_t2', 'mean_t_p2'], {}), '(mean_t2, mean_t_p2)\n', (2743, 2763), True, 'import numpy as np\n'), ((2778, 2795), 'numpy.sqrt', 'np.sqrt', (['sigma_t2'], {}), '(sigma_t2)\n', (2785, 2795), True, 'import numpy as np\n'), ((3962, 3978), 'numpy.zeros', 'np.zeros', (['(na - m)'], {}), '(na - m)\n', (3970, 3978), True, 'import numpy as np\n'), ((3991, 4012), 'numpy.arange', 'np.arange', (['(nb - m + 1)'], {}), '(nb - m + 1)\n', (4000, 4012), True, 'import numpy as np\n'), ((4118, 4134), 'numpy.zeros', 'np.zeros', (['(na - m)'], {}), '(na - m)\n', (4126, 4134), True, 'import numpy as np\n'), ((5079, 5105), 'numpy.full', 'np.full', (['(seq_l + 1)', 'np.inf'], {}), '(seq_l + 1, np.inf)\n', (5086, 5105), True, 'import numpy as np\n'), ((5116, 5135), 'numpy.zeros', 'np.zeros', (['(n - m + 1)'], {}), '(n - m + 1)\n', (5124, 5135), True, 'import numpy as np\n'), ((5992, 6009), 'numpy.sqrt', 'np.sqrt', (['Pab', 'Pab'], {}), '(Pab, Pab)\n', (5999, 6009), True, 'import numpy as np\n'), ((6570, 6596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (6580, 6596), True, 'import matplotlib.pyplot as plt\n'), ((6601, 6617), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (6612, 6617), True, 'import matplotlib.pyplot as plt\n'), ((6622, 6661), 'matplotlib.pyplot.plot', 'plt.plot', (['Ta'], {'linestyle': '"""--"""', 'alpha': '(0.5)'}), "(Ta, 
linestyle='--', alpha=0.5)\n", (6630, 6661), True, 'import matplotlib.pyplot as plt\n'), ((7019, 7041), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (7029, 7041), True, 'import matplotlib.pyplot as plt\n'), ((7046, 7070), 'matplotlib.pyplot.title', 'plt.title', (['"""Time-Series"""'], {}), "('Time-Series')\n", (7055, 7070), True, 'import matplotlib.pyplot as plt\n'), ((7076, 7092), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (7087, 7092), True, 'import matplotlib.pyplot as plt\n'), ((7097, 7124), 'matplotlib.pyplot.title', 'plt.title', (['"""Matrix Profile"""'], {}), "('Matrix Profile')\n", (7106, 7124), True, 'import matplotlib.pyplot as plt\n'), ((7360, 7379), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Index"""'], {}), "('Index')\n", (7370, 7379), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7403), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (7394, 7403), True, 'import matplotlib.pyplot as plt\n'), ((7408, 7418), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7416, 7418), True, 'import matplotlib.pyplot as plt\n'), ((7507, 7533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (7517, 7533), True, 'import matplotlib.pyplot as plt\n'), ((7614, 7632), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (7625, 7632), True, 'import matplotlib.pyplot as plt\n'), ((7637, 7665), 'matplotlib.pyplot.plot', 'plt.plot', (['Ta'], {'linestyle': '"""--"""'}), "(Ta, linestyle='--')\n", (7645, 7665), True, 'import matplotlib.pyplot as plt\n'), ((7845, 7867), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (7855, 7867), True, 'import matplotlib.pyplot as plt\n'), ((7872, 7896), 'matplotlib.pyplot.title', 'plt.title', (['"""Time-Series"""'], {}), "('Time-Series')\n", (7881, 7896), True, 'import matplotlib.pyplot as plt\n'), ((7901, 7918), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 3)'], {}), '((-3, 3))\n', (7909, 7918), True, 'import matplotlib.pyplot as plt\n'), ((7924, 7942), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (7935, 7942), True, 'import matplotlib.pyplot as plt\n'), ((7947, 7959), 'matplotlib.pyplot.plot', 'plt.plot', (['Tb'], {}), '(Tb)\n', (7955, 7959), True, 'import matplotlib.pyplot as plt\n'), ((7965, 7983), 'matplotlib.pyplot.title', 'plt.title', (['"""Query"""'], {}), "('Query')\n", (7974, 7983), True, 'import matplotlib.pyplot as plt\n'), ((8015, 8032), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 3)'], {}), '((-3, 3))\n', (8023, 8032), True, 'import matplotlib.pyplot as plt\n'), ((8038, 8050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8048, 8050), True, 'import matplotlib.pyplot as plt\n'), ((8055, 8082), 'matplotlib.pyplot.title', 'plt.title', (['"""Matrix Profile"""'], {}), "('Matrix Profile')\n", (8064, 8082), True, 'import matplotlib.pyplot as plt\n'), ((8318, 8337), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Index"""'], {}), "('Index')\n", (8328, 8337), True, 'import matplotlib.pyplot as plt\n'), ((8342, 8361), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (8352, 8361), True, 'import matplotlib.pyplot as plt\n'), ((8367, 8377), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8375, 8377), True, 'import matplotlib.pyplot as plt\n'), ((8464, 8490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (8474, 8490), True, 'import matplotlib.pyplot as plt\n'), ((8571, 8589), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (8582, 8589), True, 'import matplotlib.pyplot as plt\n'), ((8594, 8622), 'matplotlib.pyplot.plot', 'plt.plot', (['Ta'], {'linestyle': '"""--"""'}), "(Ta, linestyle='--')\n", (8602, 8622), True, 'import matplotlib.pyplot as plt\n'), ((8832, 8854), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'loc': '"""best"""'}), "(loc='best')\n", (8842, 8854), True, 'import matplotlib.pyplot as plt\n'), ((8859, 8883), 'matplotlib.pyplot.title', 'plt.title', (['"""Time-Series"""'], {}), "('Time-Series')\n", (8868, 8883), True, 'import matplotlib.pyplot as plt\n'), ((8888, 8905), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 3)'], {}), '((-3, 3))\n', (8896, 8905), True, 'import matplotlib.pyplot as plt\n'), ((8911, 8929), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (8922, 8929), True, 'import matplotlib.pyplot as plt\n'), ((8934, 8946), 'matplotlib.pyplot.plot', 'plt.plot', (['Tb'], {}), '(Tb)\n', (8942, 8946), True, 'import matplotlib.pyplot as plt\n'), ((8952, 8970), 'matplotlib.pyplot.title', 'plt.title', (['"""Query"""'], {}), "('Query')\n", (8961, 8970), True, 'import matplotlib.pyplot as plt\n'), ((9002, 9019), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3, 3)'], {}), '((-3, 3))\n', (9010, 9019), True, 'import matplotlib.pyplot as plt\n'), ((9025, 9037), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9035, 9037), True, 'import matplotlib.pyplot as plt\n'), ((9042, 9069), 'matplotlib.pyplot.title', 'plt.title', (['"""Matrix Profile"""'], {}), "('Matrix Profile')\n", (9051, 9069), True, 'import matplotlib.pyplot as plt\n'), ((9305, 9324), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Index"""'], {}), "('Index')\n", (9315, 9324), True, 'import matplotlib.pyplot as plt\n'), ((9329, 9348), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (9339, 9348), True, 'import matplotlib.pyplot as plt\n'), ((9353, 9363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9361, 9363), True, 'import matplotlib.pyplot as plt\n'), ((239, 250), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (247, 250), True, 'import numpy as np\n'), ((334, 353), 'numpy.zeros', 'np.zeros', (['(2 * n - m)'], {}), '(2 * n - m)\n', (342, 353), True, 'import numpy as np\n'), ((530, 552), 'numpy.multiply', 'np.multiply', 
(['qraf', 'taf'], {}), '(qraf, taf)\n', (541, 552), True, 'import numpy as np\n'), ((693, 704), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (701, 704), True, 'import numpy as np\n'), ((788, 807), 'numpy.zeros', 'np.zeros', (['(2 * n - m)'], {}), '(2 * n - m)\n', (796, 807), True, 'import numpy as np\n'), ((984, 1006), 'numpy.multiply', 'np.multiply', (['qraf', 'taf'], {}), '(qraf, taf)\n', (995, 1006), True, 'import numpy as np\n'), ((1346, 1358), 'numpy.abs', 'np.abs', (['dist'], {}), '(dist)\n', (1352, 1358), True, 'import numpy as np\n'), ((1549, 1563), 'numpy.power', 'np.power', (['Q', '(2)'], {}), '(Q, 2)\n', (1557, 1563), True, 'import numpy as np\n'), ((1808, 1823), 'numpy.power', 'np.power', (['ta', '(2)'], {}), '(ta, 2)\n', (1816, 1823), True, 'import numpy as np\n'), ((2407, 2422), 'numpy.power', 'np.power', (['ta', '(2)'], {}), '(ta, 2)\n', (2415, 2422), True, 'import numpy as np\n'), ((2466, 2515), 'numpy.concatenate', 'np.concatenate', (['([0], cumulative_sum_t[0:na - m])'], {}), '(([0], cumulative_sum_t[0:na - m]))\n', (2480, 2515), True, 'import numpy as np\n'), ((2561, 2611), 'numpy.concatenate', 'np.concatenate', (['([0], cumulative_sum_t2[0:na - m])'], {}), '(([0], cumulative_sum_t2[0:na - m]))\n', (2575, 2611), True, 'import numpy as np\n'), ((2982, 2991), 'numpy.std', 'np.std', (['Q'], {}), '(Q)\n', (2988, 2991), True, 'import numpy as np\n'), ((3927, 3942), 'numpy.ones', 'np.ones', (['(na - m)'], {}), '(na - m)\n', (3934, 3942), True, 'import numpy as np\n'), ((4652, 4670), 'numpy.minimum', 'np.minimum', (['Pab', 'D'], {}), '(Pab, D)\n', (4662, 4670), True, 'import numpy as np\n'), ((5963, 5986), 'numpy.minimum', 'np.minimum', (['Pab', 'D', 'Pab'], {}), '(Pab, D, Pab)\n', (5973, 5986), True, 'import numpy as np\n'), ((6700, 6717), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (6709, 6717), True, 'import numpy as np\n'), ((7193, 7210), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (7202, 7210), True, 'import 
numpy as np\n'), ((7212, 7226), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (7218, 7226), True, 'import numpy as np\n'), ((7267, 7284), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (7276, 7284), True, 'import numpy as np\n'), ((7286, 7300), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (7292, 7300), True, 'import numpy as np\n'), ((8151, 8168), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (8160, 8168), True, 'import numpy as np\n'), ((8170, 8184), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (8176, 8184), True, 'import numpy as np\n'), ((8225, 8242), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (8234, 8242), True, 'import numpy as np\n'), ((8244, 8258), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (8250, 8258), True, 'import numpy as np\n'), ((8661, 8678), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (8670, 8678), True, 'import numpy as np\n'), ((9138, 9155), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (9147, 9155), True, 'import numpy as np\n'), ((9157, 9171), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (9163, 9171), True, 'import numpy as np\n'), ((9212, 9229), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (9221, 9229), True, 'import numpy as np\n'), ((9231, 9245), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (9237, 9245), True, 'import numpy as np\n'), ((3029, 3038), 'numpy.std', 'np.std', (['Q'], {}), '(Q)\n', (3035, 3038), True, 'import numpy as np\n'), ((4269, 4292), 'numpy.atleast_1d', 'np.atleast_1d', (['(Ta == Tb)'], {}), '(Ta == Tb)\n', (4282, 4292), True, 'import numpy as np\n'), ((5409, 5420), 'numpy.copy', 'np.copy', (['QT'], {}), '(QT)\n', (5416, 5420), True, 'import numpy as np\n'), ((6739, 6756), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (6748, 6756), True, 'import numpy as np\n'), ((6885, 6902), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (6894, 6902), True, 
'import numpy as np\n'), ((7713, 7730), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (7722, 7730), True, 'import numpy as np\n'), ((8700, 8717), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (8709, 8717), True, 'import numpy as np\n'), ((23589, 23612), 'os.walk', 'os.walk', (['"""./YAHOO/data"""'], {}), "('./YAHOO/data')\n", (23596, 23612), False, 'import os\n'), ((25471, 25492), 'os.walk', 'os.walk', (['"""./NAB/data"""'], {}), "('./NAB/data')\n", (25478, 25492), False, 'import os\n'), ((27351, 27371), 'os.walk', 'os.walk', (['"""./2D/test"""'], {}), "('./2D/test')\n", (27358, 27371), False, 'import os\n'), ((29387, 29404), 'os.walk', 'os.walk', (['"""./UAH/"""'], {}), "('./UAH/')\n", (29394, 29404), False, 'import os\n'), ((31172, 31189), 'os.walk', 'os.walk', (['"""./ECG/"""'], {}), "('./ECG/')\n", (31179, 31189), False, 'import os\n'), ((3015, 3025), 'numpy.mean', 'np.mean', (['Q'], {}), '(Q)\n', (3022, 3025), True, 'import numpy as np\n'), ((3344, 3359), 'numpy.abs', 'np.abs', (['(idx - i)'], {}), '(idx - i)\n', (3350, 3359), True, 'import numpy as np\n'), ((4482, 4510), 'numpy.maximum', 'np.maximum', (['(idx - m / 2.0)', '(0)'], {}), '(idx - m / 2.0, 0)\n', (4492, 4510), True, 'import numpy as np\n'), ((5797, 5825), 'numpy.maximum', 'np.maximum', (['(idx - m / 2.0)', '(0)'], {}), '(idx - m / 2.0, 0)\n', (5807, 5825), True, 'import numpy as np\n'), ((6758, 6775), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (6767, 6775), True, 'import numpy as np\n'), ((6785, 6802), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (6794, 6802), True, 'import numpy as np\n'), ((6904, 6921), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (6913, 6921), True, 'import numpy as np\n'), ((6931, 6948), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (6940, 6948), True, 'import numpy as np\n'), ((7732, 7749), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (7741, 7749), True, 'import 
numpy as np\n'), ((7759, 7776), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (7768, 7776), True, 'import numpy as np\n'), ((8719, 8736), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (8728, 8736), True, 'import numpy as np\n'), ((8746, 8763), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (8755, 8763), True, 'import numpy as np\n'), ((6803, 6820), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (6812, 6820), True, 'import numpy as np\n'), ((6949, 6966), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (6958, 6966), True, 'import numpy as np\n'), ((7777, 7794), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (7786, 7794), True, 'import numpy as np\n'), ((8764, 8781), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (8773, 8781), True, 'import numpy as np\n'), ((10277, 10295), 'numpy.nan_to_num', 'np.nan_to_num', (['Pab'], {}), '(Pab)\n', (10290, 10295), True, 'import numpy as np\n'), ((10642, 10660), 'numpy.nan_to_num', 'np.nan_to_num', (['Pab'], {}), '(Pab)\n', (10655, 10660), True, 'import numpy as np\n'), ((10862, 10880), 'numpy.nan_to_num', 'np.nan_to_num', (['Pab'], {}), '(Pab)\n', (10875, 10880), True, 'import numpy as np\n'), ((12405, 12428), 'os.walk', 'os.walk', (['"""./YAHOO/data"""'], {}), "('./YAHOO/data')\n", (12412, 12428), False, 'import os\n'), ((14427, 14448), 'os.walk', 'os.walk', (['"""./NAB/data"""'], {}), "('./NAB/data')\n", (14434, 14448), False, 'import os\n'), ((16447, 16467), 'os.walk', 'os.walk', (['"""./2D/test"""'], {}), "('./2D/test')\n", (16454, 16467), False, 'import os\n'), ((18646, 18663), 'os.walk', 'os.walk', (['"""./UAH/"""'], {}), "('./UAH/')\n", (18653, 18663), False, 'import os\n'), ((20567, 20584), 'os.walk', 'os.walk', (['"""./ECG/"""'], {}), "('./ECG/')\n", (20574, 20584), False, 'import os\n'), ((23924, 23949), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (23931, 23949), False, 'import os\n'), 
((25804, 25829), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (25811, 25829), False, 'import os\n'), ((27682, 27707), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (27689, 27707), False, 'import os\n'), ((29473, 29496), 'os.path.join', 'os.path.join', (['root', 'dir'], {}), '(root, dir)\n', (29485, 29496), False, 'import os\n'), ((31500, 31525), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (31507, 31525), False, 'import os\n'), ((12776, 12801), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (12783, 12801), False, 'import os\n'), ((14796, 14821), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (14803, 14821), False, 'import os\n'), ((16814, 16839), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (16821, 16839), False, 'import os\n'), ((18740, 18763), 'os.path.join', 'os.path.join', (['root', 'dir'], {}), '(root, dir)\n', (18752, 18763), False, 'import os\n'), ((20931, 20956), 'os.walk', 'os.walk', (["(root + '/' + dir)"], {}), "(root + '/' + dir)\n", (20938, 20956), False, 'import os\n'), ((24034, 24063), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', (24046, 24063), False, 'import os\n'), ((25914, 25943), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', (25926, 25943), False, 'import os\n'), ((27792, 27821), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', (27804, 27821), False, 'import os\n'), ((31610, 31639), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', (31622, 31639), False, 'import os\n'), ((12894, 12923), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', (12906, 12923), False, 'import os\n'), ((14914, 14943), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', 
(14926, 14943), False, 'import os\n'), ((16932, 16961), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', (16944, 16961), False, 'import os\n'), ((21049, 21078), 'os.path.join', 'os.path.join', (['root', 'dir', 'file'], {}), '(root, dir, file)\n', (21061, 21078), False, 'import os\n')] |
# USAGE
# python anpr_char_det_train.py --modelPath models --imagePath ./../datasets/lplates/train
# You can either pass the annFile (xml annotations file), or if you don't then annotations are loaded from the file name
# labelbinarizer
# Fit to full set of 10 numeric and 26 alphas
# lb.fit(['0','1', ... ,'9','a','b', ... ,'z'])
# Then we can transform all the test and training license plates.
# But how do we transform license plates less than 7 characters?
# Appears that we can use an input which was not presented during "fit" operation, eg
# lb.transform(['0','1','2','c','a',"blank"])
# "blank", returns an all zero vector, [0, 0, 0, ... ,0, 0], whereas the other targets return one-hot vectors
# <NAME> does not have this problem, because he assumes that all plates contain 7 characters
# Check Google Street View paper, and see what they do. StreetView paper, does not backprop when a digit is absent.
# Not sure how that would work in Keras? Can we simply find the output and reflect this back as the target?
# Sounds like a non-standard feature
# If we use the zero vector to represent blanks, does this effectively disable
# back propagation? I don't think so.
# Could just add blank to the list when "fitting". No, what is a blank target?
# OK, so we need an 8th char to represent the number of characters. I do not know how to add a different classifier from
# the other seven, so let's just make it the same length (ie 36), and encode the length as
# '1', '2', ... ,'7'
# TODO: Need to add plate text length. For now only 7 char plate text is allowed
# import the necessary packages
from sklearn.model_selection import train_test_split
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras import losses
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
from skimage import img_as_ubyte
from keras.utils import plot_model
import os
import sys
from keras.callbacks import ModelCheckpoint
from keras import regularizers
# enable search for base2designs module directory in parent directory
sys.path.append(os.path.split(os.getcwd())[0])
from base2designs.preprocessing import ImageToArrayPreprocessor
from base2designs.preprocessing import SimplePreprocessor
from base2designs.datasets import AnprLabelProcessor
from base2designs.datasets import AnprDatasetLoader
from base2designs.nn.conv import AnprCharDet
def plot(H, epochs, filename):
  """Save the training/validation loss curves to an image file.

  Arguments:
    H -- Keras History object returned by fit()/fit_generator(); its
         history dict must contain "loss" and "val_loss"
    epochs -- number of epochs trained (x-axis length)
    filename -- path of the image file to write
  """
  xAxis = np.arange(0, epochs)
  plt.style.use("ggplot")
  plt.figure()
  plt.plot(xAxis, H.history["loss"], label="train_loss")
  plt.plot(xAxis, H.history["val_loss"], label="val_loss")
  plt.title("Training Loss")
  plt.xlabel("Epoch #")
  plt.ylabel("Loss")
  plt.legend()
  plt.savefig(filename)
def evaluate(model, testX, testY):
  """Evaluate the character detector on a held-out set.

  Runs the model on testX, snaps each per-character softmax output to a
  one-hot vector, decodes both predictions and ground truth back to plate
  text, and reports the percentage of fully-correct plates and of correct
  individual characters.

  Arguments:
    model -- trained Keras model; predict() must return an array of shape
             (nSamples, PLATE_TEXT_LEN, numCharClasses)
    testX -- test images in the network's input format
    testY -- one-hot encoded ground-truth labels, same shape as the output

  Returns:
    (numCorrPlates, numCorrChars) -- both as percentages in [0, 100]

  Relies on the module-level globals alp (the AnprLabelProcessor) and
  PLATE_TEXT_LEN.
  """
  # evaluate the network
  # get the predictions, and create clean one-hot predictions
  print("[INFO] display some results...")
  preds = model.predict(testX, batch_size=32)
  argMax = preds.argmax(axis=-1)
  # BUGFIX: dtype=np.int was an alias removed in NumPy 1.24; use builtin int.
  # put_along_axis sets a 1 at each argmax position in one vectorized pass,
  # replacing the original double Python loop.
  predsClean = np.zeros_like(preds, dtype=int)
  np.put_along_axis(predsClean, argMax[..., np.newaxis], 1, axis=-1)
  # get the ground truth and predicted plate text
  gtPlateText = alp.inverse_transform(testY)
  predPlateText = alp.inverse_transform(predsClean)
  # guard against an empty test set (would otherwise divide by zero)
  if len(predPlateText) == 0:
    print("[INFO] Number of correct plates: {:2.02f}%".format(0.0))
    print("[INFO] Number of correct chars: {:2.02f}%".format(0.0))
    return 0.0, 0.0
  # Generate some statistics: count correct chars and fully-correct plates
  totalNumChars = PLATE_TEXT_LEN * len(predPlateText)
  numCorrChars = 0
  numCorrPlates = 0
  for predText, gtText in zip(predPlateText, gtPlateText):
    charCorr = sum(predText[j] == gtText[j] for j in range(PLATE_TEXT_LEN))
    numCorrChars += charCorr
    if charCorr == PLATE_TEXT_LEN:
      numCorrPlates += 1
  numCorrPlates = 100. * numCorrPlates / len(predPlateText)
  numCorrChars = 100. * numCorrChars / totalNumChars
  print("[INFO] Number of correct plates: {:2.02f}%".format(numCorrPlates))
  print("[INFO] Number of correct chars: {:2.02f}%".format(numCorrChars))
  return numCorrPlates, numCorrChars
# Build the command-line parser and collect the arguments into a dict.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--imagePath", required=True, help="path to input dataset")
ap.add_argument("-a", "--annFile", required=False, default=None, help="path to annotations file")
ap.add_argument("-m", "--modelPath", required=True, help="path to output model")
args = vars(ap.parse_args())

# Validate the supplied paths up front, exiting on the first bad one.
if not os.path.exists(args["imagePath"]):
  print("[ERROR] --imagePath \"{}\", does not exist.".format(args["imagePath"]))
  sys.exit()
if not os.path.exists(args["modelPath"]):
  print("[ERROR] --modelPath \"{}\", does not exist.".format(args["modelPath"]))
  sys.exit()
# annFile is optional; only validate it when the user supplied one
if args["annFile"] is not None and not os.path.exists(args["annFile"]):
  print("[ERROR] --annFile \"{}\", does not exist.".format(args["annFile"]))
  sys.exit()
# Some constants
EPOCHS = 2000 # Number of epochs of training
INPUT_WIDTH = 128 # Network input width
INPUT_HEIGHT = 64 # Network input height
LEARN_RATE=0.001 # Network learning rate
augEnabled = True # Image augmentation. Helps reduce over-fitting
# construct the image generator for data augmentation
# (per the Keras ImageDataGenerator API: rotation in degrees, shifts as
# fractions of width/height)
# If values are too large, then the plate characters can be moved outside the image boundaries
# Use deep-learning/pb_code/chapter02-data_augmentation to view the augmented images
# 2/26/18 Reduced the magnitude of the variations. This just about keeps the image inside the boundaries
# of the frame
aug = ImageDataGenerator(rotation_range=4, width_shift_range=0.05,
  height_shift_range=0.05, shear_range=0.1, zoom_range=0.1,
  horizontal_flip=False, fill_mode="nearest")
# initialize the image preprocessors
# sp converts image to gray, and then resizes to 128,64
# iap converts the OpenCV numpy array format to Keras image library format. ie adds an extra dimension to the image, ie 128,64,1
# iap should be applied after any preprocessors that use opencv routines.
sp = SimplePreprocessor(INPUT_WIDTH,INPUT_HEIGHT)
iap = ImageToArrayPreprocessor()
# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
print("[INFO] loading images...")
adl = AnprDatasetLoader(preprocessors=[sp,iap])
(data, labels, winLocs, fileNames, plateCnt) = adl.loadData(args["imagePath"], annFile=args["annFile"], verbose=30,)
if len(data) == 0:
  print("[ERROR] No image files found in \"{}\"".format(args["imagePath"]))
  sys.exit()
data = data.astype("float") / 255.0
# set up the label classes: 10 digits + 26 upper-case letters = 36 classes
plateChars = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I',
              'J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
plateLens = [1,2,3,4,5,6,7]
# PLATE_TEXT_LEN == 7: the network always predicts seven characters
PLATE_TEXT_LEN = plateLens[-1]
NUM_CHAR_CLASSES = len(plateChars)
alp = AnprLabelProcessor(plateChars, plateLens)
# convert the labels from integers to one-hot vectors
plateLabelsOneHot = alp.transform(labels)
# partition the data into training and testing splits using 85% of
# the data for training and the remaining 15% for testing
(trainX, testX, trainY, testY) = train_test_split(data, plateLabelsOneHot,
  test_size=0.15, random_state=42)
# Reshape the output vectors to match outputs expected by the model:
# one (PLATE_TEXT_LEN, NUM_CHAR_CLASSES) one-hot matrix per sample
trainY = trainY.reshape(-1,PLATE_TEXT_LEN, NUM_CHAR_CLASSES)
testY = testY.reshape(-1,PLATE_TEXT_LEN, NUM_CHAR_CLASSES)
# initialize the optimizer and model
print("[INFO] compiling model...")
#opt = SGD(lr=LEARN_RATE, decay=LEARN_RATE/EPOCHS)
#opt = RMSprop(lr=LEARN_RATE, decay=LEARN_RATE/EPOCHS)
opt = RMSprop(lr=LEARN_RATE)
#opt = Adam(lr=LEARN_RATE)
model = AnprCharDet.build(width=INPUT_WIDTH, height=INPUT_HEIGHT, depth=1, textLen=PLATE_TEXT_LEN, numCharClasses=NUM_CHAR_CLASSES)
model.compile(loss='categorical_crossentropy', optimizer=opt)
# Add L2 regularizers to every layer
#for layer in model.layers:
#  layer.kernel_regularizer = regularizers.l2(0.01)
# write a diagram of the network architecture to disk
plot_model(model, to_file="anprCharDet.png", show_shapes=True)
# construct the callback to save only the *best* model to disk
# based on the validation loss
# (file name encodes the epoch number and validation loss; period=50 means
# the checkpoint is only considered every 50 epochs)
fname = os.path.sep.join([args["modelPath"],
  "model-{epoch:03d}-{val_loss:.4f}.hdf5"])
checkpoint = ModelCheckpoint(fname, monitor="val_loss", mode="min",
  save_best_only=True, period=50, verbose=1)
callbacks = [checkpoint]
# train the network, either with on-the-fly augmentation or on the raw data
print("[INFO] training network...")
if augEnabled == True:
  H= model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
    validation_data=(testX, testY), epochs=EPOCHS,
    steps_per_epoch=len(trainX) // 32, callbacks=callbacks, verbose=0)
else:
  H = model.fit(trainX, trainY, validation_data=(testX, testY),
    batch_size=32, epochs=EPOCHS, callbacks=callbacks, verbose=1)
# save the loss curves and report final accuracy on the test split
plot(H, EPOCHS, "anpr_char_det_train_plot.png")
# evaluate the network after training
print("[INFO] display some results after training...")
trainError = evaluate(model, testX, testY)
# save the network to disk
#print("[INFO] serializing network...")
#model.save(args["model"])
| [
"matplotlib.pyplot.ylabel",
"base2designs.preprocessing.SimplePreprocessor",
"keras.preprocessing.image.ImageDataGenerator",
"os.path.sep.join",
"sys.exit",
"numpy.arange",
"os.path.exists",
"argparse.ArgumentParser",
"base2designs.datasets.AnprLabelProcessor",
"keras.utils.plot_model",
"matplot... | [((4159, 4184), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4182, 4184), False, 'import argparse\n'), ((5592, 5762), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(4)', 'width_shift_range': '(0.05)', 'height_shift_range': '(0.05)', 'shear_range': '(0.1)', 'zoom_range': '(0.1)', 'horizontal_flip': '(False)', 'fill_mode': '"""nearest"""'}), "(rotation_range=4, width_shift_range=0.05,\n height_shift_range=0.05, shear_range=0.1, zoom_range=0.1,\n horizontal_flip=False, fill_mode='nearest')\n", (5610, 5762), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6061, 6106), 'base2designs.preprocessing.SimplePreprocessor', 'SimplePreprocessor', (['INPUT_WIDTH', 'INPUT_HEIGHT'], {}), '(INPUT_WIDTH, INPUT_HEIGHT)\n', (6079, 6106), False, 'from base2designs.preprocessing import SimplePreprocessor\n'), ((6112, 6138), 'base2designs.preprocessing.ImageToArrayPreprocessor', 'ImageToArrayPreprocessor', ([], {}), '()\n', (6136, 6138), False, 'from base2designs.preprocessing import ImageToArrayPreprocessor\n'), ((6268, 6310), 'base2designs.datasets.AnprDatasetLoader', 'AnprDatasetLoader', ([], {'preprocessors': '[sp, iap]'}), '(preprocessors=[sp, iap])\n', (6285, 6310), False, 'from base2designs.datasets import AnprDatasetLoader\n'), ((6898, 6939), 'base2designs.datasets.AnprLabelProcessor', 'AnprLabelProcessor', (['plateChars', 'plateLens'], {}), '(plateChars, plateLens)\n', (6916, 6939), False, 'from base2designs.datasets import AnprLabelProcessor\n'), ((7195, 7269), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'plateLabelsOneHot'], {'test_size': '(0.15)', 'random_state': '(42)'}), '(data, plateLabelsOneHot, test_size=0.15, random_state=42)\n', (7211, 7269), False, 'from sklearn.model_selection import train_test_split\n'), ((7645, 7667), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'LEARN_RATE'}), '(lr=LEARN_RATE)\n', (7652, 7667), 
False, 'from keras.optimizers import RMSprop\n'), ((7703, 7831), 'base2designs.nn.conv.AnprCharDet.build', 'AnprCharDet.build', ([], {'width': 'INPUT_WIDTH', 'height': 'INPUT_HEIGHT', 'depth': '(1)', 'textLen': 'PLATE_TEXT_LEN', 'numCharClasses': 'NUM_CHAR_CLASSES'}), '(width=INPUT_WIDTH, height=INPUT_HEIGHT, depth=1, textLen=\n PLATE_TEXT_LEN, numCharClasses=NUM_CHAR_CLASSES)\n', (7720, 7831), False, 'from base2designs.nn.conv import AnprCharDet\n'), ((8008, 8070), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': '"""anprCharDet.png"""', 'show_shapes': '(True)'}), "(model, to_file='anprCharDet.png', show_shapes=True)\n", (8018, 8070), False, 'from keras.utils import plot_model\n'), ((8174, 8252), 'os.path.sep.join', 'os.path.sep.join', (["[args['modelPath'], 'model-{epoch:03d}-{val_loss:.4f}.hdf5']"], {}), "([args['modelPath'], 'model-{epoch:03d}-{val_loss:.4f}.hdf5'])\n", (8190, 8252), False, 'import os\n'), ((8267, 8368), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['fname'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_best_only': '(True)', 'period': '(50)', 'verbose': '(1)'}), "(fname, monitor='val_loss', mode='min', save_best_only=True,\n period=50, verbose=1)\n", (8282, 8368), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2547, 2570), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2560, 2570), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2585), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2583, 2585), True, 'import matplotlib.pyplot as plt\n'), ((2734, 2760), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss"""'], {}), "('Training Loss')\n", (2743, 2760), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (2773, 2784), True, 'import matplotlib.pyplot as plt\n'), ((2787, 2805), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), 
"('Loss')\n", (2797, 2805), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2820), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2818, 2820), True, 'import matplotlib.pyplot as plt\n'), ((2823, 2844), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (2834, 2844), True, 'import matplotlib.pyplot as plt\n'), ((3118, 3152), 'numpy.zeros_like', 'np.zeros_like', (['preds'], {'dtype': 'np.int'}), '(preds, dtype=np.int)\n', (3131, 3152), True, 'import numpy as np\n'), ((4522, 4555), 'os.path.exists', 'os.path.exists', (["args['imagePath']"], {}), "(args['imagePath'])\n", (4536, 4555), False, 'import os\n'), ((4649, 4659), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4657, 4659), False, 'import sys\n'), ((4663, 4696), 'os.path.exists', 'os.path.exists', (["args['modelPath']"], {}), "(args['modelPath'])\n", (4677, 4696), False, 'import os\n'), ((4790, 4800), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4798, 4800), False, 'import sys\n'), ((6524, 6534), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6532, 6534), False, 'import sys\n'), ((2597, 2617), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (2606, 2617), True, 'import numpy as np\n'), ((2669, 2689), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (2678, 2689), True, 'import numpy as np\n'), ((3612, 3637), 'numpy.arange', 'np.arange', (['PLATE_TEXT_LEN'], {}), '(PLATE_TEXT_LEN)\n', (3621, 3637), True, 'import numpy as np\n'), ((4834, 4865), 'os.path.exists', 'os.path.exists', (["args['annFile']"], {}), "(args['annFile'])\n", (4848, 4865), False, 'import os\n'), ((4959, 4969), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4967, 4969), False, 'import sys\n'), ((2184, 2195), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2193, 2195), False, 'import os\n')] |
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
import numpy as np
import pytest
import os
import imageio
import matplotlib as mpl
import dps
from dps.datasets.load import load_backgrounds
from dps.datasets.base import EmnistDataset
from dps.utils import NumpySeed, resize_image
from auto_yolo.tf_ops import render_sprites
def get_session():
    """Build a TF session that logs op/device placement (debug aid)."""
    config = tf.ConfigProto(log_device_placement=True)
    return tf.Session(config=config)
def squash_01(val, squash_factor):
assert ((0 <= val) * (val <= 1)).all()
val = np.array(val, dtype=np.float32)
if squash_factor:
assert squash_factor > 0
return (val - 0.5) * squash_factor + 0.5
else:
return val
def _colorize(img, color=None):
    """Turn a gray-scale image into an RGBA uint8 image of the given color.

    The RGB planes are the flat ``color``; the input channel itself becomes
    the alpha plane, so bright pixels are opaque and dark ones transparent.
    """
    rgb_frac = np.array(mpl.colors.to_rgb(color))[None, None, :]
    rgb_plane = np.tile(np.uint8(255. * rgb_frac), img.shape + (1,))
    alpha_plane = img[:, :, None]
    return np.concatenate([rgb_plane, alpha_plane], axis=2).astype(np.uint8)
def make_patch(patch_shape, color, shape, importance):
    """Load a named shape sprite, colorize it and append an importance map.

    Reads ``<dps>/datasets/shapes/<shape>.png``, resizes its alpha channel
    to ``patch_shape``, colorizes it, scales to [0, 1] floats, and appends
    ``max(importance * alpha, 0.01)`` as a 5th channel.
    """
    sprite_file = os.path.join(
        os.path.dirname(dps.__file__), "datasets/shapes", "{}.png".format(shape))
    patch = imageio.imread(sprite_file)
    patch = resize_image(patch[..., 3], patch_shape)
    patch = _colorize(patch, color)
    patch = (patch / 255.).astype('f')
    importance_map = np.maximum(importance * patch[..., 3:4], 0.01)
    return np.concatenate([patch, importance_map], axis=2)
def _get_data():
    """Build a random multi-"flight" sprite batch for the __main__ demo.

    Returns ``(sprites, scales, offsets, backgrounds)`` where the first
    three are per-flight lists (one entry per sprite resolution) and
    ``backgrounds`` is a (batch, *image_shape, 3) float32 array.  Draws
    from ``np.random``, so results depend on the caller's seed.
    """
    image_shape = (100, 100)
    batch_size = 16
    # NOTE(review): the next two assignments are dead — both `shapes` and
    # `n_sprites` are overwritten below; leftovers from experimentation.
    shapes = ((50, 50), (25, 25), (12, 12), (6, 6))
    n_sprites = [4, 8, 16, 32]
    # sprite_shapes = [(14, 14)]
    # n_sprites = [2]
    # n_sprites = [16]
    sprite_shapes = [(50, 50), (25, 25), (12, 12)]
    n_sprites = [2, 4, 8]
    n_flights = len(n_sprites)
    # Names of the sprite PNGs shipped with dps (see make_patch).
    shapes = 'circle diamond hollow_circle plus star triangle ud_triangle x'.split()
    colors = list('rgbcmy')
    bg_colors = list('w')
    sprites = [[] for i in range(n_flights)]
    sprite_color_names = [[] for i in range(n_flights)]
    sprite_shape_names = [[] for i in range(n_flights)]
    backgrounds = []
    for b in range(batch_size):
        for i, (ss, ns) in enumerate(zip(sprite_shapes, n_sprites)):
            # Random colour/shape per sprite; importance grows 4x per
            # flight — presumably so later flights dominate when sprites
            # overlap (confirm against render_sprites' compositing).
            c = np.random.choice(colors, size=ns)
            s = np.random.choice(shapes, size=ns)
            importances = [4**i] * ns
            patches = [make_patch(ss, _c, _s, i) for _c, _s, i in zip(c, s, importances)]
            sprites[i].append(patches)
            sprite_color_names[i].append(c)
            sprite_shape_names[i].append(s)
        # Background: a full-size white shape image with alpha dropped.
        bg_color = np.random.choice(bg_colors)
        bg_shape = np.random.choice(shapes)
        bg = make_patch(image_shape, bg_color, bg_shape, 1.0)
        bg = bg[..., :3]
        backgrounds.append(bg)
    sprites = [np.array(s).astype('f') for s in sprites]
    # Scale each flight so its sprites render at native pixel size
    # (sprite_shape / image_shape, per axis).
    scales = [
        (np.ones((batch_size, ns, 2)) * (np.array(ss) / np.array(image_shape))).astype('f')
        for ss, ns in zip(sprite_shapes, n_sprites)]
    # Uniform random positions kept fully inside the image: offset <= 1 - scale.
    offsets = [np.random.rand(*s.shape).astype('f') * (1-s) for s in scales]
    # Debug dump of what was generated for every batch element.
    for b in range(batch_size):
        print("\n\n")
        print("Batch element : {}".format(b))
        for f in range(n_flights):
            print('\n')
            print('flight : {}'.format(f))
            print(sprite_color_names[f][b])
            print(sprite_shape_names[f][b])
            print(scales[f][b])
            print(offsets[f][b])
    backgrounds = np.array(backgrounds).astype('f')
    return sprites, scales, offsets, backgrounds
def get_data(random_alpha=False, squash=None):
    """Build a small deterministic-shape test batch from EMNIST digits.

    Args:
        random_alpha: if True, sprite alpha channels are uniform random;
            otherwise alpha is 1 where the digit has ink, 0 elsewhere.
        squash: optional factor passed to ``squash_01`` to pull all values
            strictly inside (0, 1) (useful for gradient checking).

    Returns:
        ``([sprites], [scales], [offsets], backgrounds)`` — single-flight
        lists, matching the per-flight list interface of render_sprites.
    """
    draw_shape = (56, 56)
    batch_size = 2
    dset = EmnistDataset(classes=[0, 1, 2, 3], include_blank=False, n_examples=100, shape=(28, 28), one_hot=False)
    white = np.array([1., 1., 1.])[None, None, :]
    black = np.array([0., 0., 0.])[None, None, :]
    green = np.array([0., 1., 0.])[None, None, :]
    cyan = np.array([0., 1., 1.])[None, None, :]
    colours = [white, black, green, cyan]
    # One example image per class 0..3, normalized to [0, 1].
    sprite_pool = [dset.x[list(dset.y).index(idx)][..., None] / 255. for idx in range(4)]
    _sprite_pool = []
    for i, sp in enumerate(sprite_pool):
        colour = colours[i]
        if random_alpha:
            alpha = np.random.rand(*sp[..., :1].shape)
        else:
            alpha = (sp.sum(-1) > 0)[..., None].astype('f')
        alpha = squash_01(alpha, squash)
        sp = colour * sp
        sp = np.concatenate([sp, alpha], axis=-1)
        _sprite_pool.append(sp)
    sprite_pool = _sprite_pool
    first0, first1, first2, first3 = sprite_pool
    sprites0 = np.stack([first0, first1, first2, first3], axis=0)
    # Second batch element: reversed order, last slot an all-zero sprite.
    sprites1 = np.stack([first3, first2, first1, np.zeros_like(first1)], axis=0)
    sprites = np.stack([sprites0, sprites1], axis=0).astype('f')
    # BUG FIX: `max_sprites` was previously referenced without ever being
    # defined (NameError).  It is the per-element sprite count, i.e. the
    # second axis of the stacked sprites array.
    max_sprites = sprites.shape[1]
    scales = np.ones((batch_size, max_sprites, 2)).astype('f')
    offsets = np.zeros_like(scales)
    backgrounds = np.array(load_backgrounds("red_x blue_circle", draw_shape)) / 255.
    backgrounds = backgrounds.astype('f')
    sprites = squash_01(sprites, squash)
    scales = squash_01(scales, squash)
    offsets = squash_01(offsets, squash)
    backgrounds = squash_01(backgrounds, squash)
    return [sprites], [scales], [offsets], backgrounds
def run(device, show_plots, process_data=None, **get_data_kwargs):
    """Render one fixed-seed batch on ``device``; optionally plot 2 images.

    ``process_data``, if given, may rewrite (sprites, scales, offsets,
    backgrounds) before rendering; by default the data is used as-is.
    """
    with NumpySeed(100):
        raw = get_data(**get_data_kwargs)
        # Fall back to the identity transform when no preprocessing given.
        transform = process_data if process_data is not None else (lambda *x: x)
        sprites, scales, offsets, backgrounds = transform(*raw)
        with tf.device('/{}:0'.format(device)):
            images = render_sprites.render_sprites(sprites, scales, offsets, backgrounds)
            sess = get_session()
            result = sess.run(images)
        # Clamp tiny numeric overshoots so imshow gets valid [0, 1] data.
        result = np.clip(result, 1e-6, 1-1e-6)
        if show_plots:
            import matplotlib.pyplot as plt
            fig, (left, right) = plt.subplots(1, 2)
            left.imshow(result[0])
            right.imshow(result[1])
            plt.show()
def visible_gpu():
    """Return True iff CUDA_VISIBLE_DEVICES names a usable device.

    Looks at the first comma-separated entry of the variable.  Returns
    False when the variable is unset or empty, when the entry is not an
    integer (e.g. a GPU UUID string), or when it is negative (the CUDA
    convention for "hide all GPUs").

    BUG FIX: previously ``os.getenv(...)`` with no default raised
    ``AttributeError`` (None has no ``.split``) whenever the variable was
    unset; now an unset variable simply means "no visible GPU".
    """
    first_entry = os.getenv("CUDA_VISIBLE_DEVICES", "").split(",")[0]
    try:
        device_id = int(first_entry)
    except ValueError:
        return False
    return device_id >= 0
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
def test_render_sprites_mostly_opaque(device, show_plots):
    """Render half-size, fully opaque sprites in the four image quadrants."""
    if device == "gpu" and visible_gpu():
        pytest.xfail("no gpu is visible")

    def process_data(sprites, scales, offsets, backgrounds):
        n_batch, n_per_image, *_ = sprites.shape
        sprites[..., 3] = 1.0  # force every sprite fully opaque
        half_size = 0.5 * np.ones((n_batch, n_per_image, 2)).astype('f')
        quadrants = np.array([[0, 0], [0, 0.5], [0.5, 0], [0.5, 0.5]])
        quadrants = np.tile(quadrants[None, ...], (n_batch, 1, 1)).astype('f')
        return sprites, half_size, quadrants, backgrounds

    run(device, show_plots, process_data)
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
def test_render_sprites_background_alpha(device, show_plots):
    """Render half-size sprites in the four quadrants, keeping each
    sprite's own alpha so the background can show through."""
    if device == "gpu" and visible_gpu():
        pytest.xfail("no gpu is visible")

    def process_data(sprites, scales, offsets, backgrounds):
        n_batch, n_per_image, *_ = sprites.shape
        half_size = 0.5 * np.ones((n_batch, n_per_image, 2)).astype('f')
        quadrants = np.array([[0, 0], [0, 0.5], [0.5, 0], [0.5, 0.5]])
        quadrants = np.tile(quadrants[None, ...], (n_batch, 1, 1)).astype('f')
        return sprites, half_size, quadrants, backgrounds

    run(device, show_plots, process_data)
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
def test_render_sprites_overlap(device, show_plots):
    """Render the default (full-size, overlapping) sprite layout."""
    gpu_requested_but_hidden = device == "gpu" and visible_gpu()
    if gpu_requested_but_hidden:
        pytest.xfail("no gpu is visible")
    run(device, show_plots)
@pytest.mark.skipif(not render_sprites.lib_avail(), reason="_render_sprites.so not available")
@pytest.mark.parametrize("device", "cpu gpu".split())
@pytest.mark.slow
def _test_gradient(device):
    """Finite-difference check of the render_sprites op's gradients.

    ``random_alpha=True`` plus ``squash=0.99`` keeps every input strictly
    inside (0, 1), so the numeric probe (delta=0.002) cannot step across a
    clipping boundary.

    NOTE(review): underscore prefix disables this test under pytest.
    get_data now returns per-flight *lists*, so ``sprites.shape`` etc.
    below would raise AttributeError — likely why it was disabled; needs
    updating to the list interface before re-enabling.
    """
    if device == "gpu" and visible_gpu():
        pytest.xfail("no gpu is visible")
    with NumpySeed(100):
        with tf.device('/{}:0'.format(device)):
            sprites, scales, offsets, backgrounds = get_data(random_alpha=True, squash=0.99)
            sprites_tf = constant_op.constant(sprites)
            scales_tf = constant_op.constant(scales)
            offsets_tf = constant_op.constant(offsets)
            backgrounds_tf = constant_op.constant(backgrounds)
            images = render_sprites.render_sprites(sprites_tf, scales_tf, offsets_tf, backgrounds_tf)
            sess = get_session()
            with sess.as_default():
                with tf.device(device):
                    # Max |analytic - numeric| Jacobian entry over all inputs.
                    err = gradient_checker.compute_gradient_error(
                        [sprites_tf, scales_tf, offsets_tf, backgrounds_tf],
                        [sprites.shape, scales.shape, offsets.shape, backgrounds.shape],
                        images,
                        backgrounds.shape,
                        [sprites, scales, offsets, backgrounds],
                        delta=0.002)
            print("Jacobian error: {}".format(err))
            threshold = 2e-4
            assert err < threshold, "Jacobian error ({}) exceeded threshold ({})".format(err, threshold)
if __name__ == "__main__":
from contextlib import ExitStack
with NumpySeed(100000):
sprites, scales, offsets, backgrounds = _get_data()
device = 'gpu'
print("Running...")
session_config = tf.ConfigProto()
session_config.log_device_placement = 1
session_config.gpu_options.per_process_gpu_memory_fraction = 0.1
session_config.gpu_options.allow_growth = True
graph = tf.Graph()
sess = tf.Session(graph=graph, config=session_config)
with ExitStack() as stack:
stack.enter_context(graph.as_default())
stack.enter_context(sess)
stack.enter_context(sess.as_default())
sprites_ph = [tf.placeholder(tf.float32, (None, *s.shape[1:])) for s in sprites]
scales_ph = [tf.placeholder(tf.float32, (None, *s.shape[1:])) for s in scales]
offsets_ph = [tf.placeholder(tf.float32, (None, *s.shape[1:])) for s in offsets]
backgrounds_ph = tf.placeholder(tf.float32, (None, *backgrounds.shape[1:]))
with tf.device('/{}:0'.format(device)):
images = render_sprites.render_sprites(sprites_ph, scales_ph, offsets_ph, backgrounds_ph)
d = {}
d.update({ph: a for ph, a in zip(sprites_ph, sprites)})
d.update({ph: a for ph, a in zip(scales_ph, scales)})
d.update({ph: a for ph, a in zip(offsets_ph, offsets)})
d[backgrounds_ph] = backgrounds
result = sess.run(images, feed_dict=d)
from dps.utils import image_to_string
print(image_to_string(result[0, ..., 0]))
print()
print(image_to_string(result[0, ..., 1]))
print()
print(image_to_string(result[0, ..., 2]))
print()
print(result)
print("Done running.")
# Sometimes we get values like 1.0001, nothing really bad.
result = np.clip(result, 1e-6, 1-1e-6)
import matplotlib.pyplot as plt
from dps.utils import square_subplots
fig, axes = square_subplots(len(sprites[0]))
fig.suptitle(device)
for img, ax in zip(result, axes.flatten()):
ax.imshow(img)
plt.show()
| [
"numpy.uint8",
"numpy.clip",
"auto_yolo.tf_ops.render_sprites.render_sprites",
"numpy.random.rand",
"dps.datasets.load.load_backgrounds",
"numpy.array",
"dps.utils.resize_image",
"matplotlib.colors.to_rgb",
"pytest.xfail",
"dps.datasets.base.EmnistDataset",
"tensorflow.Graph",
"tensorflow.Sess... | [((587, 618), 'numpy.array', 'np.array', (['val'], {'dtype': 'np.float32'}), '(val, dtype=np.float32)\n', (595, 618), True, 'import numpy as np\n'), ((849, 873), 'matplotlib.colors.to_rgb', 'mpl.colors.to_rgb', (['color'], {}), '(color)\n', (866, 873), True, 'import matplotlib as mpl\n'), ((929, 952), 'numpy.uint8', 'np.uint8', (['(255.0 * color)'], {}), '(255.0 * color)\n', (937, 952), True, 'import numpy as np\n'), ((963, 995), 'numpy.tile', 'np.tile', (['color', '(img.shape + (1,))'], {}), '(color, img.shape + (1,))\n', (970, 995), True, 'import numpy as np\n'), ((1254, 1271), 'imageio.imread', 'imageio.imread', (['f'], {}), '(f)\n', (1268, 1271), False, 'import imageio\n'), ((1284, 1324), 'dps.utils.resize_image', 'resize_image', (['image[..., 3]', 'patch_shape'], {}), '(image[..., 3], patch_shape)\n', (1296, 1324), False, 'from dps.utils import NumpySeed, resize_image\n'), ((1411, 1457), 'numpy.maximum', 'np.maximum', (['(importance * image[..., 3:4])', '(0.01)'], {}), '(importance * image[..., 3:4], 0.01)\n', (1421, 1457), True, 'import numpy as np\n'), ((1471, 1507), 'numpy.concatenate', 'np.concatenate', (['[image, imp]'], {'axis': '(2)'}), '([image, imp], axis=2)\n', (1485, 1507), True, 'import numpy as np\n'), ((3715, 3822), 'dps.datasets.base.EmnistDataset', 'EmnistDataset', ([], {'classes': '[0, 1, 2, 3]', 'include_blank': '(False)', 'n_examples': '(100)', 'shape': '(28, 28)', 'one_hot': '(False)'}), '(classes=[0, 1, 2, 3], include_blank=False, n_examples=100,\n shape=(28, 28), one_hot=False)\n', (3728, 3822), False, 'from dps.datasets.base import EmnistDataset\n'), ((4644, 4694), 'numpy.stack', 'np.stack', (['[first0, first1, first2, first3]'], {'axis': '(0)'}), '([first0, first1, first2, first3], axis=0)\n', (4652, 4694), True, 'import numpy as np\n'), ((4919, 4940), 'numpy.zeros_like', 'np.zeros_like', (['scales'], {}), '(scales)\n', (4932, 4940), True, 'import numpy as np\n'), ((886, 901), 'numpy.array', 'np.array', (['color'], 
{}), '(color)\n', (894, 901), True, 'import numpy as np\n'), ((1168, 1197), 'os.path.dirname', 'os.path.dirname', (['dps.__file__'], {}), '(dps.__file__)\n', (1183, 1197), False, 'import os\n'), ((2662, 2689), 'numpy.random.choice', 'np.random.choice', (['bg_colors'], {}), '(bg_colors)\n', (2678, 2689), True, 'import numpy as np\n'), ((2709, 2733), 'numpy.random.choice', 'np.random.choice', (['shapes'], {}), '(shapes)\n', (2725, 2733), True, 'import numpy as np\n'), ((3832, 3857), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (3840, 3857), True, 'import numpy as np\n'), ((3882, 3907), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3890, 3907), True, 'import numpy as np\n'), ((3932, 3957), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (3940, 3957), True, 'import numpy as np\n'), ((3981, 4006), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.0]'], {}), '([0.0, 1.0, 1.0])\n', (3989, 4006), True, 'import numpy as np\n'), ((4478, 4514), 'numpy.concatenate', 'np.concatenate', (['[sp, alpha]'], {'axis': '(-1)'}), '([sp, alpha], axis=-1)\n', (4492, 4514), True, 'import numpy as np\n'), ((5374, 5388), 'dps.utils.NumpySeed', 'NumpySeed', (['(100)'], {}), '(100)\n', (5383, 5388), False, 'from dps.utils import NumpySeed, resize_image\n'), ((5804, 5837), 'numpy.clip', 'np.clip', (['result', '(1e-06)', '(1 - 1e-06)'], {}), '(result, 1e-06, 1 - 1e-06)\n', (5811, 5837), True, 'import numpy as np\n'), ((5920, 5938), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (5932, 5938), True, 'import matplotlib.pyplot as plt\n'), ((6007, 6017), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6015, 6017), True, 'import matplotlib.pyplot as plt\n'), ((6444, 6477), 'pytest.xfail', 'pytest.xfail', (['"""no gpu is visible"""'], {}), "('no gpu is visible')\n", (6456, 6477), False, 'import pytest\n'), ((6738, 6788), 'numpy.array', 'np.array', (['[[0, 0], [0, 0.5], [0.5, 
0], [0.5, 0.5]]'], {}), '([[0, 0], [0, 0.5], [0.5, 0], [0.5, 0.5]])\n', (6746, 6788), True, 'import numpy as np\n'), ((6210, 6236), 'auto_yolo.tf_ops.render_sprites.lib_avail', 'render_sprites.lib_avail', ([], {}), '()\n', (6234, 6236), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((7226, 7259), 'pytest.xfail', 'pytest.xfail', (['"""no gpu is visible"""'], {}), "('no gpu is visible')\n", (7238, 7259), False, 'import pytest\n'), ((7465, 7515), 'numpy.array', 'np.array', (['[[0, 0], [0, 0.5], [0.5, 0], [0.5, 0.5]]'], {}), '([[0, 0], [0, 0.5], [0.5, 0], [0.5, 0.5]])\n', (7473, 7515), True, 'import numpy as np\n'), ((6989, 7015), 'auto_yolo.tf_ops.render_sprites.lib_avail', 'render_sprites.lib_avail', ([], {}), '()\n', (7013, 7015), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((7944, 7977), 'pytest.xfail', 'pytest.xfail', (['"""no gpu is visible"""'], {}), "('no gpu is visible')\n", (7956, 7977), False, 'import pytest\n'), ((7716, 7742), 'auto_yolo.tf_ops.render_sprites.lib_avail', 'render_sprites.lib_avail', ([], {}), '()\n', (7740, 7742), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((8254, 8287), 'pytest.xfail', 'pytest.xfail', (['"""no gpu is visible"""'], {}), "('no gpu is visible')\n", (8266, 8287), False, 'import pytest\n'), ((8298, 8312), 'dps.utils.NumpySeed', 'NumpySeed', (['(100)'], {}), '(100)\n', (8307, 8312), False, 'from dps.utils import NumpySeed, resize_image\n'), ((8032, 8058), 'auto_yolo.tf_ops.render_sprites.lib_avail', 'render_sprites.lib_avail', ([], {}), '()\n', (8056, 8058), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((9569, 9586), 'dps.utils.NumpySeed', 'NumpySeed', (['(100000)'], {}), '(100000)\n', (9578, 9586), False, 'from dps.utils import NumpySeed, resize_image\n'), ((9727, 9743), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (9741, 9743), True, 'import tensorflow as tf\n'), ((9937, 9947), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9945, 9947), True, 'import 
tensorflow as tf\n'), ((9963, 10009), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph', 'config': 'session_config'}), '(graph=graph, config=session_config)\n', (9973, 10009), True, 'import tensorflow as tf\n'), ((11414, 11447), 'numpy.clip', 'np.clip', (['result', '(1e-06)', '(1 - 1e-06)'], {}), '(result, 1e-06, 1 - 1e-06)\n', (11421, 11447), True, 'import numpy as np\n'), ((11700, 11710), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11708, 11710), True, 'import matplotlib.pyplot as plt\n'), ((454, 495), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(True)'}), '(log_device_placement=True)\n', (468, 495), True, 'import tensorflow as tf\n'), ((1036, 1072), 'numpy.concatenate', 'np.concatenate', (['[rgb, alpha]'], {'axis': '(2)'}), '([rgb, alpha], axis=2)\n', (1050, 1072), True, 'import numpy as np\n'), ((2302, 2335), 'numpy.random.choice', 'np.random.choice', (['colors'], {'size': 'ns'}), '(colors, size=ns)\n', (2318, 2335), True, 'import numpy as np\n'), ((2352, 2385), 'numpy.random.choice', 'np.random.choice', (['shapes'], {'size': 'ns'}), '(shapes, size=ns)\n', (2368, 2385), True, 'import numpy as np\n'), ((3525, 3546), 'numpy.array', 'np.array', (['backgrounds'], {}), '(backgrounds)\n', (3533, 3546), True, 'import numpy as np\n'), ((4288, 4322), 'numpy.random.rand', 'np.random.rand', (['*sp[..., :1].shape'], {}), '(*sp[..., :1].shape)\n', (4302, 4322), True, 'import numpy as np\n'), ((4744, 4765), 'numpy.zeros_like', 'np.zeros_like', (['first1'], {}), '(first1)\n', (4757, 4765), True, 'import numpy as np\n'), ((4790, 4828), 'numpy.stack', 'np.stack', (['[sprites0, sprites1]'], {'axis': '(0)'}), '([sprites0, sprites1], axis=0)\n', (4798, 4828), True, 'import numpy as np\n'), ((4855, 4892), 'numpy.ones', 'np.ones', (['(batch_size, max_sprites, 2)'], {}), '((batch_size, max_sprites, 2))\n', (4862, 4892), True, 'import numpy as np\n'), ((4969, 5018), 'dps.datasets.load.load_backgrounds', 'load_backgrounds', 
(['"""red_x blue_circle"""', 'draw_shape'], {}), "('red_x blue_circle', draw_shape)\n", (4985, 5018), False, 'from dps.datasets.load import load_backgrounds\n'), ((5646, 5714), 'auto_yolo.tf_ops.render_sprites.render_sprites', 'render_sprites.render_sprites', (['sprites', 'scales', 'offsets', 'backgrounds'], {}), '(sprites, scales, offsets, backgrounds)\n', (5675, 5714), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((8481, 8510), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['sprites'], {}), '(sprites)\n', (8501, 8510), False, 'from tensorflow.python.framework import constant_op\n'), ((8535, 8563), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['scales'], {}), '(scales)\n', (8555, 8563), False, 'from tensorflow.python.framework import constant_op\n'), ((8589, 8618), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['offsets'], {}), '(offsets)\n', (8609, 8618), False, 'from tensorflow.python.framework import constant_op\n'), ((8648, 8681), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['backgrounds'], {}), '(backgrounds)\n', (8668, 8681), False, 'from tensorflow.python.framework import constant_op\n'), ((8704, 8789), 'auto_yolo.tf_ops.render_sprites.render_sprites', 'render_sprites.render_sprites', (['sprites_tf', 'scales_tf', 'offsets_tf', 'backgrounds_tf'], {}), '(sprites_tf, scales_tf, offsets_tf, backgrounds_tf\n )\n', (8733, 8789), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((10024, 10035), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (10033, 10035), False, 'from contextlib import ExitStack\n'), ((10494, 10552), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, *backgrounds.shape[1:])'], {}), '(tf.float32, (None, *backgrounds.shape[1:]))\n', (10508, 10552), True, 'import tensorflow as tf\n'), ((11090, 11124), 'dps.utils.image_to_string', 'image_to_string', (['result[0, ..., 0]'], {}), 
'(result[0, ..., 0])\n', (11105, 11124), False, 'from dps.utils import image_to_string\n'), ((11156, 11190), 'dps.utils.image_to_string', 'image_to_string', (['result[0, ..., 1]'], {}), '(result[0, ..., 1])\n', (11171, 11190), False, 'from dps.utils import image_to_string\n'), ((11222, 11256), 'dps.utils.image_to_string', 'image_to_string', (['result[0, ..., 2]'], {}), '(result[0, ..., 2])\n', (11237, 11256), False, 'from dps.utils import image_to_string\n'), ((2870, 2881), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2878, 2881), True, 'import numpy as np\n'), ((6047, 6080), 'os.getenv', 'os.getenv', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (6056, 6080), False, 'import os\n'), ((6807, 6854), 'numpy.tile', 'np.tile', (['offsets[None, ...]', '(batch_size, 1, 1)'], {}), '(offsets[None, ...], (batch_size, 1, 1))\n', (6814, 6854), True, 'import numpy as np\n'), ((7534, 7581), 'numpy.tile', 'np.tile', (['offsets[None, ...]', '(batch_size, 1, 1)'], {}), '(offsets[None, ...], (batch_size, 1, 1))\n', (7541, 7581), True, 'import numpy as np\n'), ((10214, 10262), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, *s.shape[1:])'], {}), '(tf.float32, (None, *s.shape[1:]))\n', (10228, 10262), True, 'import tensorflow as tf\n'), ((10306, 10354), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, *s.shape[1:])'], {}), '(tf.float32, (None, *s.shape[1:]))\n', (10320, 10354), True, 'import tensorflow as tf\n'), ((10398, 10446), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, *s.shape[1:])'], {}), '(tf.float32, (None, *s.shape[1:]))\n', (10412, 10446), True, 'import tensorflow as tf\n'), ((10631, 10716), 'auto_yolo.tf_ops.render_sprites.render_sprites', 'render_sprites.render_sprites', (['sprites_ph', 'scales_ph', 'offsets_ph', 'backgrounds_ph'], {}), '(sprites_ph, scales_ph, offsets_ph, backgrounds_ph\n )\n', (10660, 10716), False, 'from auto_yolo.tf_ops import render_sprites\n'), ((2936, 2964), 
'numpy.ones', 'np.ones', (['(batch_size, ns, 2)'], {}), '((batch_size, ns, 2))\n', (2943, 2964), True, 'import numpy as np\n'), ((3087, 3111), 'numpy.random.rand', 'np.random.rand', (['*s.shape'], {}), '(*s.shape)\n', (3101, 3111), True, 'import numpy as np\n'), ((6670, 6707), 'numpy.ones', 'np.ones', (['(batch_size, max_sprites, 2)'], {}), '((batch_size, max_sprites, 2))\n', (6677, 6707), True, 'import numpy as np\n'), ((7397, 7434), 'numpy.ones', 'np.ones', (['(batch_size, max_sprites, 2)'], {}), '((batch_size, max_sprites, 2))\n', (7404, 7434), True, 'import numpy as np\n'), ((8876, 8893), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (8885, 8893), True, 'import tensorflow as tf\n'), ((8921, 9171), 'tensorflow.python.ops.gradient_checker.compute_gradient_error', 'gradient_checker.compute_gradient_error', (['[sprites_tf, scales_tf, offsets_tf, backgrounds_tf]', '[sprites.shape, scales.shape, offsets.shape, backgrounds.shape]', 'images', 'backgrounds.shape', '[sprites, scales, offsets, backgrounds]'], {'delta': '(0.002)'}), '([sprites_tf, scales_tf, offsets_tf,\n backgrounds_tf], [sprites.shape, scales.shape, offsets.shape,\n backgrounds.shape], images, backgrounds.shape, [sprites, scales,\n offsets, backgrounds], delta=0.002)\n', (8960, 9171), False, 'from tensorflow.python.ops import gradient_checker\n'), ((2968, 2980), 'numpy.array', 'np.array', (['ss'], {}), '(ss)\n', (2976, 2980), True, 'import numpy as np\n'), ((2983, 3004), 'numpy.array', 'np.array', (['image_shape'], {}), '(image_shape)\n', (2991, 3004), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import pickle
from itertools import permutations
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import simplejson
from dask.dataframe.utils import make_meta as dask_make_meta
from kartothek.core._compat import ARROW_LARGER_EQ_0130
from kartothek.core.common_metadata import (
SchemaWrapper,
_diff_schemas,
_get_common_metadata_key,
empty_dataframe_from_schema,
make_meta,
read_schema_metadata,
store_schema_metadata,
validate_compatible,
validate_shared_columns,
)
from kartothek.serialization import ParquetSerializer
def test_store_schema_metadata(store, df_all_types):
    """Storing a schema writes a parquet ``_common_metadata`` key whose
    arrow schema matches kartothek's normalised types.

    The expectation below documents the normalisation: every int width is
    widened to int64 (uints to uint64), float32 to float64, and the same
    widening applies inside list types.
    """
    store_schema_metadata(
        schema=make_meta(df_all_types, origin="df_all_types"),
        dataset_uuid="some_uuid",
        store=store,
        table="some_table",
    )
    # Key layout: <dataset_uuid>/<table>/_common_metadata
    key = "some_uuid/some_table/_common_metadata"
    assert key in store.keys()
    pq_file = pq.ParquetFile(store.open(key))
    actual_schema = pq_file.schema.to_arrow_schema()
    fields = [
        pa.field("array_float32", pa.list_(pa.float64())),
        pa.field("array_float64", pa.list_(pa.float64())),
        pa.field("array_int16", pa.list_(pa.int64())),
        pa.field("array_int32", pa.list_(pa.int64())),
        pa.field("array_int64", pa.list_(pa.int64())),
        pa.field("array_int8", pa.list_(pa.int64())),
        pa.field("array_uint16", pa.list_(pa.uint64())),
        pa.field("array_uint32", pa.list_(pa.uint64())),
        pa.field("array_uint64", pa.list_(pa.uint64())),
        pa.field("array_uint8", pa.list_(pa.uint64())),
        pa.field("array_unicode", pa.list_(pa.string())),
        pa.field("bool", pa.bool_()),
        pa.field("byte", pa.binary()),
        pa.field("date", pa.date32()),
        pa.field("datetime64", pa.timestamp("us")),
        pa.field("float32", pa.float64()),
        pa.field("float64", pa.float64()),
        pa.field("int16", pa.int64()),
        pa.field("int32", pa.int64()),
        pa.field("int64", pa.int64()),
        pa.field("int8", pa.int64()),
        pa.field("null", pa.null()),
        pa.field("uint16", pa.uint64()),
        pa.field("uint32", pa.uint64()),
        pa.field("uint64", pa.uint64()),
        pa.field("uint8", pa.uint64()),
        pa.field("unicode", pa.string()),
    ]
    # pyarrow < 0.13 serialised the dataframe index as an extra column.
    if not ARROW_LARGER_EQ_0130:
        fields.append(pa.field("__index_level_0__", pa.int64()))
    expected_schema = pa.schema(fields)
    # Compare structure only — schema-level metadata (pandas meta) is stripped.
    assert actual_schema.remove_metadata() == expected_schema
def test_schema_roundtrip(df_all_types, store):
    """A schema written via ``store_schema_metadata`` reads back unchanged."""
    original = make_meta(df_all_types, origin="df_all_types")
    store_schema_metadata(
        original, dataset_uuid="dataset_uuid", store=store, table="table"
    )
    restored = read_schema_metadata(
        dataset_uuid="dataset_uuid", store=store, table="table"
    )
    assert restored == original
def test_pickle(df_all_types):
    """SchemaWrapper instances survive a pickle round trip."""
    original = make_meta(df_all_types, origin="df_all_types")
    restored = pickle.loads(pickle.dumps(original))
    assert original == restored
def test_wrapper(df_all_types):
    """SchemaWrapper exposes, and compares equal to, its wrapped pa.Schema."""
    wrapped = make_meta(df_all_types, origin="df_all_types")
    assert isinstance(wrapped, SchemaWrapper)
    assert isinstance(wrapped.metadata, dict)
    assert isinstance(wrapped.internal(), pa.Schema)

    other = make_meta(df_all_types, origin="df_all_types")
    # Equality and equals() work against both the wrapper and the raw schema.
    assert wrapped == other
    assert wrapped == other.internal()
    assert wrapped.equals(other)
    assert wrapped.equals(other.internal())

    # repr and indexing delegate to the underlying Arrow schema.
    assert repr(wrapped) == repr(wrapped.internal())
    assert isinstance(wrapped[0], pa.Field)
def test_unicode_col():
    """Non-ASCII column names are accepted by make_meta."""
    make_meta(pd.DataFrame({"fö": [1]}), origin="df")
def test_strip_categories():
    """Categorical columns are stripped down to their underlying value type."""
    source_df = pd.DataFrame(
        {"categories": pd.Series(["a", "b", "c", "a"], dtype="category")}
    )
    assert len(source_df["categories"].cat.categories) > 1
    meta = make_meta(source_df, origin="input_df")
    # Categorical types embed their observed values in the type itself;
    # partitions may see different values, so the schema must carry the
    # underlying type instead of a dictionary type.
    assert not pa.types.is_dictionary(meta[0].type)
def test_reorder(df_all_types):
    """Column order of the input frame does not influence the schema."""
    reordered = df_all_types.copy().reindex(reversed(df_all_types.columns), axis=1)
    assert make_meta(df_all_types, origin="df_all_types") == make_meta(
        reordered, origin="df2"
    )
def test_reorder_partition_keys(df_all_types):
    """Partition keys lead the schema in declared order, whatever the input order."""
    partition_keys = ["int8", "uint8", "array_unicode"]
    reordered = df_all_types.copy().reindex(reversed(df_all_types.columns), axis=1)
    expected = make_meta(
        df_all_types, origin="df_all_types", partition_keys=partition_keys
    )
    # Partition key columns must be the leading schema fields.
    assert expected.names[: len(partition_keys)] == partition_keys
    assert make_meta(reordered, origin="df2", partition_keys=partition_keys) == expected
def test_compat_old_rw_path(df_all_types, store):
    """Schemas written by the legacy write path stay readable and compatible."""

    def _keep(col):
        # The old path never supported array/unicode columns (always null)
        # and casted 8/16/32 bit types to 64 bit, so drop all of those.
        if col.startswith("array_") or col == "unicode":
            return False
        return not any(width in col for width in ("8", "16", "32"))

    df = df_all_types[[c for c in df_all_types.columns if _keep(c)]]
    expected_meta = make_meta(df, origin="df")

    # Emulate the old schema write path: plain pyarrow table -> parquet.
    old_meta = dask_make_meta(df)
    buf = pa.BufferOutputStream()
    pq.write_table(pa.Table.from_pandas(old_meta), buf, version="2.0")
    key_old = _get_common_metadata_key("dataset_uuid_old", "table")
    store.put(key_old, buf.getvalue().to_pybytes())

    actual_meta = read_schema_metadata(
        dataset_uuid="dataset_uuid_old", store=store, table="table"
    )
    validate_compatible([actual_meta, expected_meta])

    # The new write path must produce data the serializer can restore.
    store_schema_metadata(
        schema=make_meta(df, origin="df"),
        dataset_uuid="dataset_uuid_new",
        store=store,
        table="table",
    )
    key_new = _get_common_metadata_key("dataset_uuid_new", "table")
    actual_df = ParquetSerializer.restore_dataframe(key=key_new, store=store)
    actual_df["date"] = actual_df["date"].dt.date
    pdt.assert_frame_equal(actual_df, old_meta)
def test_validate_compatible_same(df_all_types):
    """Identical schemas (regardless of origin) validate in any combination."""
    schemas = [make_meta(df_all_types, origin=origin) for origin in ("1", "2", "3")]
    # Every prefix, from the empty list up to all three schemas, must pass.
    for upper in range(len(schemas) + 1):
        validate_compatible(schemas[:upper])
@pytest.mark.parametrize("remove_metadata", [True, False])
@pytest.mark.parametrize("ignore_pandas", [True, False])
def test_validate_compatible_other_pandas(df_all_types, remove_metadata, ignore_pandas):
    """Schemas created under different pandas versions validate together,
    unless metadata stripping and pandas checking are in conflict."""

    def _with_pandas(version):
        # Build a schema whose embedded pandas metadata claims ``version``.
        schema = make_meta(df_all_types, origin=version)
        metadata = schema.metadata
        pandas_info = simplejson.loads(metadata[b"pandas"].decode("utf8"))
        pandas_info["pandas_version"] = version
        metadata[b"pandas"] = simplejson.dumps(pandas_info).encode("utf8")
        schema = SchemaWrapper(pa.schema(schema, metadata), version)
        return schema.remove_metadata() if remove_metadata else schema

    schema1 = make_meta(df_all_types, origin="all")
    schema2 = _with_pandas("0.19.0")
    schema3 = _with_pandas("0.99.0")

    if remove_metadata and not ignore_pandas:
        # As long as schema1 still carries its metadata, validation must fail.
        with pytest.raises(ValueError):
            validate_compatible(
                [schema1, schema2, schema3], ignore_pandas=ignore_pandas
            )
        schema1 = schema1.remove_metadata()

    validate_compatible([schema1, schema2, schema3], ignore_pandas=ignore_pandas)
def test_validate_compatible_different(df_all_types):
    """Schemas with differing column sets are rejected as a schema violation."""
    narrowed = df_all_types.loc[:, df_all_types.columns[:2]].copy()
    full_schema = make_meta(df_all_types, origin="1")
    narrow_schema = make_meta(narrowed, origin="2")
    with pytest.raises(ValueError) as exc:
        validate_compatible([full_schema, narrow_schema])
    assert str(exc.value).startswith("Schema violation")
def test_validate_shared_columns_same(df_all_types):
    """Shared-column validation is metadata-sensitive unless told otherwise."""
    with_meta_a = make_meta(df_all_types, origin="1")
    with_meta_b = make_meta(df_all_types, origin="2")
    stripped = make_meta(df_all_types, origin="3").remove_metadata()

    validate_shared_columns([])
    validate_shared_columns([with_meta_a])
    validate_shared_columns([with_meta_a, with_meta_b])
    # Mixing schemas with and without metadata fails ...
    with pytest.raises(ValueError):
        validate_shared_columns([with_meta_a, with_meta_b, stripped])
    # ... unless pandas metadata is explicitly ignored ...
    validate_shared_columns([with_meta_a, with_meta_b, stripped], ignore_pandas=True)
    # ... or all schemas are uniformly stripped.
    validate_shared_columns(
        [with_meta_a.remove_metadata(), with_meta_b.remove_metadata(), stripped]
    )
def test_validate_shared_columns_no_share(df_all_types):
    """Schemas over disjoint column sets always pass shared-column validation."""
    cols = df_all_types.columns
    disjoint = [
        make_meta(df_all_types.loc[:, cols[start : start + 2]], origin=origin)
        for start, origin in ((0, "1"), (2, "2"), (4, "3"))
    ]
    # Every prefix, from the empty list up to all three schemas, must pass.
    for upper in range(len(disjoint) + 1):
        validate_shared_columns(disjoint[:upper])
@pytest.mark.parametrize("remove_metadata", [True, False])
def test_validate_shared_columns_fail(df_all_types, remove_metadata):
    """A shared column with diverging dtypes is rejected, metadata or not."""
    altered = df_all_types.copy()
    altered["uint16"] = altered["uint16"].astype(float)
    reference_schema = make_meta(df_all_types, origin="1")
    altered_schema = make_meta(altered, origin="2")
    if remove_metadata:
        reference_schema = reference_schema.remove_metadata()
        altered_schema = altered_schema.remove_metadata()
    with pytest.raises(ValueError) as exc:
        validate_shared_columns([reference_schema, altered_schema])
    assert str(exc.value).startswith('Found incompatible entries for column "uint16"')
def test_validate_empty_dataframe(
    df_all_types, df_all_types_schema, df_all_types_empty_schema
):
    """An empty-dataframe schema is compatible with any non-empty one."""
    # Check every ordering so the result does not depend on whether the
    # first schema happens to be the empty one.
    for ordering in permutations([df_all_types_schema, df_all_types_empty_schema]):
        validate_compatible(ordering)
    validate_compatible([df_all_types_empty_schema, df_all_types_empty_schema])
@pytest.mark.parametrize(
    "corrupt_column,corrupt_value,corrupt_dtype",
    [
        # reference column is a native type
        ("int8", -1.1, np.float64),
        ("int8", "a", np.object),
        # reference column is an object
        ("unicode", -1.1, np.float64),
        ("unicode", 1, np.int64),
        pytest.param(
            "unicode",
            None,
            None,
            marks=pytest.mark.xfail(
                strict=True,
                reason="This results in a `null` column which cannot be compared and must not fail",
            ),
        ),
        # reference column is a native typed array
        ("array_int64", np.array([1.0], dtype=np.float64), np.object),
        ("array_int64", np.array(["a"], dtype=np.object), np.object),
        # reference column is an object types arrayarray
        ("array_unicode", np.array([1], dtype=np.int8), np.object),
        ("array_unicode", np.array([1.0], dtype=np.float64), np.object),
    ],
)
def test_validate_empty_dataframe_corrupt_raises(
    df_all_types,
    df_all_types_schema,
    df_all_types_empty_schema,
    corrupt_column,
    corrupt_value,
    corrupt_dtype,
):
    """A corrupted column type must always be detected during validation."""
    corrupted = df_all_types.copy()
    corrupted[corrupt_column] = pd.Series([corrupt_value], dtype=corrupt_dtype)
    corrupted_meta = make_meta(corrupted, origin="1")

    # Comparing proper vs. corrupt schema must raise in either order ...
    for ordering in permutations([df_all_types_schema, corrupted_meta]):
        with pytest.raises(ValueError):
            validate_compatible(ordering)

    # ... and the presence of an empty-dataframe schema must not
    # short-circuit the validation.
    for ordering in permutations(
        [df_all_types_schema, corrupted_meta, df_all_types_empty_schema]
    ):
        with pytest.raises(ValueError):
            validate_compatible(ordering)
def test_validate_different_cats_same_type():
    """Categoricals with different categories but the same value type validate."""
    cat_a = pd.DataFrame(
        {"categories": pd.Series(["a", "b", "c", "a"], dtype="category")}
    )
    cat_b = pd.DataFrame(
        {"categories": pd.Series(["f", "e", "e", "f"], dtype="category")}
    )
    plain = pd.DataFrame({"categories": pd.Series(["f", "e", "e", "f"])})
    validate_compatible(
        [
            make_meta(cat_a, origin="1"),
            make_meta(cat_b, origin="2"),
            make_meta(plain, origin="3"),
        ]
    )
def test_validate_different_cats_different_type():
    """Categoricals whose underlying value types differ are incompatible."""
    str_cats = pd.DataFrame(
        {"categories": pd.Series(["a", "b", "c", "a"], dtype="category")}
    )
    bytes_cats = pd.DataFrame(
        {"categories": pd.Series([b"f", b"e", b"e", b"f"], dtype="category")}
    )
    with pytest.raises(ValueError):
        validate_compatible(
            [make_meta(str_cats, origin="1"), make_meta(bytes_cats, origin="2")]
        )
@pytest.mark.parametrize("index", [pd.Int64Index([0]), pd.RangeIndex(start=0, stop=1)])
def test_schema_dataframe_rountrip(index, df_all_types):
    """Schema -> empty dataframe -> schema round trip stays compatible.

    NOTE(review): "rountrip" looks like a typo for "roundtrip"; kept as-is
    since renaming would change the collected test id.
    """
    indexed_df = pd.DataFrame(df_all_types, index=index)
    original_schema = make_meta(indexed_df, origin="1")
    reconstructed = empty_dataframe_from_schema(original_schema, date_as_object=True)
    validate_compatible([original_schema, make_meta(reconstructed, origin="2")])
def test_empty_dataframe_from_schema(df_all_types):
    """empty_dataframe_from_schema yields a zero-row frame with normalized dtypes."""
    schema = make_meta(df_all_types, origin="1")
    actual_df = empty_dataframe_from_schema(schema)

    expected_df = df_all_types.loc[[]]
    expected_df["date"] = pd.Series([], dtype="datetime64[ns]")
    # Numeric columns come back in their 64-bit normalized form; the
    # prefixes are mutually exclusive for this fixture's column names.
    for col in expected_df.columns:
        for prefix, dtype in (("float", float), ("uint", np.uint64), ("int", int)):
            if col.startswith(prefix):
                expected_df[col] = pd.Series([], dtype=dtype)
                break
    pdt.assert_frame_equal(actual_df, expected_df)
def test_empty_dataframe_from_schema_columns(df_all_types):
    """A column subset can be materialized from the schema, preserving order."""
    selected = ["uint64", "int64"]
    schema = make_meta(df_all_types, origin="1")
    actual_df = empty_dataframe_from_schema(schema, selected)
    pdt.assert_frame_equal(actual_df, df_all_types.loc[[], selected])
def test_diff_schemas(df_all_types):
    """_diff_schemas renders a unified diff of the Arrow schema and the
    embedded pandas metadata; both parts are pinned against exact strings."""
    # Prepare a schema with one missing, one additional and one changed column
    df2 = df_all_types.drop(columns=df_all_types.columns[0])
    df2["new_col"] = pd.Series(df_all_types["bool"])
    df2["int16"] = df2["int16"].astype(float)
    df2 = df2.reset_index(drop=True)
    schema2 = make_meta(df2, origin="2")
    schema1 = make_meta(df_all_types, origin="1")
    diff = _diff_schemas(schema1, schema2)
    # NOTE: the expected strings below are exact output of _diff_schemas and
    # must not be reformatted.
    expected_arrow_diff = """Arrow schema:
@@ -1,5 +1,3 @@
-array_float32: list<item: double>
-  child 0, item: double
 array_float64: list<item: double>
   child 0, item: double
 array_int16: list<item: int64>
@@ -26,10 +24,11 @@
 datetime64: timestamp[ns]
 float32: double
 float64: double
-int16: int64
+int16: double
 int32: int64
 int64: int64
 int8: int64
+new_col: bool
 null: null
 uint16: uint64
 uint32: uint64

"""
    expected_pandas_diff = """Pandas_metadata:
@@ -3,12 +3,7 @@
              'name': None,
              'numpy_type': 'object',
              'pandas_type': 'unicode'}],
- 'columns': [{'field_name': 'array_float32',
-              'metadata': None,
-              'name': 'array_float32',
-              'numpy_type': 'object',
-              'pandas_type': 'list[float64]'},
-             {'field_name': 'array_float64',
+ 'columns': [{'field_name': 'array_float64',
               'metadata': None,
               'name': 'array_float64',
               'numpy_type': 'object',
@@ -91,8 +86,8 @@
              {'field_name': 'int16',
               'metadata': None,
               'name': 'int16',
-              'numpy_type': 'int64',
-              'pandas_type': 'int64'},
+              'numpy_type': 'float64',
+              'pandas_type': 'float64'},
              {'field_name': 'int32',
               'metadata': None,
               'name': 'int32',
@@ -108,6 +103,11 @@
               'name': 'int8',
               'numpy_type': 'int64',
               'pandas_type': 'int64'},
+             {'field_name': 'new_col',
+              'metadata': None,
+              'name': 'new_col',
+              'numpy_type': 'bool',
+              'pandas_type': 'bool'},
              {'field_name': 'null',
               'metadata': None,
               'name': 'null',"""
    assert diff == expected_arrow_diff + expected_pandas_diff
def test_validate_schema_non_overlapping_nulls(df_all_types_schema):
    """
    Test that two schemas with non-overlapping null columns are valid
    """
    n_fields = len(df_all_types_schema)
    first_ix = np.random.randint(n_fields)
    second_ix = first_ix
    # Draw until the two corrupted positions differ.
    while second_ix == first_ix:
        second_ix = np.random.randint(n_fields)

    def _null_at(ix):
        # Replace the field at `ix` with a null-typed field of the same name.
        null_field = pa.field(name=df_all_types_schema.names[ix], type=pa.null())
        return df_all_types_schema.set(ix, null_field)

    first_schema = _null_at(first_ix)
    second_schema = _null_at(second_ix)
    for ordering in permutations([first_schema, second_schema]):
        reference_schema = validate_compatible(ordering)
        # The reference schema should be the original schema
        # with the columns reconstructed
        assert df_all_types_schema == reference_schema
| [
"kartothek.core.common_metadata._get_common_metadata_key",
"kartothek.core.common_metadata.read_schema_metadata",
"pyarrow.schema",
"pyarrow.BufferOutputStream",
"kartothek.core.common_metadata.make_meta",
"pickle.dumps",
"kartothek.core.common_metadata.validate_compatible",
"pyarrow.timestamp",
"nu... | [((6869, 6926), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""remove_metadata"""', '[True, False]'], {}), "('remove_metadata', [True, False])\n", (6892, 6926), False, 'import pytest\n'), ((6928, 6983), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ignore_pandas"""', '[True, False]'], {}), "('ignore_pandas', [True, False])\n", (6951, 6983), False, 'import pytest\n'), ((9585, 9642), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""remove_metadata"""', '[True, False]'], {}), "('remove_metadata', [True, False])\n", (9608, 9642), False, 'import pytest\n'), ((2497, 2514), 'pyarrow.schema', 'pa.schema', (['fields'], {}), '(fields)\n', (2506, 2514), True, 'import pyarrow as pa\n'), ((2648, 2694), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""df_all_types"""'}), "(df_all_types, origin='df_all_types')\n", (2657, 2694), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((2699, 2797), 'kartothek.core.common_metadata.store_schema_metadata', 'store_schema_metadata', (['expected_meta'], {'dataset_uuid': '"""dataset_uuid"""', 'store': 'store', 'table': '"""table"""'}), "(expected_meta, dataset_uuid='dataset_uuid', store=\n store, table='table')\n", (2720, 2797), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((2820, 2897), 'kartothek.core.common_metadata.read_schema_metadata', 'read_schema_metadata', ([], {'dataset_uuid': '"""dataset_uuid"""', 'store': 'store', 'table': '"""table"""'}), "(dataset_uuid='dataset_uuid', store=store, table='table')\n", (2840, 2897), False, 'from kartothek.core.common_metadata 
import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((2991, 3037), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""df_all_types"""'}), "(df_all_types, origin='df_all_types')\n", (3000, 3037), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((3046, 3064), 'pickle.dumps', 'pickle.dumps', (['obj1'], {}), '(obj1)\n', (3058, 3064), False, 'import pickle\n'), ((3076, 3091), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (3088, 3091), False, 'import pickle\n'), ((3160, 3206), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""df_all_types"""'}), "(df_all_types, origin='df_all_types')\n", (3169, 3206), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((3352, 3398), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""df_all_types"""'}), "(df_all_types, origin='df_all_types')\n", (3361, 3398), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((3644, 3669), 'pandas.DataFrame', 'pd.DataFrame', (["{'fö': [1]}"], {}), "({'fö': [1]})\n", (3656, 3669), True, 'import pandas as pd\n'), ((3674, 3700), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df'], {'origin': '"""df"""'}), "(df, origin='df')\n", (3683, 3700), False, 'from 
kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((3911, 3949), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['input_df'], {'origin': '"""input_df"""'}), "(input_df, origin='input_df')\n", (3920, 3949), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((4364, 4410), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""df_all_types"""'}), "(df_all_types, origin='df_all_types')\n", (4373, 4410), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((4424, 4452), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df2'], {'origin': '"""df2"""'}), "(df2, origin='df2')\n", (4433, 4452), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((4696, 4773), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""df_all_types"""', 'partition_keys': 'partition_keys'}), "(df_all_types, origin='df_all_types', partition_keys=partition_keys)\n", (4705, 4773), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((4869, 4928), 
'kartothek.core.common_metadata.make_meta', 'make_meta', (['df2'], {'origin': '"""df2"""', 'partition_keys': 'partition_keys'}), "(df2, origin='df2', partition_keys=partition_keys)\n", (4878, 4928), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((5591, 5617), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df'], {'origin': '"""df"""'}), "(df, origin='df')\n", (5600, 5617), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((5662, 5680), 'dask.dataframe.utils.make_meta', 'dask_make_meta', (['df'], {}), '(df)\n', (5676, 5680), True, 'from dask.dataframe.utils import make_meta as dask_make_meta\n'), ((5696, 5726), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['old_meta'], {}), '(old_meta)\n', (5716, 5726), True, 'import pyarrow as pa\n'), ((5737, 5760), 'pyarrow.BufferOutputStream', 'pa.BufferOutputStream', ([], {}), '()\n', (5758, 5760), True, 'import pyarrow as pa\n'), ((5765, 5809), 'pyarrow.parquet.write_table', 'pq.write_table', (['pa_table', 'buf'], {'version': '"""2.0"""'}), "(pa_table, buf, version='2.0')\n", (5779, 5809), True, 'import pyarrow.parquet as pq\n'), ((5824, 5877), 'kartothek.core.common_metadata._get_common_metadata_key', '_get_common_metadata_key', (['"""dataset_uuid_old"""', '"""table"""'], {}), "('dataset_uuid_old', 'table')\n", (5848, 5877), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((5949, 6035), 
'kartothek.core.common_metadata.read_schema_metadata', 'read_schema_metadata', ([], {'dataset_uuid': '"""dataset_uuid_old"""', 'store': 'store', 'table': '"""table"""'}), "(dataset_uuid='dataset_uuid_old', store=store, table=\n 'table')\n", (5969, 6035), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6049, 6098), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[actual_meta, expected_meta]'], {}), '([actual_meta, expected_meta])\n', (6068, 6098), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6275, 6328), 'kartothek.core.common_metadata._get_common_metadata_key', '_get_common_metadata_key', (['"""dataset_uuid_new"""', '"""table"""'], {}), "('dataset_uuid_new', 'table')\n", (6299, 6328), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6345, 6406), 'kartothek.serialization.ParquetSerializer.restore_dataframe', 'ParquetSerializer.restore_dataframe', ([], {'key': 'key_new', 'store': 'store'}), '(key=key_new, store=store)\n', (6380, 6406), False, 'from kartothek.serialization import ParquetSerializer\n'), ((6461, 6504), 'pandas.testing.assert_frame_equal', 'pdt.assert_frame_equal', (['actual_df', 'old_meta'], {}), '(actual_df, old_meta)\n', (6483, 6504), True, 'import pandas.testing as pdt\n'), ((6570, 6605), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""1"""'}), "(df_all_types, origin='1')\n", (6579, 6605), False, 
'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6620, 6655), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""2"""'}), "(df_all_types, origin='2')\n", (6629, 6655), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6670, 6705), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""3"""'}), "(df_all_types, origin='3')\n", (6679, 6705), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6710, 6733), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[]'], {}), '([])\n', (6729, 6733), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6738, 6768), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[schema1]'], {}), '([schema1])\n', (6757, 6768), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6773, 6812), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[schema1, schema2]'], {}), '([schema1, schema2])\n', (6792, 6812), False, 'from 
kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((6817, 6865), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[schema1, schema2, schema3]'], {}), '([schema1, schema2, schema3])\n', (6836, 6865), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((7602, 7639), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""all"""'}), "(df_all_types, origin='all')\n", (7611, 7639), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8036, 8113), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[schema1, schema2, schema3]'], {'ignore_pandas': 'ignore_pandas'}), '([schema1, schema2, schema3], ignore_pandas=ignore_pandas)\n', (8055, 8113), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8247, 8282), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""1"""'}), "(df_all_types, origin='1')\n", (8256, 8282), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8297, 8323), 
'kartothek.core.common_metadata.make_meta', 'make_meta', (['df2'], {'origin': '"""2"""'}), "(df2, origin='2')\n", (8306, 8323), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8541, 8576), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""1"""'}), "(df_all_types, origin='1')\n", (8550, 8576), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8591, 8626), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""2"""'}), "(df_all_types, origin='2')\n", (8600, 8626), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8699, 8726), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[]'], {}), '([])\n', (8722, 8726), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8731, 8765), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1]'], {}), '([schema1])\n', (8754, 8765), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8770, 8813), 
'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1, schema2]'], {}), '([schema1, schema2])\n', (8793, 8813), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8915, 8987), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1, schema2, schema3]'], {'ignore_pandas': '(True)'}), '([schema1, schema2, schema3], ignore_pandas=True)\n', (8938, 8987), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9168, 9237), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types.loc[:, df_all_types.columns[0:2]]'], {'origin': '"""1"""'}), "(df_all_types.loc[:, df_all_types.columns[0:2]], origin='1')\n", (9177, 9237), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9252, 9321), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types.loc[:, df_all_types.columns[2:4]]'], {'origin': '"""2"""'}), "(df_all_types.loc[:, df_all_types.columns[2:4]], origin='2')\n", (9261, 9321), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9336, 9405), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types.loc[:, df_all_types.columns[4:6]]'], {'origin': '"""3"""'}), "(df_all_types.loc[:, 
df_all_types.columns[4:6]], origin='3')\n", (9345, 9405), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9410, 9437), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[]'], {}), '([])\n', (9433, 9437), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9442, 9476), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1]'], {}), '([schema1])\n', (9465, 9476), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9481, 9524), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1, schema2]'], {}), '([schema1, schema2])\n', (9504, 9524), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9529, 9581), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1, schema2, schema3]'], {}), '([schema1, schema2, schema3])\n', (9552, 9581), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9805, 9840), 'kartothek.core.common_metadata.make_meta', 'make_meta', 
(['df_all_types'], {'origin': '"""1"""'}), "(df_all_types, origin='1')\n", (9814, 9840), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((9855, 9881), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df2'], {'origin': '"""2"""'}), "(df2, origin='2')\n", (9864, 9881), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((10498, 10560), 'itertools.permutations', 'permutations', (['[df_all_types_schema, df_all_types_empty_schema]'], {}), '([df_all_types_schema, df_all_types_empty_schema])\n', (10510, 10560), False, 'from itertools import permutations\n'), ((10603, 10678), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[df_all_types_empty_schema, df_all_types_empty_schema]'], {}), '([df_all_types_empty_schema, df_all_types_empty_schema])\n', (10622, 10678), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((12109, 12156), 'pandas.Series', 'pd.Series', (['[corrupt_value]'], {'dtype': 'corrupt_dtype'}), '([corrupt_value], dtype=corrupt_dtype)\n', (12118, 12156), True, 'import pandas as pd\n'), ((12179, 12212), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_corrupt'], {'origin': '"""1"""'}), "(df_corrupt, origin='1')\n", (12188, 12212), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, 
validate_shared_columns\n'), ((12292, 12344), 'itertools.permutations', 'permutations', (['[df_all_types_schema, df_corrupt_meta]'], {}), '([df_all_types_schema, df_corrupt_meta])\n', (12304, 12344), False, 'from itertools import permutations\n'), ((12576, 12655), 'itertools.permutations', 'permutations', (['[df_all_types_schema, df_corrupt_meta, df_all_types_empty_schema]'], {}), '([df_all_types_schema, df_corrupt_meta, df_all_types_empty_schema])\n', (12588, 12655), False, 'from itertools import permutations\n'), ((13111, 13142), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['input_df'], {'origin': '"""1"""'}), "(input_df, origin='1')\n", (13120, 13142), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13156, 13189), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['input_df_2'], {'origin': '"""2"""'}), "(input_df_2, origin='2')\n", (13165, 13189), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13203, 13236), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['input_df_3'], {'origin': '"""3"""'}), "(input_df_3, origin='3')\n", (13212, 13236), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13241, 13284), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[meta, meta_2, meta_3]'], {}), '([meta, meta_2, meta_3])\n', (13260, 13284), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, 
_get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13574, 13605), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['input_df'], {'origin': '"""1"""'}), "(input_df, origin='1')\n", (13583, 13605), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13619, 13652), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['input_df_2'], {'origin': '"""2"""'}), "(input_df_2, origin='2')\n", (13628, 13652), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13889, 13928), 'pandas.DataFrame', 'pd.DataFrame', (['df_all_types'], {'index': 'index'}), '(df_all_types, index=index)\n', (13901, 13928), True, 'import pandas as pd\n'), ((13943, 13968), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df'], {'origin': '"""1"""'}), "(df, origin='1')\n", (13952, 13968), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13985, 14041), 'kartothek.core.common_metadata.empty_dataframe_from_schema', 'empty_dataframe_from_schema', (['schema'], {'date_as_object': '(True)'}), '(schema, date_as_object=True)\n', (14012, 14041), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((14177, 14212), 
'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""1"""'}), "(df_all_types, origin='1')\n", (14186, 14212), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((14229, 14264), 'kartothek.core.common_metadata.empty_dataframe_from_schema', 'empty_dataframe_from_schema', (['schema'], {}), '(schema)\n', (14256, 14264), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((14331, 14368), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""datetime64[ns]"""'}), "([], dtype='datetime64[ns]')\n", (14340, 14368), True, 'import pandas as pd\n'), ((14677, 14723), 'pandas.testing.assert_frame_equal', 'pdt.assert_frame_equal', (['actual_df', 'expected_df'], {}), '(actual_df, expected_df)\n', (14699, 14723), True, 'import pandas.testing as pdt\n'), ((14799, 14834), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""1"""'}), "(df_all_types, origin='1')\n", (14808, 14834), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((14851, 14907), 'kartothek.core.common_metadata.empty_dataframe_from_schema', 'empty_dataframe_from_schema', (['schema', "['uint64', 'int64']"], {}), "(schema, ['uint64', 'int64'])\n", (14878, 14907), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, 
validate_shared_columns\n'), ((14973, 15019), 'pandas.testing.assert_frame_equal', 'pdt.assert_frame_equal', (['actual_df', 'expected_df'], {}), '(actual_df, expected_df)\n', (14995, 15019), True, 'import pandas.testing as pdt\n'), ((15220, 15251), 'pandas.Series', 'pd.Series', (["df_all_types['bool']"], {}), "(df_all_types['bool'])\n", (15229, 15251), True, 'import pandas as pd\n'), ((15350, 15376), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df2'], {'origin': '"""2"""'}), "(df2, origin='2')\n", (15359, 15376), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((15391, 15426), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""1"""'}), "(df_all_types, origin='1')\n", (15400, 15426), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((15438, 15469), 'kartothek.core.common_metadata._diff_schemas', '_diff_schemas', (['schema1', 'schema2'], {}), '(schema1, schema2)\n', (15451, 15469), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((18034, 18077), 'itertools.permutations', 'permutations', (['[first_schema, second_schema]'], {}), '([first_schema, second_schema])\n', (18046, 18077), False, 'from itertools import permutations\n'), ((4186, 4222), 'pyarrow.types.is_dictionary', 'pa.types.is_dictionary', (['meta[0].type'], {}), '(meta[0].type)\n', (4208, 4222), True, 'import pyarrow as pa\n'), ((7121, 7160), 'kartothek.core.common_metadata.make_meta', 
'make_meta', (['df_all_types'], {'origin': 'version'}), '(df_all_types, origin=version)\n', (7130, 7160), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8333, 8358), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8346, 8358), False, 'import pytest\n'), ((8375, 8414), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[schema1, schema2]'], {}), '([schema1, schema2])\n', (8394, 8414), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8823, 8848), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8836, 8848), False, 'import pytest\n'), ((8858, 8910), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1, schema2, schema3]'], {}), '([schema1, schema2, schema3])\n', (8881, 8910), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((10003, 10028), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10016, 10028), False, 'import pytest\n'), ((10045, 10088), 'kartothek.core.common_metadata.validate_shared_columns', 'validate_shared_columns', (['[schema1, schema2]'], {}), '([schema1, schema2])\n', (10068, 10088), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), 
((10570, 10598), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['schemas'], {}), '(schemas)\n', (10589, 10598), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13662, 13687), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13675, 13687), False, 'import pytest\n'), ((13697, 13732), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[meta, meta_2]'], {}), '([meta, meta_2])\n', (13716, 13732), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((13770, 13788), 'pandas.Int64Index', 'pd.Int64Index', (['[0]'], {}), '([0])\n', (13783, 13788), True, 'import pandas as pd\n'), ((13790, 13820), 'pandas.RangeIndex', 'pd.RangeIndex', ([], {'start': '(0)', 'stop': '(1)'}), '(start=0, stop=1)\n', (13803, 13820), True, 'import pandas as pd\n'), ((18106, 18134), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['schemas'], {}), '(schemas)\n', (18125, 18134), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((765, 811), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""df_all_types"""'}), "(df_all_types, origin='df_all_types')\n", (774, 811), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, 
validate_shared_columns\n'), ((1745, 1755), 'pyarrow.bool_', 'pa.bool_', ([], {}), '()\n', (1753, 1755), True, 'import pyarrow as pa\n'), ((1783, 1794), 'pyarrow.binary', 'pa.binary', ([], {}), '()\n', (1792, 1794), True, 'import pyarrow as pa\n'), ((1822, 1833), 'pyarrow.date32', 'pa.date32', ([], {}), '()\n', (1831, 1833), True, 'import pyarrow as pa\n'), ((1867, 1885), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {}), "('us')\n", (1879, 1885), True, 'import pyarrow as pa\n'), ((1916, 1928), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (1926, 1928), True, 'import pyarrow as pa\n'), ((1959, 1971), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (1969, 1971), True, 'import pyarrow as pa\n'), ((2000, 2010), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2008, 2010), True, 'import pyarrow as pa\n'), ((2039, 2049), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2047, 2049), True, 'import pyarrow as pa\n'), ((2078, 2088), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2086, 2088), True, 'import pyarrow as pa\n'), ((2116, 2126), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2124, 2126), True, 'import pyarrow as pa\n'), ((2154, 2163), 'pyarrow.null', 'pa.null', ([], {}), '()\n', (2161, 2163), True, 'import pyarrow as pa\n'), ((2193, 2204), 'pyarrow.uint64', 'pa.uint64', ([], {}), '()\n', (2202, 2204), True, 'import pyarrow as pa\n'), ((2234, 2245), 'pyarrow.uint64', 'pa.uint64', ([], {}), '()\n', (2243, 2245), True, 'import pyarrow as pa\n'), ((2275, 2286), 'pyarrow.uint64', 'pa.uint64', ([], {}), '()\n', (2284, 2286), True, 'import pyarrow as pa\n'), ((2315, 2326), 'pyarrow.uint64', 'pa.uint64', ([], {}), '()\n', (2324, 2326), True, 'import pyarrow as pa\n'), ((2357, 2368), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (2366, 2368), True, 'import pyarrow as pa\n'), ((3784, 3833), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c', 'a']"], {'dtype': '"""category"""'}), "(['a', 'b', 'c', 'a'], dtype='category')\n", (3793, 3833), True, 'import pandas 
as pd\n'), ((6142, 6168), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df'], {'origin': '"""df"""'}), "(df, origin='df')\n", (6151, 6168), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((7437, 7464), 'pyarrow.schema', 'pa.schema', (['schema', 'metadata'], {}), '(schema, metadata)\n', (7446, 7464), True, 'import pyarrow as pa\n'), ((7841, 7866), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7854, 7866), False, 'import pytest\n'), ((7880, 7957), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['[schema1, schema2, schema3]'], {'ignore_pandas': 'ignore_pandas'}), '([schema1, schema2, schema3], ignore_pandas=ignore_pandas)\n', (7899, 7957), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((8641, 8676), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['df_all_types'], {'origin': '"""3"""'}), "(df_all_types, origin='3')\n", (8650, 8676), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((12359, 12384), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12372, 12384), False, 'import pytest\n'), ((12398, 12426), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['schemas'], {}), '(schemas)\n', (12417, 12426), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, 
read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((12684, 12709), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12697, 12709), False, 'import pytest\n'), ((12723, 12751), 'kartothek.core.common_metadata.validate_compatible', 'validate_compatible', (['schemas'], {}), '(schemas)\n', (12742, 12751), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((11339, 11372), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float64'}), '([1.0], dtype=np.float64)\n', (11347, 11372), True, 'import numpy as np\n'), ((11410, 11442), 'numpy.array', 'np.array', (["['a']"], {'dtype': 'np.object'}), "(['a'], dtype=np.object)\n", (11418, 11442), True, 'import numpy as np\n'), ((11539, 11567), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int8'}), '([1], dtype=np.int8)\n', (11547, 11567), True, 'import numpy as np\n'), ((11607, 11640), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float64'}), '([1.0], dtype=np.float64)\n', (11615, 11640), True, 'import numpy as np\n'), ((12852, 12901), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c', 'a']"], {'dtype': '"""category"""'}), "(['a', 'b', 'c', 'a'], dtype='category')\n", (12861, 12901), True, 'import pandas as pd\n'), ((12963, 13012), 'pandas.Series', 'pd.Series', (["['f', 'e', 'e', 'f']"], {'dtype': '"""category"""'}), "(['f', 'e', 'e', 'f'], dtype='category')\n", (12972, 13012), True, 'import pandas as pd\n'), ((13065, 13096), 'pandas.Series', 'pd.Series', (["['f', 'e', 'e', 'f']"], {}), "(['f', 'e', 'e', 'f'])\n", (13074, 13096), True, 'import pandas as pd\n'), ((13390, 13439), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c', 'a']"], {'dtype': '"""category"""'}), "(['a', 'b', 'c', 'a'], dtype='category')\n", (13399, 13439), True, 'import pandas as pd\n'), 
((13501, 13554), 'pandas.Series', 'pd.Series', (["[b'f', b'e', b'e', b'f']"], {'dtype': '"""category"""'}), "([b'f', b'e', b'e', b'f'], dtype='category')\n", (13510, 13554), True, 'import pandas as pd\n'), ((14075, 14107), 'kartothek.core.common_metadata.make_meta', 'make_meta', (['actual_df'], {'origin': '"""2"""'}), "(actual_df, origin='2')\n", (14084, 14107), False, 'from kartothek.core.common_metadata import SchemaWrapper, _diff_schemas, _get_common_metadata_key, empty_dataframe_from_schema, make_meta, read_schema_metadata, store_schema_metadata, validate_compatible, validate_shared_columns\n'), ((14466, 14492), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (14475, 14492), True, 'import pandas as pd\n'), ((14554, 14578), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (14563, 14578), True, 'import pandas as pd\n'), ((14641, 14671), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 'np.uint64'}), '([], dtype=np.uint64)\n', (14650, 14671), True, 'import pandas as pd\n'), ((17783, 17792), 'pyarrow.null', 'pa.null', ([], {}), '()\n', (17790, 17792), True, 'import pyarrow as pa\n'), ((17935, 17944), 'pyarrow.null', 'pa.null', ([], {}), '()\n', (17942, 17944), True, 'import pyarrow as pa\n'), ((1141, 1153), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (1151, 1153), True, 'import pyarrow as pa\n'), ((1200, 1212), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (1210, 1212), True, 'import pyarrow as pa\n'), ((1257, 1267), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (1265, 1267), True, 'import pyarrow as pa\n'), ((1312, 1322), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (1320, 1322), True, 'import pyarrow as pa\n'), ((1367, 1377), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (1375, 1377), True, 'import pyarrow as pa\n'), ((1421, 1431), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (1429, 1431), True, 'import pyarrow as pa\n'), ((1477, 1488), 'pyarrow.uint64', 'pa.uint64', ([], {}), 
'()\n', (1486, 1488), True, 'import pyarrow as pa\n'), ((1534, 1545), 'pyarrow.uint64', 'pa.uint64', ([], {}), '()\n', (1543, 1545), True, 'import pyarrow as pa\n'), ((1591, 1602), 'pyarrow.uint64', 'pa.uint64', ([], {}), '()\n', (1600, 1602), True, 'import pyarrow as pa\n'), ((1647, 1658), 'pyarrow.uint64', 'pa.uint64', ([], {}), '()\n', (1656, 1658), True, 'import pyarrow as pa\n'), ((1705, 1716), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1714, 1716), True, 'import pyarrow as pa\n'), ((2462, 2472), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2470, 2472), True, 'import pyarrow as pa\n'), ((7357, 7390), 'simplejson.dumps', 'simplejson.dumps', (['pandas_metadata'], {}), '(pandas_metadata)\n', (7373, 7390), False, 'import simplejson\n'), ((11089, 11214), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'reason': '"""This results in a `null` column which cannot be compared and must not fail"""'}), "(strict=True, reason=\n 'This results in a `null` column which cannot be compared and must not fail'\n )\n", (11106, 11214), False, 'import pytest\n')] |
import os
import torch, cv2
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
def get_pascal_labels():
    """Return the colour map associating each Pascal VOC class with a colour.

    Returns:
        np.ndarray of shape (21, 3): one RGB row per class, class 0 first.
    """
    colour_map = [
        [0, 0, 0],  # 0: background
        [128, 0, 0], [0, 128, 0], [128, 128, 0],
        [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
        [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
        [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
        [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
        [0, 64, 128],
    ]
    return np.asarray(colour_map)
def encode_segmap(mask):
    """Encode a colour segmentation label image as Pascal class indices.

    Args:
        mask (np.ndarray): raw segmentation label image of dimension
            (M, N, 3), in which the Pascal classes are encoded as colours.

    Returns:
        np.ndarray: class map of shape (M, N); the value at each location
        is the integer index of the matching class colour.
    """
    mask = mask.astype(int)
    class_map = np.zeros(mask.shape[:2], dtype=np.int16)
    for class_idx, colour in enumerate(get_pascal_labels()):
        # Pixels whose RGB triple equals this class colour get its index.
        class_map[(mask == colour).all(axis=-1)] = class_idx
    return class_map.astype(int)
def decode_seg_map_sequence(label_masks):
    """Decode a batch of class-index masks into a torch tensor of colour
    images with channel-first layout (N, 3, H, W)."""
    decoded = np.array([decode_segmap(label_mask) for label_mask in label_masks])
    # (N, H, W, 3) -> (N, 3, H, W) to match torch's channel ordering.
    return torch.from_numpy(decoded.transpose([0, 3, 1, 2]))
def decode_segmap(label_mask, plot=False):
    """Decode segmentation class labels into a color image.

    Args:
        label_mask (np.ndarray): an (M,N) array of integer values denoting
            the class label at each spatial location.
        plot (bool, optional): whether to show the resulting color image
            in a figure instead of returning it.

    Returns:
        (np.ndarray, optional): the decoded (M, N, 3) colour image with
        values scaled to [0, 1]; returned only when ``plot`` is False.
    """
    colours = get_pascal_labels()
    # One working copy per colour channel; pixels whose label is outside
    # 0..20 keep their raw label value, matching the original behaviour.
    channels = [label_mask.copy() for _ in range(3)]
    for class_idx in range(21):
        selected = label_mask == class_idx
        for channel in range(3):
            channels[channel][selected] = colours[class_idx, channel]
    rgb = np.stack(channels, axis=-1) / 255.0
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def fixed_resize(sample, resolution, flagval=None):
    """Resize ``sample`` to ``resolution`` with a sensible interpolation.

    When ``flagval`` is None, nearest-neighbour is used for 2-D inputs
    (keeps label maps intact) and cubic for everything else. Grayscale and
    3-channel images are resized in one call; other channel counts are
    resized channel by channel into a float32 buffer.
    """
    if flagval is None:
        flagval = cv2.INTER_NEAREST if sample.ndim == 2 else cv2.INTER_CUBIC
    if sample.ndim == 2 or (sample.ndim == 3 and sample.shape[2] == 3):
        return cv2.resize(sample, resolution, interpolation=flagval)
    # Multi-channel (non-RGB) input: cv2.resize cannot handle it directly,
    # so resize each channel independently.
    source = sample
    resized = np.zeros(np.append(resolution, source.shape[2]), dtype=np.float32)
    for channel in range(resized.shape[2]):
        resized[:, :, channel] = cv2.resize(
            source[:, :, channel], resolution, interpolation=flagval)
    return resized
def generate_param_report(logfile, param):
    """Write every key/value pair of ``param`` to ``logfile``.

    Each entry is written on its own line as ``key:value``. The file is
    overwritten if it already exists.

    Args:
        logfile (str): path of the report file to write.
        param (dict): parameters to record.
    """
    # Context manager guarantees the handle is closed even if a write
    # raises (the original left the file open on error).
    with open(logfile, 'w') as log_file:
        for key, val in param.items():
            log_file.write(key + ':' + str(val) + '\n')
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
    """2-D cross-entropy loss for dense (per-pixel) prediction.

    Args:
        logit: raw network output of shape (n, c, h, w).
        target: ground-truth labels of shape (n, 1, h, w); the singleton
            channel axis is squeezed away before computing the loss.
        ignore_index (int): label value excluded from the loss.
        weight: optional per-class rescaling weights for the criterion.
        size_average (bool): when True, divide the summed loss by h * w.
        batch_average (bool): when True, additionally divide by n.

    Returns:
        torch.Tensor: scalar loss.
    """
    n, c, h, w = logit.size()
    # logit = logit.permute(0, 2, 3, 1)
    target = target.squeeze(1)
    # reduction='sum' is the documented replacement for the deprecated
    # size_average=False argument; the averaging is applied manually below.
    criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index,
                                reduction='sum')
    loss = criterion(logit, target.long())
    if size_average:
        loss /= (h * w)
    if batch_average:
        loss /= n
    return loss
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
    # Polynomial learning-rate decay: base_lr * (1 - iter_/max_iter)**power.
    return base_lr * ((1 - float(iter_) / max_iter) ** power) | [
"matplotlib.pyplot.imshow",
"numpy.all",
"torch.nn.CrossEntropyLoss",
"numpy.asarray",
"numpy.append",
"numpy.array",
"numpy.zeros",
"cv2.resize",
"matplotlib.pyplot.show"
] | [((323, 649), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, \n 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128,\n 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, \n 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64,\n 128]]'], {}), '([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],\n [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0],\n [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128\n ], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0\n ], [0, 64, 128]])\n', (333, 649), True, 'import numpy as np\n'), ((1190, 1246), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1])'], {'dtype': 'np.int16'}), '((mask.shape[0], mask.shape[1]), dtype=np.int16)\n', (1198, 1246), True, 'import numpy as np\n'), ((2442, 2497), 'numpy.zeros', 'np.zeros', (['(label_mask.shape[0], label_mask.shape[1], 3)'], {}), '((label_mask.shape[0], label_mask.shape[1], 3))\n', (2450, 2497), True, 'import numpy as np\n'), ((3688, 3774), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight', 'ignore_index': 'ignore_index', 'size_average': '(False)'}), '(weight=weight, ignore_index=ignore_index, size_average=\n False)\n', (3707, 3774), True, 'import torch.nn as nn\n'), ((2606, 2621), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {}), '(rgb)\n', (2616, 2621), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2640), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2638, 2640), True, 'import matplotlib.pyplot as plt\n'), ((2958, 3011), 'cv2.resize', 'cv2.resize', (['sample', 'resolution'], {'interpolation': 'flagval'}), '(sample, resolution, interpolation=flagval)\n', (2968, 3011), False, 'import torch, cv2\n'), ((3070, 3105), 'numpy.append', 'np.append', (['resolution', 'tmp.shape[2]'], {}), '(resolution, tmp.shape[2])\n', (3079, 3105), True, 
'import numpy as np\n'), ((3198, 3258), 'cv2.resize', 'cv2.resize', (['tmp[:, :, ii]', 'resolution'], {'interpolation': 'flagval'}), '(tmp[:, :, ii], resolution, interpolation=flagval)\n', (3208, 3258), False, 'import torch, cv2\n'), ((1643, 1662), 'numpy.array', 'np.array', (['rgb_masks'], {}), '(rgb_masks)\n', (1651, 1662), True, 'import numpy as np\n'), ((1328, 1358), 'numpy.all', 'np.all', (['(mask == label)'], {'axis': '(-1)'}), '(mask == label, axis=-1)\n', (1334, 1358), True, 'import numpy as np\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators import base
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import log_loss
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
class BaseTest(tf.test.TestCase):
  """Tests for the tf.contrib.learn base estimators."""

  def testOneDim(self):
    """Fits a linear regressor to y = 2x + 3 and checks the MSE."""
    random.seed(42)
    # Bug fix: the training data below is drawn from np.random, whose
    # state is independent of the stdlib `random` module, so seeding only
    # `random` left the data non-deterministic. Seed NumPy as well.
    np.random.seed(42)
    x = np.random.rand(1000)
    y = 2 * x + 3
    feature_columns = learn.infer_real_valued_columns_from_input(x)
    regressor = learn.LinearRegressor(feature_columns=feature_columns)
    regressor.fit(x, y, max_steps=100)
    score = mean_squared_error(y, np.array(list(regressor.predict(x))))
    self.assertLess(score, 1.0, "Failed with score = {0}".format(score))

  def testIris(self):
    """Trains a linear classifier on Iris and checks accuracy."""
    iris = datasets.load_iris()
    classifier = learn.LinearClassifier(
        feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
        n_classes=3)
    classifier.fit(iris.data, [x for x in iris.target], max_steps=100)
    score = accuracy_score(iris.target, list(classifier.predict(iris.data)))
    self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))

  def testIrisAllVariables(self):
    """Checks the exact variable names a linear classifier creates."""
    iris = datasets.load_iris()
    classifier = learn.LinearClassifier(
        feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
        n_classes=3)
    classifier.fit(iris.data, [x for x in iris.target], max_steps=100)
    self.assertEqual(
        classifier.get_variable_names(),
        ["centered_bias_weight",
         "centered_bias_weight/Adagrad",
         "global_step",
         # Double slashes appear because the column name is empty. If it was not
         # empty, the variable names would be "linear/column_name/weight" etc.
         "linear//weight",
         "linear//weight/Ftrl",
         "linear//weight/Ftrl_1",
         "linear/bias_weight",
         "linear/bias_weight/Ftrl",
         "linear/bias_weight/Ftrl_1"])

  def testIrisSummaries(self):
    """Trains with a model_dir so summaries are written to disk."""
    iris = datasets.load_iris()
    output_dir = tempfile.mkdtemp() + "learn_tests/"
    classifier = learn.LinearClassifier(
        feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
        n_classes=3, model_dir=output_dir)
    classifier.fit(iris.data, iris.target, max_steps=100)
    score = accuracy_score(iris.target, list(classifier.predict(iris.data)))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    # TODO(ipolosukhin): Check that summaries are correctly written.

  def testIrisContinueTraining(self):
    """Verifies that a second fit() call continues improving the model."""
    iris = datasets.load_iris()
    classifier = learn.LinearClassifier(
        feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
        n_classes=3)
    classifier.fit(iris.data, iris.target, steps=100)
    score1 = accuracy_score(iris.target, list(classifier.predict(iris.data)))
    classifier.fit(iris.data, iris.target, steps=500)
    score2 = accuracy_score(iris.target, list(classifier.predict(iris.data)))
    self.assertGreater(
        score2, score1,
        "Failed with score2 {0} <= score1 {1}".format(score2, score1))

  def testIrisStreaming(self):
    """Trains and predicts from generator inputs instead of arrays."""
    iris = datasets.load_iris()

    def iris_data():
      # Endless stream of feature rows for training.
      while True:
        for x in iris.data:
          yield x

    def iris_predict_data():
      # One finite pass over the features for prediction.
      for x in iris.data:
        yield x

    def iris_target():
      # Endless stream of labels, aligned with iris_data().
      while True:
        for y in iris.target:
          yield y

    classifier = learn.LinearClassifier(
        feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
        n_classes=3)
    classifier.fit(iris_data(), iris_target(), max_steps=500)
    score1 = accuracy_score(iris.target,
                            list(classifier.predict(iris.data)))
    score2 = accuracy_score(iris.target,
                            list(classifier.predict(iris_predict_data())))
    self.assertGreater(score1, 0.5, "Failed with score = {0}".format(score1))
    self.assertEqual(score2, score1, "Scores from {0} iterator doesn't "
                     "match score {1} from full "
                     "data.".format(score2, score1))

  def testIris_proba(self):
    """Checks predict_proba() output quality via log loss."""
    # If sklearn available.
    if log_loss:
      random.seed(42)
      iris = datasets.load_iris()
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
          n_classes=3)
      classifier.fit(iris.data, iris.target, max_steps=250)
      score = log_loss(iris.target, list(classifier.predict_proba(iris.data)))
      self.assertLess(score, 0.8, "Failed with score = {0}".format(score))

  def testBoston(self):
    """Fits a linear regressor on the Boston housing dataset."""
    random.seed(42)
    boston = datasets.load_boston()
    regressor = learn.LinearRegressor(
        feature_columns=learn.infer_real_valued_columns_from_input(boston.data))
    regressor.fit(boston.data, boston.target, max_steps=500)
    score = mean_squared_error(
        boston.target, np.array(list(regressor.predict(boston.data))))
    self.assertLess(score, 150, "Failed with score = {0}".format(score))

  def testUnfitted(self):
    """predict() and save() on an unfitted estimator must raise."""
    estimator = learn.TensorFlowEstimator(model_fn=None, n_classes=1)
    with self.assertRaises(base.NotFittedError):
      estimator.predict([1, 2, 3])
    with self.assertRaises(base.NotFittedError):
      estimator.save("/tmp/path")
if __name__ == "__main__":
  # Run all test cases in this module via the TensorFlow test runner.
  tf.test.main()
| [
"numpy.random.rand",
"tensorflow.contrib.learn.python.learn.datasets.load_iris",
"tensorflow.contrib.learn.python.learn.datasets.load_boston",
"random.seed",
"tensorflow.test.main",
"tensorflow.contrib.learn.python.learn.TensorFlowEstimator",
"tempfile.mkdtemp",
"tensorflow.contrib.learn.python.learn.... | [((6403, 6417), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (6415, 6417), True, 'import tensorflow as tf\n'), ((1427, 1442), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1438, 1442), False, 'import random\n'), ((1451, 1471), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1465, 1471), True, 'import numpy as np\n'), ((1512, 1557), 'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['x'], {}), '(x)\n', (1554, 1557), False, 'from tensorflow.contrib.learn.python import learn\n'), ((1574, 1628), 'tensorflow.contrib.learn.python.learn.LinearRegressor', 'learn.LinearRegressor', ([], {'feature_columns': 'feature_columns'}), '(feature_columns=feature_columns)\n', (1595, 1628), False, 'from tensorflow.contrib.learn.python import learn\n'), ((1847, 1867), 'tensorflow.contrib.learn.python.learn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (1865, 1867), False, 'from tensorflow.contrib.learn.python.learn import datasets\n'), ((2279, 2299), 'tensorflow.contrib.learn.python.learn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (2297, 2299), False, 'from tensorflow.contrib.learn.python.learn import datasets\n'), ((3075, 3095), 'tensorflow.contrib.learn.python.learn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (3093, 3095), False, 'from tensorflow.contrib.learn.python.learn import datasets\n'), ((3642, 3662), 'tensorflow.contrib.learn.python.learn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (3660, 3662), False, 'from tensorflow.contrib.learn.python.learn import datasets\n'), ((4230, 4250), 'tensorflow.contrib.learn.python.learn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (4248, 4250), False, 'from tensorflow.contrib.learn.python.learn import datasets\n'), ((5699, 5714), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (5710, 5714), 
False, 'import random\n'), ((5728, 5750), 'tensorflow.contrib.learn.python.learn.datasets.load_boston', 'datasets.load_boston', ([], {}), '()\n', (5748, 5750), False, 'from tensorflow.contrib.learn.python.learn import datasets\n'), ((6151, 6204), 'tensorflow.contrib.learn.python.learn.TensorFlowEstimator', 'learn.TensorFlowEstimator', ([], {'model_fn': 'None', 'n_classes': '(1)'}), '(model_fn=None, n_classes=1)\n', (6176, 6204), False, 'from tensorflow.contrib.learn.python import learn\n'), ((3113, 3131), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3129, 3131), False, 'import tempfile\n'), ((5259, 5274), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (5270, 5274), False, 'import random\n'), ((5288, 5308), 'tensorflow.contrib.learn.python.learn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (5306, 5308), False, 'from tensorflow.contrib.learn.python.learn import datasets\n'), ((1933, 1986), 'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['iris.data'], {}), '(iris.data)\n', (1975, 1986), False, 'from tensorflow.contrib.learn.python import learn\n'), ((2365, 2418), 'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['iris.data'], {}), '(iris.data)\n', (2407, 2418), False, 'from tensorflow.contrib.learn.python import learn\n'), ((3214, 3267), 'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['iris.data'], {}), '(iris.data)\n', (3256, 3267), False, 'from tensorflow.contrib.learn.python import learn\n'), ((3728, 3781), 'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['iris.data'], {}), '(iris.data)\n', (3770, 3781), False, 'from tensorflow.contrib.learn.python import learn\n'), ((4565, 4618), 
'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['iris.data'], {}), '(iris.data)\n', (4607, 4618), False, 'from tensorflow.contrib.learn.python import learn\n'), ((5814, 5869), 'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['boston.data'], {}), '(boston.data)\n', (5856, 5869), False, 'from tensorflow.contrib.learn.python import learn\n'), ((5378, 5431), 'tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input', 'learn.infer_real_valued_columns_from_input', (['iris.data'], {}), '(iris.data)\n', (5420, 5431), False, 'from tensorflow.contrib.learn.python import learn\n')] |
# import modules
import numpy as np
from unitvector import unitvector
from azimuthangle import azimuthangle
'''
tangentlineatcirclept means: 'tangent--line at circle point'
# Description.
Calculates a line that is tangent to a specified point that belongs to a
circular arc. The code verifies whether the point belongs to the circular
arc equation (i.e. the circle), and tolerates points that are merely close
to the circle by recalculating the y coordinate based on the x coordinate
of the point. If the point belongs to the circle of the arc, it verifies
whether that point is between the limits that define the arc.
# External sub-function(s): unitvector, azimuthangle.
# Input(s).
Two-dimensional vector that defines the point that belongs to the circle
(atCirclePointVec);
Structure of the circular arc to which the circle belongs and at which
the tangent line is to be obtained (slipCircleSTR). It is obtained previously
with 'defineslipcircle' function. This structure has the following fields:
center: center of the slip arc;
radius: radius of the slip arc;
iniAngGrad: counter clockwise angle (in sexagesimal grades)
from a reference unit vector [1 0] to the initial radius that
defines the arc;
endAngGrad: counter clockwise angle (in sexagesimal grades)
from a reference unit vector [1 0] to the final radius that
defines the arc;
deepDist: deepest distance from toe--point horizontal
reference where the arc passes;
leftDist: most left distance from toe--point vertical
reference where the arc passes;
# Output(s).
Boolean variable with a true value if the given point is inside the
circular arc (isPtBetweenArcLimitsTrue).
Structure of an infinite line (tangentLineSTR). The following fields forms
this structure:
refPtVec
unitVec
slope
nearestAzimuthAngRad
farestAzimuthAngRad
intercept
Among the fields of this structure, most are easy to understand;
'nearestAzimuthAngRad' is the azimuth angle (given in radians)
of the tangent direction that is nearest to the reference vector [1 0] when
turning in the counterclockwise sense.
'farestAzimuthAngRad' is the analogous azimuth angle of the opposite
tangent direction.
# Example1:
atCirclePointVec = np.array([16.7036, 14.1941])
slipCircleSTR = {'center':np.array([31.3936, 45.3936]), 'radius':34.4848,
'iniAngGrad': 218.3437, 'endAngGrad': 284.4522, 'deepDist': 10.9088,
'leftDist': -3.0912}
---
isPtBetweenArcLimitsTrue, tangentLineSTR = tangentlineatcirclept\
(atCirclePointVec, slipCircleSTR)
'''
def tangentlineatcirclept(atCirclePointVec, slipCircleSTR):
    """Obtain the line tangent to a circular arc at a given point.

    The point is projected radially onto the circle (so points that are
    merely close to the circle are tolerated), and the function reports
    whether the projected point lies between the angular limits that
    define the arc.

    Inputs:
        atCirclePointVec: two-dimensional point assumed to lie on (or
            near) the circle of the arc.
        slipCircleSTR: dict describing the arc ('center', 'radius',
            'iniAngGrad', 'endAngGrad', ...), as built previously with
            the 'defineslipcircle' function.

    Outputs:
        isPtBetweenArcLimitsTrue: True if the projected point lies
            between the arc's angular extremes.
        tangentLineSTR: dict with fields 'refPtVec', 'unitVec', 'slope',
            'nearestAzimuthAngRad', 'farestAzimuthAngRad', 'intercept'.
    """
    # Radial direction from the circle centre towards the given point.
    ptCenVec = atCirclePointVec-slipCircleSTR['center']
    unitPtCenVec = unitvector(ptCenVec)
    # Project the point radially onto the circle. The original code
    # computed this same projection in both branches of a distance
    # tolerance check, so the check had no observable effect and was
    # removed as dead code.
    newAtCirclePtVec = slipCircleSTR['center']+slipCircleSTR['radius']\
        *unitPtCenVec
    # Tangent line parameters: the tangent is perpendicular to the radius.
    tangentPoint = newAtCirclePtVec
    unitTangentVec = np.array([unitPtCenVec[1], -1*unitPtCenVec[0]])
    if unitTangentVec[0] == 0:
        tangentSlope = np.inf
    else:
        tangentSlope = unitTangentVec[1]/unitTangentVec[0]
    tangentIntercept = tangentPoint[1]-tangentSlope*tangentPoint[0]
    # Is the projected point between the angular extremes of the arc?
    azimuthAngleGrad = azimuthangle(unitPtCenVec)*180/np.pi
    isPtBetweenArcLimitsTrue = bool(
        slipCircleSTR['iniAngGrad'] <= azimuthAngleGrad
        <= slipCircleSTR['endAngGrad'])
    # Azimuths of the two opposite tangent senses; the smaller one is the
    # 'nearest' to the reference vector [1 0] (min/max replaces the
    # original if/elif chain, including its redundant equality branch).
    angle1Rad = azimuthangle(unitTangentVec)
    angle2Rad = azimuthangle(-1*unitTangentVec)
    nearestAzimuthAngRad = min(angle1Rad, angle2Rad)
    farestAzimuthAngRad = max(angle1Rad, angle2Rad)
    # Building the structure
    tangentLineSTR = {
        'refPtVec': tangentPoint,
        'unitVec': unitTangentVec,
        'slope': tangentSlope,
        'nearestAzimuthAngRad': nearestAzimuthAngRad,
        'farestAzimuthAngRad': farestAzimuthAngRad,
        'intercept': tangentIntercept}
    return isPtBetweenArcLimitsTrue, tangentLineSTR
'''
BSD 2 license.
Copyright (c) 2016, Universidad Nacional de Colombia, <NAME>.
Suarez-Burgoa and <NAME>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| [
"numpy.array",
"numpy.dot",
"azimuthangle.azimuthangle",
"unitvector.unitvector"
] | [((2920, 2940), 'unitvector.unitvector', 'unitvector', (['ptCenVec'], {}), '(ptCenVec)\n', (2930, 2940), False, 'from unitvector import unitvector\n'), ((3712, 3761), 'numpy.array', 'np.array', (['[unitPtCenVec[1], -1 * unitPtCenVec[0]]'], {}), '([unitPtCenVec[1], -1 * unitPtCenVec[0]])\n', (3720, 3761), True, 'import numpy as np\n'), ((4424, 4452), 'azimuthangle.azimuthangle', 'azimuthangle', (['unitTangentVec'], {}), '(unitTangentVec)\n', (4436, 4452), False, 'from azimuthangle import azimuthangle\n'), ((4470, 4503), 'azimuthangle.azimuthangle', 'azimuthangle', (['(-1 * unitTangentVec)'], {}), '(-1 * unitTangentVec)\n', (4482, 4503), False, 'from azimuthangle import azimuthangle\n'), ((2995, 3021), 'numpy.dot', 'np.dot', (['ptCenVec', 'ptCenVec'], {}), '(ptCenVec, ptCenVec)\n', (3001, 3021), True, 'import numpy as np\n'), ((4046, 4072), 'azimuthangle.azimuthangle', 'azimuthangle', (['unitPtCenVec'], {}), '(unitPtCenVec)\n', (4058, 4072), False, 'from azimuthangle import azimuthangle\n')] |
import cv2
import sys
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import sleep
from keras.models import load_model
from scipy import stats
from collections import Counter
class EmotionFacePredictor():
'''
Class for handling model building and new data classification
'''
def __init__(self, home, cv2_path, model_path):
self.home = home # where script lives
self.cv2_path = cv2_path # where face processing files can be found (from cv2)
self.cascade_file = self.cv2_path+'haarcascade_frontalface_alt.xml'
self.model_path = model_path
self.emo_dict = {0:'Angry', 1: 'Fear', 2:'Happy', 3: 'Sad', 4:'Surprise', 5: 'Neutral'} # new dict of output labels
self.x_range = list(range(6))
self.emo_list = list(self.emo_dict.values()) # labels
    def run_setup(self):
        """Load the classifier model and face cascade, then prime the model."""
        self.load_model()
        self.load_face_cascade()
        # plt.ion()
        # _make_predict_function is a private Keras API that builds the
        # predict graph eagerly. NOTE(review): presumably needed so
        # predictions work from other threads (e.g. a web server) -- verify.
        self.best_model._make_predict_function()
def load_model(self):
if os.path.exists(self.model_path):
self.best_model = load_model(self.model_path)
else:
print(f'Model not found check path:\n{self.model_path}')
def load_face_cascade(self):
if os.path.exists(self.cascade_file):
self.faceCascade = cv2.CascadeClassifier(self.cascade_file)
else:
print(f'Model not found check path:\n{self.cascade_file}')
def classify_faces_image(self, img):
self.img = cv2.imread(img)
self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) # convert img to grayscale
faces = self.faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
print(f'Found {len(faces)} faces')
if len(faces)>0:
# Create array to average responses
face_paths = []
df_probas = []
df_predict = []
cnt = 1
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(self.gray, (x, y), (x+w, y+h), (0, 255, 0), 2)
self.sub_face = self.gray[y:y+h, x:x+w]
sb2 = cv2.resize(self.sub_face, (48, 48))
sb3 = np.expand_dims(sb2, axis=3)
sb4 = np.array([sb3])
f_path = './static/images/face_'+str(cnt)+'.png'
cv2.imwrite(f_path, self.sub_face)
face_paths.append(f_path)
self.test_pred_y = self.best_model.predict_classes(sb4)
self.test_pred_proba = self.best_model.predict_proba(sb4)
print(self.test_pred_y)
print(self.test_pred_proba)
print(self.emo_dict[self.test_pred_y[0]])
cnt +=1
df_probas.append(self.test_pred_proba)
df_predict.append(self.test_pred_y)
print('I SHOULD BE RETURNING STUFF')
return (face_paths, np.array(df_predict), np.array(df_probas))
else:
print('No faces found!')
return None
def classify_faces_video(self, duration=10, write_imgs=False):
self.capture_duration = duration
start_time = time.time()
video_capture = cv2.VideoCapture(0)
# self.results_df
while( int(time.time() - start_time) < self.capture_duration ):
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = self.faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
print(f'Found {len(faces)} faces')
if len(faces)>0:
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
sub_face = frame[y:y+h, x:x+w]
if write_imgs:
face_file_name = "faces/face_" + str(y) + ".jpg"
cv2.imwrite(face_file_name, sub_face)
gray_image = cv2.cvtColor(sub_face, cv2.COLOR_BGR2GRAY)
sb2 = cv2.resize(gray_image, (48, 48))
sb3 = np.expand_dims(sb2, axis=3)
sb4 = np.array([sb3])
test_pred_y = self.best_model.predict_classes(sb4)
test_pred_proba = self.best_model.predict_proba(sb4)
print(test_pred_y)
print(test_pred_proba)
print(self.emo_dict[test_pred_y[0]])
# cv2.imshow('image', sub_face)
plt.title(self.emo_dict[test_pred_y[0]])
plt.xticks(range(6), list(self.emo_dict.values()))
plt.plot(test_pred_proba[0])
# plt.bar(self.x_range, test_pred_proba[0])
# plt.draw()
# sleep(.5)
# plt.pause(0.5)
# plt.clf()
# cv2.imshow('Video', frame)
# sleep(1)
if cv2.waitKey(1) & 0xFF == ord('q'):
plt.clf()
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
def classify_faces_recorded_movie(self, file_path, write_imgs=False):
# self.capture_duration = duration
start_time = time.time()
video_capture = cv2.VideoCapture(file_path)
# self.results_df
self.total_df_probas = []
self.total_df_predict = []
# while( int(time.time() - start_time) < self.capture_duration ):
# Capture frame-by-frame
# total_frames = 5000
ret = True
while ret:
ret, frame = video_capture.read()
print ('ret is: ')
print(ret)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = self.faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
print(f'Found {len(faces)} faces')
if len(faces)>0:
# Create array to average responses
self.temp_df_probas = []
self.temp_df_predict = []
cnt = 1
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
sub_face = frame[y:y+h, x:x+w]
if write_imgs:
face_file_name = "faces/face_" + str(y) + ".jpg"
cv2.imwrite(face_file_name, sub_face)
gray_image = cv2.cvtColor(sub_face, cv2.COLOR_BGR2GRAY)
sb2 = cv2.resize(gray_image, (48, 48))
sb3 = np.expand_dims(sb2, axis=3)
sb4 = np.array([sb3])
self.test_pred_y = self.best_model.predict_classes(sb4)
self.test_pred_proba = self.best_model.predict_proba(sb4)
print(self.test_pred_y)
print(self.test_pred_proba)
print(self.emo_dict[self.test_pred_y[0]])
self.temp_df_probas.append(self.test_pred_proba)
self.temp_df_predict.append(self.test_pred_y[0])
self.total_df_probas.append(np.array(self.temp_df_probas).mean(axis=0))
mode = Counter(self.temp_df_predict).most_common(1)
self.total_df_predict.append(mode[0][0])
else:
self.total_df_probas.append(np.array([0, 0, 0, 0, 0, 0]))
self.total_df_predict.append(np.NaN)
if cv2.waitKey(1) & 0xFF == ord('q'):
plt.clf()
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
    # Local dev-box paths; on the EC2 deployment use '/home/ubuntu/efc/src/'
    # as the source directory instead.
    src_dir = '/home/danny/Desktop/galvanize/emotion_face_classification/src/'
    cascade_dir = '/home/danny/anaconda3/lib/python3.6/site-packages/cv2/data/'
    model_file = src_dir + 'CNN_cont.hdf5'
    predictor = EmotionFacePredictor(src_dir, cascade_dir, model_file)
    predictor.run_setup()
    # Example usage:
    #   predictor.classify_faces_image('./faces/face_174.jpg')
    #   predictor.classify_faces_video()
| [
"cv2.rectangle",
"os.path.exists",
"cv2.imwrite",
"keras.models.load_model",
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"collections.Counter",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"time.time",
"numpy... | [((1060, 1091), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (1074, 1091), False, 'import os\n'), ((1279, 1312), 'os.path.exists', 'os.path.exists', (['self.cascade_file'], {}), '(self.cascade_file)\n', (1293, 1312), False, 'import os\n'), ((1532, 1547), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (1542, 1547), False, 'import cv2\n'), ((1568, 1610), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2GRAY'], {}), '(self.img, cv2.COLOR_BGR2GRAY)\n', (1580, 1610), False, 'import cv2\n'), ((3345, 3356), 'time.time', 'time.time', ([], {}), '()\n', (3354, 3356), False, 'import time\n'), ((3381, 3400), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3397, 3400), False, 'import cv2\n'), ((5550, 5573), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5571, 5573), False, 'import cv2\n'), ((5714, 5725), 'time.time', 'time.time', ([], {}), '()\n', (5723, 5725), False, 'import time\n'), ((5750, 5777), 'cv2.VideoCapture', 'cv2.VideoCapture', (['file_path'], {}), '(file_path)\n', (5766, 5777), False, 'import cv2\n'), ((8307, 8330), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8328, 8330), False, 'import cv2\n'), ((1123, 1150), 'keras.models.load_model', 'load_model', (['self.model_path'], {}), '(self.model_path)\n', (1133, 1150), False, 'from keras.models import load_model\n'), ((1345, 1385), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['self.cascade_file'], {}), '(self.cascade_file)\n', (1366, 1385), False, 'import cv2\n'), ((3601, 3640), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3613, 3640), False, 'import cv2\n'), ((6171, 6210), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (6183, 6210), False, 'import cv2\n'), ((2173, 2237), 'cv2.rectangle', 'cv2.rectangle', (['self.gray', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', 
'(2)'], {}), '(self.gray, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (2186, 2237), False, 'import cv2\n'), ((2312, 2347), 'cv2.resize', 'cv2.resize', (['self.sub_face', '(48, 48)'], {}), '(self.sub_face, (48, 48))\n', (2322, 2347), False, 'import cv2\n'), ((2371, 2398), 'numpy.expand_dims', 'np.expand_dims', (['sb2'], {'axis': '(3)'}), '(sb2, axis=3)\n', (2385, 2398), True, 'import numpy as np\n'), ((2422, 2437), 'numpy.array', 'np.array', (['[sb3]'], {}), '([sb3])\n', (2430, 2437), True, 'import numpy as np\n'), ((2519, 2553), 'cv2.imwrite', 'cv2.imwrite', (['f_path', 'self.sub_face'], {}), '(f_path, self.sub_face)\n', (2530, 2553), False, 'import cv2\n'), ((3097, 3117), 'numpy.array', 'np.array', (['df_predict'], {}), '(df_predict)\n', (3105, 3117), True, 'import numpy as np\n'), ((3119, 3138), 'numpy.array', 'np.array', (['df_probas'], {}), '(df_probas)\n', (3127, 3138), True, 'import numpy as np\n'), ((5423, 5432), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5430, 5432), True, 'import matplotlib.pyplot as plt\n'), ((8179, 8188), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8186, 8188), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3457), 'time.time', 'time.time', ([], {}), '()\n', (3455, 3457), False, 'import time\n'), ((4068, 4128), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (4081, 4128), False, 'import cv2\n'), ((4379, 4421), 'cv2.cvtColor', 'cv2.cvtColor', (['sub_face', 'cv2.COLOR_BGR2GRAY'], {}), '(sub_face, cv2.COLOR_BGR2GRAY)\n', (4391, 4421), False, 'import cv2\n'), ((4448, 4480), 'cv2.resize', 'cv2.resize', (['gray_image', '(48, 48)'], {}), '(gray_image, (48, 48))\n', (4458, 4480), False, 'import cv2\n'), ((4508, 4535), 'numpy.expand_dims', 'np.expand_dims', (['sb2'], {'axis': '(3)'}), '(sb2, axis=3)\n', (4522, 4535), True, 'import numpy as np\n'), ((4563, 4578), 'numpy.array', 'np.array', (['[sb3]'], {}), 
'([sb3])\n', (4571, 4578), True, 'import numpy as np\n'), ((4934, 4974), 'matplotlib.pyplot.title', 'plt.title', (['self.emo_dict[test_pred_y[0]]'], {}), '(self.emo_dict[test_pred_y[0]])\n', (4943, 4974), True, 'import matplotlib.pyplot as plt\n'), ((5066, 5094), 'matplotlib.pyplot.plot', 'plt.plot', (['test_pred_proba[0]'], {}), '(test_pred_proba[0])\n', (5074, 5094), True, 'import matplotlib.pyplot as plt\n'), ((5372, 5386), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5383, 5386), False, 'import cv2\n'), ((6797, 6857), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (6810, 6857), False, 'import cv2\n'), ((7108, 7150), 'cv2.cvtColor', 'cv2.cvtColor', (['sub_face', 'cv2.COLOR_BGR2GRAY'], {}), '(sub_face, cv2.COLOR_BGR2GRAY)\n', (7120, 7150), False, 'import cv2\n'), ((7177, 7209), 'cv2.resize', 'cv2.resize', (['gray_image', '(48, 48)'], {}), '(gray_image, (48, 48))\n', (7187, 7209), False, 'import cv2\n'), ((7237, 7264), 'numpy.expand_dims', 'np.expand_dims', (['sb2'], {'axis': '(3)'}), '(sb2, axis=3)\n', (7251, 7264), True, 'import numpy as np\n'), ((7292, 7307), 'numpy.array', 'np.array', (['[sb3]'], {}), '([sb3])\n', (7300, 7307), True, 'import numpy as np\n'), ((8030, 8058), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (8038, 8058), True, 'import numpy as np\n'), ((8128, 8142), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8139, 8142), False, 'import cv2\n'), ((4308, 4345), 'cv2.imwrite', 'cv2.imwrite', (['face_file_name', 'sub_face'], {}), '(face_file_name, sub_face)\n', (4319, 4345), False, 'import cv2\n'), ((7037, 7074), 'cv2.imwrite', 'cv2.imwrite', (['face_file_name', 'sub_face'], {}), '(face_file_name, sub_face)\n', (7048, 7074), False, 'import cv2\n'), ((7866, 7895), 'collections.Counter', 'Counter', (['self.temp_df_predict'], {}), '(self.temp_df_predict)\n', (7873, 7895), False, 'from 
collections import Counter\n'), ((7799, 7828), 'numpy.array', 'np.array', (['self.temp_df_probas'], {}), '(self.temp_df_probas)\n', (7807, 7828), True, 'import numpy as np\n')] |
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp
def make_data_loader(cfg, is_train, is_distributed=False, start_iter=0):
    """Build a ScanNet DataLoader over sparse voxelised point clouds.

    The whole train/val split is read into memory up front; the returned
    loader yields dict batches of voxel coordinates ('x'[0]), colour
    features ('x'[1]) and semantic labels ('y'), with geometric
    augmentation applied on the fly by the collate functions.

    Args:
        cfg: config node; reads SPARSE3D.* voxel settings and
            SOLVER.IMS_PER_BATCH.
        is_train: when True return the training loader, otherwise the
            validation loader.
        is_distributed: unused here — TODO confirm whether distributed
            sampling was meant to be wired in.
        start_iter: unused here.
    """
    scale = cfg.SPARSE3D.VOXEL_SCALE
    full_scale=cfg.SPARSE3D.VOXEL_FULL_SCALE
    val_reps = cfg.SPARSE3D.VAL_REPS
    batch_size = cfg.SOLVER.IMS_PER_BATCH
    dimension=3
    # VALID_CLASS_IDS have been mapped to the range {0,1,...,19}
    VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
    def get_files(split):
        # Resolve the .pth scene tensors listed in the benchmark split file.
        import os
        cur_path = os.path.dirname(os.path.abspath(__file__))
        dset_path = f'{cur_path}/ScanNetTorch'
        with open(f'{cur_path}/Benchmark_Small/scannetv1_{split}.txt') as f:
            scene_names = [l.strip() for l in f.readlines()]
        files = [f'{dset_path}/{scene}/{scene}_vh_clean_2.pth' for scene in scene_names]
        return files
    train,val=[],[]
    # Preload every scene into RAM, using a DataLoader purely as a parallel
    # file reader (one scene per "batch", collate = torch.load of the path).
    for x in torch.utils.data.DataLoader(
            get_files('train'),
            collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
        train.append(x)
    for x in torch.utils.data.DataLoader(
            get_files('val'),
            collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
        val.append(x)
    print('Training examples:', len(train))
    print('Validation examples:', len(val))
    # Elastic distortion kernels: 3-tap box blur along each axis
    blur0=np.ones((3,1,1)).astype('float32')/3
    blur1=np.ones((1,3,1)).astype('float32')/3
    blur2=np.ones((1,1,3)).astype('float32')/3
    def elastic(x,gran,mag):
        # Jitter point positions by a smoothed random vector field sampled
        # on a grid of granularity `gran`, scaled by magnitude `mag`.
        # NOTE(review): uses scipy.interpolate although only scipy.ndimage is
        # imported at the top of the file — confirm this resolves with the
        # scipy version in use.
        bb=np.abs(x).max(0).astype(np.int32)//gran+3
        noise=[np.random.randn(bb[0],bb[1],bb[2]).astype('float32') for _ in range(3)]
        # two rounds of blurring along all three axes smooth the raw noise
        noise=[scipy.ndimage.filters.convolve(n,blur0,mode='constant',cval=0) for n in noise]
        noise=[scipy.ndimage.filters.convolve(n,blur1,mode='constant',cval=0) for n in noise]
        noise=[scipy.ndimage.filters.convolve(n,blur2,mode='constant',cval=0) for n in noise]
        noise=[scipy.ndimage.filters.convolve(n,blur0,mode='constant',cval=0) for n in noise]
        noise=[scipy.ndimage.filters.convolve(n,blur1,mode='constant',cval=0) for n in noise]
        noise=[scipy.ndimage.filters.convolve(n,blur2,mode='constant',cval=0) for n in noise]
        # interpolate the smoothed field at every point position
        ax=[np.linspace(-(b-1)*gran,(b-1)*gran,b) for b in bb]
        interp=[scipy.interpolate.RegularGridInterpolator(ax,n,bounds_error=0,fill_value=0) for n in noise]
        def g(x_):
            return np.hstack([i(x_)[:,None] for i in interp])
        return x+g(x)*mag
    def trainMerge(tbl):
        # Collate fn for training: scene indices -> augmented voxel batch.
        locs=[]
        feats=[]
        labels=[]
        for idx,i in enumerate(tbl):
            a,b,c=train[i] # a:xyz b:color c:label
            m=np.eye(3)+np.random.randn(3,3)*0.1 # aug: position distortion
            m[0][0]*=np.random.randint(0,2)*2-1 # aug: x flip
            m*=scale
            theta=np.random.rand()*2*math.pi # rotation aug
            m=np.matmul(m,[[math.cos(theta),math.sin(theta),0],[-math.sin(theta),math.cos(theta),0],[0,0,1]])
            a=np.matmul(a,m)
            a=elastic(a,6*scale//50,40*scale/50)
            a=elastic(a,20*scale//50,160*scale/50)
            m=a.min(0)
            M=a.max(0)
            q=M-m
            # aug: the centroid between [0,full_scale]
            offset = -m + np.clip(full_scale-M+m-0.001, 0, None) * np.random.rand(3)+np.clip(full_scale-M+m+0.001,None,0)*np.random.rand(3)
            a+=offset
            idxs=(a.min(1)>=0)*(a.max(1)<full_scale)
            assert np.all(idxs), "some points are missed in train"
            a=a[idxs]
            b=b[idxs]
            c=c[idxs]
            # voxel coords get a trailing batch-index column; colour features
            # are perturbed with gaussian noise
            a=torch.from_numpy(a).long()
            locs.append(torch.cat([a,torch.LongTensor(a.shape[0],1).fill_(idx)],1))
            feats.append(torch.from_numpy(b)+torch.randn(3)*0.1)
            labels.append(torch.from_numpy(c))
        locs=torch.cat(locs,0)
        feats=torch.cat(feats,0)
        labels=torch.cat(labels,0)
        #batch_scopes(locs, scale)
        return {'x': [locs,feats], 'y': labels.long(), 'id': tbl}
    # num_workers=20*0 evaluates to 0: augmentation runs in the main process
    train_data_loader = torch.utils.data.DataLoader(
        list(range(len(train))),batch_size=batch_size, collate_fn=trainMerge, num_workers=20*0, shuffle=True)
    # cumulative point offsets / flattened labels over the whole val set,
    # used to map per-batch predictions back to dataset-wide point ids
    valOffsets=[0]
    valLabels=[]
    for idx,x in enumerate(val):
        valOffsets.append(valOffsets[-1]+x[2].size)
        valLabels.append(x[2].astype(np.int32))
    valLabels=np.hstack(valLabels)
    def valMerge(tbl):
        # Collate fn for validation: lighter augmentation (flip + rotation +
        # small translation, no elastic distortion) and global point ids.
        locs=[]
        feats=[]
        labels=[]
        point_ids=[]
        for idx,i in enumerate(tbl):
            a,b,c=val[i]
            m=np.eye(3)
            m[0][0]*=np.random.randint(0,2)*2-1
            m*=scale
            theta=np.random.rand()*2*math.pi
            m=np.matmul(m,[[math.cos(theta),math.sin(theta),0],[-math.sin(theta),math.cos(theta),0],[0,0,1]])
            a=np.matmul(a,m)+full_scale/2+np.random.uniform(-2,2,3)
            m=a.min(0)
            M=a.max(0)
            q=M-m
            offset=-m+np.clip(full_scale-M+m-0.001,0,None)*np.random.rand(3)+np.clip(full_scale-M+m+0.001,None,0)*np.random.rand(3)
            a+=offset
            idxs=(a.min(1)>=0)*(a.max(1)<full_scale)
            assert np.all(idxs), "some points are missed in val"
            a=a[idxs]
            b=b[idxs]
            c=c[idxs]
            a=torch.from_numpy(a).long()
            locs.append(torch.cat([a,torch.LongTensor(a.shape[0],1).fill_(idx)],1))
            feats.append(torch.from_numpy(b))
            labels.append(torch.from_numpy(c))
            point_ids.append(torch.from_numpy(np.nonzero(idxs)[0]+valOffsets[i]))
        locs=torch.cat(locs,0)
        feats=torch.cat(feats,0)
        labels=torch.cat(labels,0)
        point_ids=torch.cat(point_ids,0)
        return {'x': [locs,feats], 'y': labels.long(), 'id': tbl, 'point_ids': point_ids}
    val_data_loader = torch.utils.data.DataLoader(
        list(range(len(val))),batch_size=batch_size, collate_fn=valMerge, num_workers=20,shuffle=True)
    if is_train:
        return train_data_loader
    else:
        return val_data_loader
def locations_to_position(locations, voxel_scale):
    """Convert every voxel-coordinate tensor in *locations* to float positions."""
    positions = []
    for single in locations:
        positions.append(location_to_position(single, voxel_scale))
    return positions
def location_to_position(location, voxel_scale):
    """Convert (N, 4) voxel coordinates back to float xyz positions.

    Column 3 holds the batch index and is dropped; the leading xyz columns
    are divided by the voxel scale to undo the discretisation.
    """
    assert location.shape[1] == 4
    coords = location[:, 0:3]
    return coords.float() / voxel_scale
def batch_scopes(location, voxel_scale):
    """Compute the xyz extent of each batch in a packed (N, 4) location tensor.

    Assumes rows are grouped contiguously by the batch index in column 3.
    Prints each batch's min/max/extent and returns the per-batch extents
    concatenated into one flat tensor.
    """
    num_batches = torch.max(location[:, 3]) + 1
    start = 0
    end = 0
    extents = []
    for b in range(num_batches):
        # advance the window over this batch's contiguous rows
        end = end + torch.sum(location[:, 3] == b)
        pts = location[start:end, 0:3].float() / voxel_scale
        start = end.clone()
        hi = pts.max(0)[0]
        lo = pts.min(0)[0]
        span = hi - lo
        print(f"min:{lo} max:{hi} scope:{span}")
        extents.append(span)
    return torch.cat(extents, 0)
| [
"numpy.clip",
"numpy.random.rand",
"numpy.hstack",
"torch.LongTensor",
"torch.max",
"multiprocessing.cpu_count",
"torch.from_numpy",
"math.cos",
"numpy.array",
"torch.sum",
"numpy.linspace",
"numpy.matmul",
"torch.randn",
"numpy.abs",
"numpy.eye",
"numpy.ones",
"numpy.nonzero",
"nu... | [((616, 701), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,\n 39])\n', (624, 701), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4438, 4458), 'numpy.hstack', 'np.hstack', (['valLabels'], {}), '(valLabels)\n', (4447, 4458), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((6722, 6742), 'torch.cat', 'torch.cat', (['scopes', '(0)'], {}), '(scopes, 0)\n', (6731, 6742), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3926, 3944), 'torch.cat', 'torch.cat', (['locs', '(0)'], {}), '(locs, 0)\n', (3935, 3944), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3956, 3975), 'torch.cat', 'torch.cat', (['feats', '(0)'], {}), '(feats, 0)\n', (3965, 3975), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3988, 4008), 'torch.cat', 'torch.cat', (['labels', '(0)'], {}), '(labels, 0)\n', (3997, 4008), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5590, 5608), 'torch.cat', 'torch.cat', (['locs', '(0)'], {}), '(locs, 0)\n', (5599, 5608), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5620, 5639), 'torch.cat', 'torch.cat', (['feats', '(0)'], {}), '(feats, 0)\n', (5629, 5639), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5652, 5672), 'torch.cat', 'torch.cat', (['labels', '(0)'], {}), '(labels, 0)\n', (5661, 5672), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5688, 5711), 'torch.cat', 'torch.cat', 
(['point_ids', '(0)'], {}), '(point_ids, 0)\n', (5697, 5711), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((6339, 6364), 'torch.max', 'torch.max', (['location[:, 3]'], {}), '(location[:, 3])\n', (6348, 6364), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((6435, 6465), 'torch.sum', 'torch.sum', (['(location[:, 3] == i)'], {}), '(location[:, 3] == i)\n', (6444, 6465), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((768, 793), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (783, 793), False, 'import os\n'), ((1216, 1230), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1228, 1230), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1382, 1396), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1394, 1396), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2386, 2433), 'numpy.linspace', 'np.linspace', (['(-(b - 1) * gran)', '((b - 1) * gran)', 'b'], {}), '(-(b - 1) * gran, (b - 1) * gran, b)\n', (2397, 2433), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3130, 3145), 'numpy.matmul', 'np.matmul', (['a', 'm'], {}), '(a, m)\n', (3139, 3145), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3578, 3590), 'numpy.all', 'np.all', (['idxs'], {}), '(idxs)\n', (3584, 3590), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4615, 4624), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4621, 4624), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5183, 5195), 'numpy.all', 'np.all', (['idxs'], {}), 
'(idxs)\n', (5189, 5195), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1186, 1202), 'torch.load', 'torch.load', (['x[0]'], {}), '(x[0])\n', (1196, 1202), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1352, 1368), 'torch.load', 'torch.load', (['x[0]'], {}), '(x[0])\n', (1362, 1368), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1534, 1552), 'numpy.ones', 'np.ones', (['(3, 1, 1)'], {}), '((3, 1, 1))\n', (1541, 1552), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1579, 1597), 'numpy.ones', 'np.ones', (['(1, 3, 1)'], {}), '((1, 3, 1))\n', (1586, 1597), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1624, 1642), 'numpy.ones', 'np.ones', (['(1, 1, 3)'], {}), '((1, 1, 3))\n', (1631, 1642), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2810, 2819), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2816, 2819), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3894, 3913), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (3910, 3913), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4881, 4908), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(2)', '(3)'], {}), '(-2, 2, 3)\n', (4898, 4908), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5433, 5452), 'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (5449, 5452), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5478, 5497), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (5494, 
5497), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1752, 1788), 'numpy.random.randn', 'np.random.randn', (['bb[0]', 'bb[1]', 'bb[2]'], {}), '(bb[0], bb[1], bb[2])\n', (1767, 1788), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2820, 2841), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (2835, 2841), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2891, 2914), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (2908, 2914), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2968, 2984), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2982, 2984), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3435, 3479), 'numpy.clip', 'np.clip', (['(full_scale - M + m + 0.001)', 'None', '(0)'], {}), '(full_scale - M + m + 0.001, None, 0)\n', (3442, 3479), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3472, 3489), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (3486, 3489), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3698, 3717), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (3714, 3717), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3830, 3849), 'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (3846, 3849), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4644, 4667), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (4661, 4667), True, 'import torch, numpy as np, glob, math, torch.utils.data, 
scipy.ndimage, multiprocessing as mp\n'), ((4706, 4722), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4720, 4722), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4853, 4868), 'numpy.matmul', 'np.matmul', (['a', 'm'], {}), '(a, m)\n', (4862, 4868), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5040, 5084), 'numpy.clip', 'np.clip', (['(full_scale - M + m + 0.001)', 'None', '(0)'], {}), '(full_scale - M + m + 0.001, None, 0)\n', (5047, 5084), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5077, 5094), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (5091, 5094), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5301, 5320), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (5317, 5320), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3036, 3051), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3044, 3051), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3052, 3067), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3060, 3067), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3089, 3104), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3097, 3104), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3376, 3420), 'numpy.clip', 'np.clip', (['(full_scale - M + m - 0.001)', '(0)', 'None'], {}), '(full_scale - M + m - 0.001, 0, None)\n', (3383, 3420), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3417, 3434), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (3431, 
3434), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3850, 3864), 'torch.randn', 'torch.randn', (['(3)'], {}), '(3)\n', (3861, 3864), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4759, 4774), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (4767, 4774), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4775, 4790), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (4783, 4790), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4812, 4827), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (4820, 4827), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4985, 5029), 'numpy.clip', 'np.clip', (['(full_scale - M + m - 0.001)', '(0)', 'None'], {}), '(full_scale - M + m - 0.001, 0, None)\n', (4992, 5029), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5022, 5039), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (5036, 5039), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3073, 3088), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3081, 3088), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4796, 4811), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (4804, 4811), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5543, 5559), 'numpy.nonzero', 'np.nonzero', (['idxs'], {}), '(idxs)\n', (5553, 5559), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((1697, 1706), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1703, 1706), True, 'import torch, 
numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3760, 3791), 'torch.LongTensor', 'torch.LongTensor', (['a.shape[0]', '(1)'], {}), '(a.shape[0], 1)\n', (3776, 3791), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5363, 5394), 'torch.LongTensor', 'torch.LongTensor', (['a.shape[0]', '(1)'], {}), '(a.shape[0], 1)\n', (5379, 5394), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n')] |
import math
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
import sys
sys.path.extend(['../../../gym-guidance-collision-avoidance-single'])
from gym_guidance_collision_avoidance_single.envs.config import Config
__author__ = "<NAME> <<EMAIL>>"
class SingleAircraftEnv(gym.Env):
"""
This is the airspace simulator where we can control single aircraft (yellow aircraft)
to reach the goal position (green star) while avoiding conflicts with other intruder aircraft (red aircraft).
**STATE:**
The state consists all the information needed for the ownship to choose an optimal action:
position, velocity of intruder aircraft
position, velocity, speed, heading angle, bank angle
position of the goal
In the beginning of each episode, the ownship starts in the bottom right corner of the map and the heading angle is
pointing directly to the center of the map.
**ACTIONS:**
The action is either applying +1, 0 or -1 for the change of bank angle and +1, 0, -1 for the change of acceleration
"""
def __init__(self):
self.load_config()
self.state = None
self.viewer = None
# build observation space and action space
state_dimension = self.intruder_size * 4 + 8
self.observation_space = spaces.Box(low=-1000, high=1000, shape=(state_dimension,), dtype=np.float32)
self.action_space = spaces.Discrete(9)
self.position_range = spaces.Box(
low=np.array([0, 0]),
high=np.array([self.window_width, self.window_height]),
dtype=np.float32)
self.seed(2)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def load_config(self):
# input dim
self.window_width = Config.window_width
self.window_height = Config.window_height
self.intruder_size = Config.intruder_size
self.EPISODES = Config.EPISODES
self.G = Config.G
self.tick = Config.tick
self.scale = Config.scale
self.minimum_separation = Config.minimum_separation
self.NMAC_dist = Config.NMAC_dist
self.horizon_dist = Config.horizon_dist
self.initial_min_dist = Config.initial_min_dist
self.goal_radius = Config.goal_radius
self.min_speed = Config.min_speed
self.max_speed = Config.max_speed
def reset(self):
# ownship = recordtype('ownship', ['position', 'velocity', 'speed', 'heading', 'bank'])
# intruder = recordtype('intruder', ['id', 'position', 'velocity'])
# goal = recordtype('goal', ['position'])
# initiate ownship to control
self.drone = Ownship(
position=(50, 50),
speed=self.min_speed,
heading=math.pi/4
)
# randomly generate intruder aircraft and store them in a list
self.intruder_list = []
for _ in range(self.intruder_size):
intruder = Aircraft(
position=self.random_pos(),
speed=self.random_speed(),
heading=self.random_heading(),
)
# new intruder aircraft can appear too close to ownship
while dist(self.drone, intruder) < self.initial_min_dist:
intruder.position = self.random_pos()
self.intruder_list.append(intruder)
# generate a random goal
self.goal = Goal(position=self.random_pos())
# reset the number of conflicts to 0
self.no_conflict = 0
return self._get_ob()
def _get_ob(self):
# state contains pos, vel for all intruder aircraft
# pos, vel, speed, heading for ownship
# goal pos
def normalize_velocity(velocity):
translation = velocity + self.max_speed
return translation / (self.max_speed * 2)
s = []
for aircraft in self.intruder_list:
# (x, y, vx, vy)
s.append(aircraft.position[0] / Config.window_width)
s.append(aircraft.position[1] / Config.window_height)
s.append(normalize_velocity(aircraft.velocity[0]))
s.append(normalize_velocity(aircraft.velocity[1]))
for i in range(1):
# (x, y, vx, vy, speed, heading)
s.append(self.drone.position[0] / Config.window_width)
s.append(self.drone.position[1] / Config.window_height)
s.append(normalize_velocity(self.drone.velocity[0]))
s.append(normalize_velocity(self.drone.velocity[1]))
s.append((self.drone.speed - Config.min_speed) / (Config.max_speed - Config.min_speed))
s.append(self.drone.heading / (2 * math.pi))
s.append(self.goal.position[0] / Config.window_width)
s.append(self.goal.position[1] / Config.window_height)
return np.array(s)
def step(self, action):
# map 0~8 to 3x3 action space
a = np.zeros(2)
a[0] = action // 3
a[1] = action % 3
action = a
# assert self.action_space.contains(action), 'given action is in incorrect shape'
# next state of ownship
self.drone.step(action)
reward, terminal, info = self._terminal_reward()
return self._get_ob(), reward, terminal, info
def _terminal_reward(self):
# step the intruder aircraft
conflict = False
# for each aircraft
for idx in range(self.intruder_size):
intruder = self.intruder_list[idx]
intruder.position += intruder.velocity
dist_intruder = dist(self.drone, intruder)
# if this intruder out of map
if not self.position_range.contains(intruder.position):
self.intruder_list[idx] = self.reset_intruder()
# if there is a conflict
if dist_intruder < self.minimum_separation:
conflict = True
# if conflict status transition from False to True, increase number of conflicts by 1
# if conflict status is True, monitor when this conflict status will be escaped
if intruder.conflict == False:
self.no_conflict += 1
intruder.conflict = True
else:
if not dist_intruder < self.minimum_separation:
intruder.conflict = False
# if there is a near-mid-air-collision
if dist_intruder < self.NMAC_dist:
return -20, True, 'n' # NMAC
# if there is conflict
if conflict:
return -5, False, 'c' # conflict
# if ownship out of map
# if not self.position_range.contains(self.drone.position):
# return -100, True, 'w' # out-of-map
# if ownship reaches goal
if dist(self.drone, self.goal) < self.goal_radius:
return 10, True, 'g' # goal
return -dist(self.drone, self.goal)/1200, False, ''
return 0, False, ''
def render(self, mode='human'):
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(self.window_width, self.window_height)
self.viewer.set_bounds(0, self.window_width, 0, self.window_height)
if self.drone is None:
return None
import os
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# draw ownship
ownship_img = rendering.Image(os.path.join(__location__, 'images/aircraft.png'), 32, 32)
jtransform = rendering.Transform(rotation=self.drone.heading - math.pi/2, translation=self.drone.position)
ownship_img.add_attr(jtransform)
ownship_img.set_color(255, 241, 4) # set it to yellow
self.viewer.onetime_geoms.append(ownship_img)
# draw goal
goal_img = rendering.Image(os.path.join(__location__, 'images/goal.png'), 32, 32)
jtransform = rendering.Transform(rotation=0, translation=self.goal.position)
goal_img.add_attr(jtransform)
goal_img.set_color(15, 210, 81) # green
self.viewer.onetime_geoms.append(goal_img)
# draw intruders
for aircraft in self.intruder_list:
intruder_img = rendering.Image(os.path.join(__location__, 'images/intruder.png'), 32, 32)
jtransform = rendering.Transform(rotation=aircraft.heading - math.pi/2, translation=aircraft.position)
intruder_img.add_attr(jtransform)
intruder_img.set_color(237, 26, 32) # red color
self.viewer.onetime_geoms.append(intruder_img)
return self.viewer.render(return_rgb_array=False)
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
# reset pos, vel, heading of this aircraft
def reset_intruder(self):
intruder = Aircraft(
position=self.random_pos(),
speed=self.random_speed(),
heading=self.random_heading(),
)
while dist(self.drone, intruder) < self.initial_min_dist:
intruder.position = self.random_pos()
return intruder
def random_pos(self):
return np.random.uniform(
low=np.array([0, 0]),
high=np.array([self.window_width, self.window_height])
)
def random_speed(self):
return np.random.uniform(low=self.min_speed, high=self.max_speed)
def random_heading(self):
return np.random.uniform(low=0, high=2*math.pi)
def build_observation_space(self):
s = spaces.Dict({
'own_x': spaces.Box(low=0, high=self.window_width, dtype=np.float32),
'own_y': spaces.Box(low=0, high=self.window_height, dtype=np.float32),
'pos_x': spaces.Box(low=0, high=self.window_width, dtype=np.float32),
'pos_y': spaces.Box(low=0, high=self.window_height, dtype=np.float32),
'heading': spaces.Box(low=0, high=2*math.pi, dtype=np.float32),
'speed': spaces.Box(low=self.min_speed, high=self.max_speed, dtype=np.float32),
})
return s
class Goal:
def __init__(self, position):
self.position = position
class Aircraft:
def __init__(self, position, speed, heading):
self.position = np.array(position, dtype=np.float32)
self.speed = speed
self.heading = heading # rad
vx = self.speed * math.cos(self.heading)
vy = self.speed * math.sin(self.heading)
self.velocity = np.array([vx, vy], dtype=np.float32)
self.conflict = False # track if this aircraft is in conflict with ownship
class Ownship(Aircraft):
def __init__(self, position, speed, heading):
Aircraft.__init__(self, position, speed, heading)
self.load_config()
def load_config(self):
self.G = Config.G
self.scale = Config.scale
self.min_speed = Config.min_speed
self.max_speed = Config.max_speed
self.d_speed = Config.d_speed
self.speed_sigma = Config.speed_sigma
self.position_sigma = Config.position_sigma
self.d_heading = Config.d_heading
self.heading_sigma = Config.heading_sigma
def step(self, a):
self.heading += self.d_heading * (a[0] - 1)
self.heading += np.random.normal(0, self.heading_sigma)
self.speed += self.d_speed * (a[1] - 1)
self.speed = max(self.min_speed, min(self.speed, self.max_speed)) # project to range
self.speed += np.random.normal(0, self.speed_sigma)
vx = self.speed * math.cos(self.heading)
vy = self.speed * math.sin(self.heading)
self.velocity = np.array([vx, vy])
self.position += self.velocity
def dist(object1, object2):
return np.linalg.norm(object1.position - object2.position)
| [
"numpy.random.normal",
"os.path.join",
"gym.spaces.Discrete",
"gym.spaces.Box",
"math.cos",
"numpy.array",
"numpy.zeros",
"gym.envs.classic_control.rendering.Transform",
"sys.path.extend",
"numpy.random.uniform",
"gym.envs.classic_control.rendering.Viewer",
"numpy.linalg.norm",
"os.getcwd",
... | [((107, 176), 'sys.path.extend', 'sys.path.extend', (["['../../../gym-guidance-collision-avoidance-single']"], {}), "(['../../../gym-guidance-collision-avoidance-single'])\n", (122, 176), False, 'import sys\n'), ((11829, 11880), 'numpy.linalg.norm', 'np.linalg.norm', (['(object1.position - object2.position)'], {}), '(object1.position - object2.position)\n', (11843, 11880), True, 'import numpy as np\n'), ((1321, 1397), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1000)', 'high': '(1000)', 'shape': '(state_dimension,)', 'dtype': 'np.float32'}), '(low=-1000, high=1000, shape=(state_dimension,), dtype=np.float32)\n', (1331, 1397), False, 'from gym import spaces\n'), ((1426, 1444), 'gym.spaces.Discrete', 'spaces.Discrete', (['(9)'], {}), '(9)\n', (1441, 1444), False, 'from gym import spaces\n'), ((1704, 1727), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1721, 1727), False, 'from gym.utils import seeding\n'), ((4874, 4885), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (4882, 4885), True, 'import numpy as np\n'), ((4965, 4976), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4973, 4976), True, 'import numpy as np\n'), ((7641, 7741), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'rotation': '(self.drone.heading - math.pi / 2)', 'translation': 'self.drone.position'}), '(rotation=self.drone.heading - math.pi / 2, translation=\n self.drone.position)\n', (7660, 7741), False, 'from gym.envs.classic_control import rendering\n'), ((8025, 8088), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'rotation': '(0)', 'translation': 'self.goal.position'}), '(rotation=0, translation=self.goal.position)\n', (8044, 8088), False, 'from gym.envs.classic_control import rendering\n'), ((9444, 9502), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.min_speed', 'high': 'self.max_speed'}), '(low=self.min_speed, high=self.max_speed)\n', (9461, 9502), True, 'import numpy 
as np\n'), ((9549, 9591), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(2 * math.pi)'}), '(low=0, high=2 * math.pi)\n', (9566, 9591), True, 'import numpy as np\n'), ((10355, 10391), 'numpy.array', 'np.array', (['position'], {'dtype': 'np.float32'}), '(position, dtype=np.float32)\n', (10363, 10391), True, 'import numpy as np\n'), ((10579, 10615), 'numpy.array', 'np.array', (['[vx, vy]'], {'dtype': 'np.float32'}), '([vx, vy], dtype=np.float32)\n', (10587, 10615), True, 'import numpy as np\n'), ((11365, 11404), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.heading_sigma'], {}), '(0, self.heading_sigma)\n', (11381, 11404), True, 'import numpy as np\n'), ((11569, 11606), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.speed_sigma'], {}), '(0, self.speed_sigma)\n', (11585, 11606), True, 'import numpy as np\n'), ((11730, 11748), 'numpy.array', 'np.array', (['[vx, vy]'], {}), '([vx, vy])\n', (11738, 11748), True, 'import numpy as np\n'), ((7194, 7249), 'gym.envs.classic_control.rendering.Viewer', 'rendering.Viewer', (['self.window_width', 'self.window_height'], {}), '(self.window_width, self.window_height)\n', (7210, 7249), False, 'from gym.envs.classic_control import rendering\n'), ((7561, 7610), 'os.path.join', 'os.path.join', (['__location__', '"""images/aircraft.png"""'], {}), "(__location__, 'images/aircraft.png')\n", (7573, 7610), False, 'import os\n'), ((7949, 7994), 'os.path.join', 'os.path.join', (['__location__', '"""images/goal.png"""'], {}), "(__location__, 'images/goal.png')\n", (7961, 7994), False, 'import os\n'), ((8424, 8520), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'rotation': '(aircraft.heading - math.pi / 2)', 'translation': 'aircraft.position'}), '(rotation=aircraft.heading - math.pi / 2, translation=\n aircraft.position)\n', (8443, 8520), False, 'from gym.envs.classic_control import rendering\n'), ((10483, 10505), 'math.cos', 'math.cos', (['self.heading'], {}), 
'(self.heading)\n', (10491, 10505), False, 'import math\n'), ((10532, 10554), 'math.sin', 'math.sin', (['self.heading'], {}), '(self.heading)\n', (10540, 10554), False, 'import math\n'), ((11634, 11656), 'math.cos', 'math.cos', (['self.heading'], {}), '(self.heading)\n', (11642, 11656), False, 'import math\n'), ((11683, 11705), 'math.sin', 'math.sin', (['self.heading'], {}), '(self.heading)\n', (11691, 11705), False, 'import math\n'), ((1503, 1519), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1511, 1519), True, 'import numpy as np\n'), ((1538, 1587), 'numpy.array', 'np.array', (['[self.window_width, self.window_height]'], {}), '([self.window_width, self.window_height])\n', (1546, 1587), True, 'import numpy as np\n'), ((7458, 7469), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7467, 7469), False, 'import os\n'), ((7471, 7496), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7486, 7496), False, 'import os\n'), ((8340, 8389), 'os.path.join', 'os.path.join', (['__location__', '"""images/intruder.png"""'], {}), "(__location__, 'images/intruder.png')\n", (8352, 8389), False, 'import os\n'), ((9305, 9321), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9313, 9321), True, 'import numpy as np\n'), ((9340, 9389), 'numpy.array', 'np.array', (['[self.window_width, self.window_height]'], {}), '([self.window_width, self.window_height])\n', (9348, 9389), True, 'import numpy as np\n'), ((9677, 9736), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': 'self.window_width', 'dtype': 'np.float32'}), '(low=0, high=self.window_width, dtype=np.float32)\n', (9687, 9736), False, 'from gym import spaces\n'), ((9759, 9819), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': 'self.window_height', 'dtype': 'np.float32'}), '(low=0, high=self.window_height, dtype=np.float32)\n', (9769, 9819), False, 'from gym import spaces\n'), ((9842, 9901), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': 'self.window_width', 
'dtype': 'np.float32'}), '(low=0, high=self.window_width, dtype=np.float32)\n', (9852, 9901), False, 'from gym import spaces\n'), ((9924, 9984), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': 'self.window_height', 'dtype': 'np.float32'}), '(low=0, high=self.window_height, dtype=np.float32)\n', (9934, 9984), False, 'from gym import spaces\n'), ((10009, 10062), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(2 * math.pi)', 'dtype': 'np.float32'}), '(low=0, high=2 * math.pi, dtype=np.float32)\n', (10019, 10062), False, 'from gym import spaces\n'), ((10083, 10152), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'self.min_speed', 'high': 'self.max_speed', 'dtype': 'np.float32'}), '(low=self.min_speed, high=self.max_speed, dtype=np.float32)\n', (10093, 10152), False, 'from gym import spaces\n')] |
#!/usr/bin/env python
import gzip
import pandas as pd
from fact.io import write_data
import click
import logging
import numpy as np
import os
from tqdm import tqdm
from gridmap import Job, process_jobs
logging.basicConfig(format='%(asctime)s|%(levelname)s|%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
log = logging.getLogger(__name__)
def run(infile_path, keys, outkey):
'''
This is what will be executed on the cluster
'''
logger = logging.getLogger(__name__)
logger.info("stream runner has been started.")
open_func = open
log.info("reading: {}".format(infile_path))
if infile_path.endswith(".gz"):
open_func = gzip.open
dfs = []
lines = 0
with open_func(infile_path) as f:
for line in f:
df = pd.read_json(line)
df = df[default_keys_to_store]
dfs.append(df)
lines+=1
res = pd.concat(dfs)
res["infile_path"] = infile_path
pre, ext = os.path.splitext(infile_path)
if infile_path.endswith(".gz"):
pre, ext = os.path.splitext(pre)
outfile = pre + ".hdf5"
write_data(res, outfile, key=outkey, mode="w", compression="gzip", compression_opts=9)
logger.info("extracted {} thresholds from {} lines".format(len(res), lines))
return res[res.duplicated([ 'event_num', 'run_id', 'night', 'infile_path'], keep='first') == False]
def make_jobs(infiles, keys, outkey, engine, queue, vmem, walltime, num_runs):
jobs = []
logger = logging.getLogger(__name__)
logger.info("queue: {}".format(queue))
logger.info("walltime: {}".format(walltime))
logger.info("engine: {}".format(engine))
logger.info("mem_free: {}mb".format(vmem))
for num, infile in enumerate(infiles):
jobs.append(
Job(run,
[infile, keys, outkey],
queue=queue,
walltime=walltime,
engine=engine,
name="{}_ratescan_convert".format(num),
mem_free='{}mb'.format(vmem)
)
)
return jobs
ratescan_keys = [
'ratescan_trigger_counts',
# 'ratescan_trigger_slices',
# 'ratescan_trigger_primitives',
'ratescan_trigger_thresholds'
]
meta_keys = [
'event_num',
'trigger_type',
# 'num_pixel',
'run_id',
'night',
# 'roi',
'timestamp'
]
pointing_keys = [
'source_position_zd', 'source_position_az', 'aux_pointing_position_zd', 'aux_pointing_position_az', 'pointing_position_zd', 'pointing_position_az'
]
pedestal_keys = [
'ped_mean_median',
# 'ped_mean_p25',
# 'ped_mean_p75',
'ped_mean_mean',
# 'ped_mean_max',
# 'ped_mean_min',
'ped_var_median',
# 'ped_var_p25',
# 'ped_var_p75',
'ped_var_mean',
# 'ped_var_max',
# 'ped_var_min',
'ped_var_variance',
]
default_keys_to_store = ratescan_keys + meta_keys + pedestal_keys
@click.command()
@click.argument('infiles', nargs=-1, type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True) )
@click.argument('outfile', type=click.Path(exists=False, dir_okay=False, file_okay=True, readable=True) )
@click.option('--key', help='Key of the data base in the hdf file', default="ratescan")
@click.option('--queue', help='Name of the queue you want to send jobs to.', default='one_day')
@click.option('--walltime', help='Estimated maximum walltime of your job in format hh:mm:ss.', default='02:00:00')
@click.option('--engine', help='Name of the grid engine used by the cluster.', type=click.Choice(['PBS', 'SGE',]), default='PBS')
@click.option('--num_runs', help='Number of num runs per bunch to start on the cluster.', default='4', type=click.INT)
@click.option('--vmem', help='Amount of memory to use per node in MB.', default='10000', type=click.INT)
@click.option('--chunksize', help='number of simultaneus submitted jobs.', default='0', type=click.INT)
@click.option('--log_level', type=click.Choice(['INFO', 'DEBUG', 'WARN']), help='increase output verbosity', default='INFO')
@click.option("--log_dir", type=click.Path(exists=False, dir_okay=True, file_okay=False, readable=True), help='Directory to store output from m gridmap jobs', default=None)
@click.option('--port', help='The port through which to communicate with the JobMonitor', default=None, type=int)
@click.option('--local', default=False,is_flag=True, help='Flag indicating whether jobs should be executed localy .')
def main(infiles, outfile, key, queue, walltime, engine, num_runs, vmem, chunksize, log_level, log_dir, port, local):
"""
run over list of jsonl files convert each line to pandas df and dump it to HDF5
"""
log.info("Putting ratescans from json files into hdf5 file")
open_func = open
if chunksize > 0:
partitions = np.array_split(infiles, 1+len(infiles)//chunksize)
else:
partitions = np.array_split(infiles, 1)
for infile in partitions:
jobs = make_jobs(infile, default_keys_to_store, key, engine, queue, vmem, walltime, num_runs)
log.info("Submitting {} jobs".format(len(jobs)))
job_arguments = dict(
jobs=jobs,
max_processes=len(jobs),
local=local,
)
if port:
job_arguments["port"] = port
if log_dir:
job_arguments["temp_dir"] = log_dir
job_outputs = process_jobs(**job_arguments)
for df in tqdm(job_outputs):
write_data(df, outfile, key=key, mode="a")
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"logging.getLogger",
"click.Choice",
"click.option",
"tqdm.tqdm",
"os.path.splitext",
"numpy.array_split",
"fact.io.write_data",
"click.Path",
"gridmap.process_jobs",
"click.command",
"pandas.concat",
"pandas.read_json"
] | [((204, 328), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s|%(levelname)s|%(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""', 'level': 'logging.INFO'}), "(format='%(asctime)s|%(levelname)s|%(message)s', datefmt\n ='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n", (223, 328), False, 'import logging\n'), ((350, 377), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (367, 377), False, 'import logging\n'), ((2944, 2959), 'click.command', 'click.command', ([], {}), '()\n', (2957, 2959), False, 'import click\n'), ((3182, 3273), 'click.option', 'click.option', (['"""--key"""'], {'help': '"""Key of the data base in the hdf file"""', 'default': '"""ratescan"""'}), "('--key', help='Key of the data base in the hdf file', default=\n 'ratescan')\n", (3194, 3273), False, 'import click\n'), ((3270, 3368), 'click.option', 'click.option', (['"""--queue"""'], {'help': '"""Name of the queue you want to send jobs to."""', 'default': '"""one_day"""'}), "('--queue', help='Name of the queue you want to send jobs to.',\n default='one_day')\n", (3282, 3368), False, 'import click\n'), ((3366, 3489), 'click.option', 'click.option', (['"""--walltime"""'], {'help': '"""Estimated maximum walltime of your job in format hh:mm:ss."""', 'default': '"""02:00:00"""'}), "('--walltime', help=\n 'Estimated maximum walltime of your job in format hh:mm:ss.', default=\n '02:00:00')\n", (3378, 3489), False, 'import click\n'), ((3611, 3737), 'click.option', 'click.option', (['"""--num_runs"""'], {'help': '"""Number of num runs per bunch to start on the cluster."""', 'default': '"""4"""', 'type': 'click.INT'}), "('--num_runs', help=\n 'Number of num runs per bunch to start on the cluster.', default='4',\n type=click.INT)\n", (3623, 3737), False, 'import click\n'), ((3730, 3837), 'click.option', 'click.option', (['"""--vmem"""'], {'help': '"""Amount of memory to use per node in MB."""', 'default': '"""10000"""', 'type': 'click.INT'}), 
"('--vmem', help='Amount of memory to use per node in MB.',\n default='10000', type=click.INT)\n", (3742, 3837), False, 'import click\n'), ((3835, 3941), 'click.option', 'click.option', (['"""--chunksize"""'], {'help': '"""number of simultaneus submitted jobs."""', 'default': '"""0"""', 'type': 'click.INT'}), "('--chunksize', help='number of simultaneus submitted jobs.',\n default='0', type=click.INT)\n", (3847, 3941), False, 'import click\n'), ((4237, 4359), 'click.option', 'click.option', (['"""--port"""'], {'help': '"""The port through which to communicate with the JobMonitor"""', 'default': 'None', 'type': 'int'}), "('--port', help=\n 'The port through which to communicate with the JobMonitor', default=\n None, type=int)\n", (4249, 4359), False, 'import click\n'), ((4351, 4473), 'click.option', 'click.option', (['"""--local"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Flag indicating whether jobs should be executed localy ."""'}), "('--local', default=False, is_flag=True, help=\n 'Flag indicating whether jobs should be executed localy .')\n", (4363, 4473), False, 'import click\n'), ((493, 520), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (510, 520), False, 'import logging\n'), ((940, 954), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (949, 954), True, 'import pandas as pd\n'), ((1017, 1046), 'os.path.splitext', 'os.path.splitext', (['infile_path'], {}), '(infile_path)\n', (1033, 1046), False, 'import os\n'), ((1161, 1251), 'fact.io.write_data', 'write_data', (['res', 'outfile'], {'key': 'outkey', 'mode': '"""w"""', 'compression': '"""gzip"""', 'compression_opts': '(9)'}), "(res, outfile, key=outkey, mode='w', compression='gzip',\n compression_opts=9)\n", (1171, 1251), False, 'from fact.io import write_data\n'), ((1546, 1573), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1563, 1573), False, 'import logging\n'), ((1102, 1123), 'os.path.splitext', 'os.path.splitext', 
(['pre'], {}), '(pre)\n', (1118, 1123), False, 'import os\n'), ((4905, 4931), 'numpy.array_split', 'np.array_split', (['infiles', '(1)'], {}), '(infiles, 1)\n', (4919, 4931), True, 'import numpy as np\n'), ((5409, 5438), 'gridmap.process_jobs', 'process_jobs', ([], {}), '(**job_arguments)\n', (5421, 5438), False, 'from gridmap import Job, process_jobs\n'), ((5458, 5475), 'tqdm.tqdm', 'tqdm', (['job_outputs'], {}), '(job_outputs)\n', (5462, 5475), False, 'from tqdm import tqdm\n'), ((3002, 3072), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)', 'file_okay': '(True)', 'readable': '(True)'}), '(exists=True, dir_okay=False, file_okay=True, readable=True)\n', (3012, 3072), False, 'import click\n'), ((3107, 3178), 'click.Path', 'click.Path', ([], {'exists': '(False)', 'dir_okay': '(False)', 'file_okay': '(True)', 'readable': '(True)'}), '(exists=False, dir_okay=False, file_okay=True, readable=True)\n', (3117, 3178), False, 'import click\n'), ((3564, 3592), 'click.Choice', 'click.Choice', (["['PBS', 'SGE']"], {}), "(['PBS', 'SGE'])\n", (3576, 3592), False, 'import click\n'), ((3972, 4011), 'click.Choice', 'click.Choice', (["['INFO', 'DEBUG', 'WARN']"], {}), "(['INFO', 'DEBUG', 'WARN'])\n", (3984, 4011), False, 'import click\n'), ((4095, 4166), 'click.Path', 'click.Path', ([], {'exists': '(False)', 'dir_okay': '(True)', 'file_okay': '(False)', 'readable': '(True)'}), '(exists=False, dir_okay=True, file_okay=False, readable=True)\n', (4105, 4166), False, 'import click\n'), ((815, 833), 'pandas.read_json', 'pd.read_json', (['line'], {}), '(line)\n', (827, 833), True, 'import pandas as pd\n'), ((5489, 5531), 'fact.io.write_data', 'write_data', (['df', 'outfile'], {'key': 'key', 'mode': '"""a"""'}), "(df, outfile, key=key, mode='a')\n", (5499, 5531), False, 'from fact.io import write_data\n')] |
"""Plot the example figure for object localisation.
<NAME> <<EMAIL>>
Research School of Astronomy and Astrophysics
The Australian National University
2017
"""
import aplpy
import astropy.io.fits
import matplotlib.patches as patches, numpy
import matplotlib
# http://bkanuka.com/articles/native-latex-plots/
def figsize(scale):
fig_width_pt = 240.0
inches_per_pt = 1.0/72.27
golden_mean = (numpy.sqrt(5.0)-1.0)/2.0
fig_width = fig_width_pt*inches_per_pt*scale
fig_height = fig_width*golden_mean
fig_size = [fig_width,fig_height]
return fig_size
pgf_with_latex = {
"pgf.texsystem": "pdflatex",
"text.usetex": True,
"font.family": "serif",
"font.serif": [],
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 10,
"font.size": 10,
"legend.fontsize": 10,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"figure.figsize": figsize(0.9),
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
]
}
matplotlib.rcParams.update(pgf_with_latex)
import matplotlib.pyplot as plt
radio_path = '../EI0093C1_radio.fits'
fig = aplpy.FITSFigure(radio_path, figsize=(5, 5))
fig.show_grayscale(stretch='arcsinh')
# fig = plt.figure(figsize=(10, 10), dpi=50)
# ax = plt.subplot2grid((3, 3), (0, 0), rowspan=2, colspan=3)
# ax.imshow(radio)
ax = plt.gca()
rect = patches.Rectangle((201-75, 10), 71, 71, edgecolor='red', linewidth=1, fill=None)
ax.add_patch(rect)
rect = patches.Rectangle((100-71/2, 100-71/2), 71, 71, edgecolor='red', linewidth=1, fill=None)
ax.add_patch(rect)
rect = patches.Rectangle((5, 50), 71, 71, edgecolor='red', linewidth=1, fill=None)
ax.add_patch(rect)
plt.savefig('../images/localisation-example.pdf')
# plt.axis('off')
# plt.title('a)')
# ax = plt.subplot2grid((3, 3), (2, 0))
# ax.imshow(radio[10:10+87, 5:5+87], cmap='Greys', vmin=radio.min(), vmax=radio.max())
# plt.title('b)')
# plt.text(44, 110, '$p = 0.01$', fontsize=35, horizontalalignment='center')
# for axis in ['top','bottom','left','right']:
# plt.gca().spines[axis].set_linewidth(5)
# plt.gca().spines[axis].set_edgecolor('red')
# plt.tick_params(
# axis='both',
# which='both',
# bottom='off',
# top='off',
# left='off',
# right='off',
# labelbottom='off',
# labelleft='off')
# ax = plt.subplot2grid((3, 3), (2, 1))
# plt.title('c)')
# ax.imshow(radio[170:170+87, 30:30+87], cmap='Greys', vmin=radio.min(), vmax=radio.max())
# plt.text(44, 110, '$p = 0.48$', fontsize=35, horizontalalignment='center')
# for axis in ['top','bottom','left','right']:
# plt.gca().spines[axis].set_linewidth(5)
# plt.gca().spines[axis].set_edgecolor('red')
# plt.tick_params(
# axis='both',
# which='both',
# bottom='off',
# top='off',
# left='off',
# right='off',
# labelbottom='off',
# labelleft='off')
# ax = plt.subplot2grid((3, 3), (2, 2))
# plt.title('d)')
# ax.imshow(radio[137:137+87, 137:137+87], cmap='Greys', vmin=radio.min(), vmax=radio.max())
# plt.text(44, 110, '$p = 0.99$', fontsize=35, horizontalalignment='center')
# for axis in ['top','bottom','left','right']:
# plt.gca().spines[axis].set_linewidth(5)
# plt.gca().spines[axis].set_edgecolor('red')
# plt.tick_params(
# axis='both',
# which='both',
# bottom='off',
# top='off',
# left='off',
# right='off',
# labelbottom='off',
# labelleft='off')
# plt.tight_layout()
# plt.subplots_adjust(hspace=0.2, bottom=0.1)
# plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/windows.pdf')
# plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/windows.eps')
# plt.show()
| [
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.rcParams.update",
"matplotlib.pyplot.gca",
"aplpy.FITSFigure"
] | [((1029, 1071), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (['pgf_with_latex'], {}), '(pgf_with_latex)\n', (1055, 1071), False, 'import matplotlib\n'), ((1149, 1193), 'aplpy.FITSFigure', 'aplpy.FITSFigure', (['radio_path'], {'figsize': '(5, 5)'}), '(radio_path, figsize=(5, 5))\n', (1165, 1193), False, 'import aplpy\n'), ((1363, 1372), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1370, 1372), True, 'import matplotlib.pyplot as plt\n'), ((1380, 1466), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(201 - 75, 10)', '(71)', '(71)'], {'edgecolor': '"""red"""', 'linewidth': '(1)', 'fill': 'None'}), "((201 - 75, 10), 71, 71, edgecolor='red', linewidth=1,\n fill=None)\n", (1397, 1466), True, 'import matplotlib.patches as patches, numpy\n'), ((1487, 1587), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(100 - 71 / 2, 100 - 71 / 2)', '(71)', '(71)'], {'edgecolor': '"""red"""', 'linewidth': '(1)', 'fill': 'None'}), "((100 - 71 / 2, 100 - 71 / 2), 71, 71, edgecolor='red',\n linewidth=1, fill=None)\n", (1504, 1587), True, 'import matplotlib.patches as patches, numpy\n'), ((1602, 1677), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(5, 50)', '(71)', '(71)'], {'edgecolor': '"""red"""', 'linewidth': '(1)', 'fill': 'None'}), "((5, 50), 71, 71, edgecolor='red', linewidth=1, fill=None)\n", (1619, 1677), True, 'import matplotlib.patches as patches, numpy\n'), ((1697, 1746), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../images/localisation-example.pdf"""'], {}), "('../images/localisation-example.pdf')\n", (1708, 1746), True, 'import matplotlib.pyplot as plt\n'), ((405, 420), 'numpy.sqrt', 'numpy.sqrt', (['(5.0)'], {}), '(5.0)\n', (415, 420), False, 'import matplotlib.patches as patches, numpy\n')] |
import os
from rpgpy import spectra2moments
from rpgpy import spcutil
from rpgpy import read_rpg
import numpy as np
from time import time
from numpy.testing import assert_array_almost_equal
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
class TestFindPeaks:
def test_main_peak_1(self):
data = np.array([0, 0, 0, 0.3, 0, 0, 0.2, 0.3, 0.5, 0.2, 0, 0, 0, 0.2])
ind_left, ind_right = spcutil.find_peak_edges(data)
assert ind_left == 6
assert ind_right == 10
def test_find_single_value(self):
data = np.array([0, 0, 0, 0.3, 0, 0])
ind_left, ind_right = spcutil.find_peak_edges(data)
assert ind_left == 3
assert ind_right == 4
def test_find_left_edge(self):
data = np.array([0.1, 0.2, 0.3, 0.5, 0, 0])
ind_left, ind_right = spcutil.find_peak_edges(data)
assert ind_left == 0
assert ind_right == 4
def test_find_right_edge(self):
data = np.array([0, 0.2, 0.3, 0.5, 0.4, 0.3])
ind_left, ind_right = spcutil.find_peak_edges(data)
assert ind_left == 1
assert ind_right == 6 # is this OK, or should be 5 ?
def test_find_peak_with_secondary_peak(self):
data = np.array([0, 0.1, 0.3, 0.2, 0.35, 0.5, 0.3, 0.1, 0])
ind_left, ind_right = spcutil.find_peak_edges(data)
assert ind_left == 1
assert ind_right == 8
class TestMoments:
input_file = f'{FILE_PATH}/../data/level0/v3-889346/200704_000002_P10_ZEN.LV0'
header, data = read_rpg(input_file)
source_data_mean = np.mean(data['TotSpec'])
start = time()
moments = spectra2moments(data, header)
stop = time()
print('')
print(f'Time elapsed: {stop - start} seconds')
def test_header(self):
assert self.header['SWVersion'] == 525
assert self.header['FileCode'] == 889346
def test_that_does_not_alter_input_data(self):
assert_array_almost_equal(self.source_data_mean, np.mean(self.data['TotSpec']))
def test_that_we_get_the_reference_value(self):
moments = spectra2moments(self.data, self.header, n_points_min=1)
assert round(np.mean(moments['Ze'][moments['Ze'] > 0] * 1e5), 2) == 10.56
def test_that_moments_contain_no_nans(self):
for key, data in self.moments.items():
assert bool(np.isnan(data).any()) is False
def test_that_works_with_hspec(self):
moments = spectra2moments(self.data, self.header, spec_var='HSpec')
class TestSLDR:
input_file = f'{FILE_PATH}/../data/level0/v3-889346/190912_060003_P05_ZEN.LV0'
header, data = read_rpg(input_file)
def test_spectral_LDR(self):
sldr = spcutil.calc_spectral_LDR(self.header, self.data) | [
"numpy.mean",
"rpgpy.spectra2moments",
"rpgpy.spcutil.calc_spectral_LDR",
"rpgpy.spcutil.find_peak_edges",
"os.path.realpath",
"numpy.array",
"numpy.isnan",
"rpgpy.read_rpg",
"time.time"
] | [((219, 245), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (235, 245), False, 'import os\n'), ((1517, 1537), 'rpgpy.read_rpg', 'read_rpg', (['input_file'], {}), '(input_file)\n', (1525, 1537), False, 'from rpgpy import read_rpg\n'), ((1561, 1585), 'numpy.mean', 'np.mean', (["data['TotSpec']"], {}), "(data['TotSpec'])\n", (1568, 1585), True, 'import numpy as np\n'), ((1598, 1604), 'time.time', 'time', ([], {}), '()\n', (1602, 1604), False, 'from time import time\n'), ((1619, 1648), 'rpgpy.spectra2moments', 'spectra2moments', (['data', 'header'], {}), '(data, header)\n', (1634, 1648), False, 'from rpgpy import spectra2moments\n'), ((1660, 1666), 'time.time', 'time', ([], {}), '()\n', (1664, 1666), False, 'from time import time\n'), ((2596, 2616), 'rpgpy.read_rpg', 'read_rpg', (['input_file'], {}), '(input_file)\n', (2604, 2616), False, 'from rpgpy import read_rpg\n'), ((318, 382), 'numpy.array', 'np.array', (['[0, 0, 0, 0.3, 0, 0, 0.2, 0.3, 0.5, 0.2, 0, 0, 0, 0.2]'], {}), '([0, 0, 0, 0.3, 0, 0, 0.2, 0.3, 0.5, 0.2, 0, 0, 0, 0.2])\n', (326, 382), True, 'import numpy as np\n'), ((413, 442), 'rpgpy.spcutil.find_peak_edges', 'spcutil.find_peak_edges', (['data'], {}), '(data)\n', (436, 442), False, 'from rpgpy import spcutil\n'), ((557, 587), 'numpy.array', 'np.array', (['[0, 0, 0, 0.3, 0, 0]'], {}), '([0, 0, 0, 0.3, 0, 0])\n', (565, 587), True, 'import numpy as np\n'), ((618, 647), 'rpgpy.spcutil.find_peak_edges', 'spcutil.find_peak_edges', (['data'], {}), '(data)\n', (641, 647), False, 'from rpgpy import spcutil\n'), ((758, 794), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.5, 0, 0]'], {}), '([0.1, 0.2, 0.3, 0.5, 0, 0])\n', (766, 794), True, 'import numpy as np\n'), ((825, 854), 'rpgpy.spcutil.find_peak_edges', 'spcutil.find_peak_edges', (['data'], {}), '(data)\n', (848, 854), False, 'from rpgpy import spcutil\n'), ((966, 1004), 'numpy.array', 'np.array', (['[0, 0.2, 0.3, 0.5, 0.4, 0.3]'], {}), '([0, 0.2, 0.3, 0.5, 0.4, 0.3])\n', 
(974, 1004), True, 'import numpy as np\n'), ((1035, 1064), 'rpgpy.spcutil.find_peak_edges', 'spcutil.find_peak_edges', (['data'], {}), '(data)\n', (1058, 1064), False, 'from rpgpy import spcutil\n'), ((1222, 1274), 'numpy.array', 'np.array', (['[0, 0.1, 0.3, 0.2, 0.35, 0.5, 0.3, 0.1, 0]'], {}), '([0, 0.1, 0.3, 0.2, 0.35, 0.5, 0.3, 0.1, 0])\n', (1230, 1274), True, 'import numpy as np\n'), ((1305, 1334), 'rpgpy.spcutil.find_peak_edges', 'spcutil.find_peak_edges', (['data'], {}), '(data)\n', (1328, 1334), False, 'from rpgpy import spcutil\n'), ((2067, 2122), 'rpgpy.spectra2moments', 'spectra2moments', (['self.data', 'self.header'], {'n_points_min': '(1)'}), '(self.data, self.header, n_points_min=1)\n', (2082, 2122), False, 'from rpgpy import spectra2moments\n'), ((2418, 2475), 'rpgpy.spectra2moments', 'spectra2moments', (['self.data', 'self.header'], {'spec_var': '"""HSpec"""'}), "(self.data, self.header, spec_var='HSpec')\n", (2433, 2475), False, 'from rpgpy import spectra2moments\n'), ((2666, 2715), 'rpgpy.spcutil.calc_spectral_LDR', 'spcutil.calc_spectral_LDR', (['self.header', 'self.data'], {}), '(self.header, self.data)\n', (2691, 2715), False, 'from rpgpy import spcutil\n'), ((1965, 1994), 'numpy.mean', 'np.mean', (["self.data['TotSpec']"], {}), "(self.data['TotSpec'])\n", (1972, 1994), True, 'import numpy as np\n'), ((2144, 2196), 'numpy.mean', 'np.mean', (["(moments['Ze'][moments['Ze'] > 0] * 100000.0)"], {}), "(moments['Ze'][moments['Ze'] > 0] * 100000.0)\n", (2151, 2196), True, 'import numpy as np\n'), ((2326, 2340), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (2334, 2340), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import Counter
import numpy as np
import pandas as pd
from imblearn.over_sampling import RandomOverSampler
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
def __init__(self, args):
super().__init__()
model = torchvision.models.resnet152(pretrained=True)
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
ct = 0
for child in self.model.children():
ct += 1
if ct > 120:
for param in child.parameters():param.requires_grad = False
self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
def forward(self, x):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(x))
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out # BxNx2048
class JsonlDataset(Dataset):
def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
if "train" in data_path:
Data = [[json.loads(l)] for l in open(data_path)]
oversample = RandomOverSampler(sampling_strategy='minority')
temp_labels = [item[0]["label"] for item in Data]
print(len(Data),len(temp_labels))
self.data, _ = oversample.fit_resample(np.array(Data), temp_labels)
self.data = np.array(self.data).flatten()
else:
self.data = [json.loads(l) for l in open(data_path)]
self.data_dir = os.path.dirname(data_path)
self.tokenizer = tokenizer
self.labels = labels
self.n_classes = len(labels)
self.max_seq_length = max_seq_length
self.transforms = transforms
def __len__(self):
return len(self.data)
def __getitem__(self, index):
sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
sentence = sentence[: self.max_seq_length]
label = torch.zeros(self.n_classes)
label[[self.labels.index(tgt) for tgt in [self.data[index]["label"]]]] = 1
# print(label)
try:
image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
except Exception as e:
print(e)
image = Image.new("RGB", (600, 600), (255, 255, 255))
image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def get_label_frequencies(self):
label_freqs = Counter()
for row in self.data:
# print(row)
label_freqs.update([row["label"]])
return label_freqs
def collate_fn(batch):
lens = [len(row["sentence"]) for row in batch]
bsz, max_seq_len = len(batch), max(lens)
mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
text_tensor[i_batch, :length] = input_row["sentence"]
mask_tensor[i_batch, :length] = 1
img_tensor = torch.stack([row["image"] for row in batch])
tgt_tensor = torch.stack([row["label"] for row in batch])
img_start_token = torch.stack([row["image_start_token"] for row in batch])
img_end_token = torch.stack([row["image_end_token"] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
0,
1,
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017],
std=[0.12221994, 0.12145835, 0.14380469],
),
]
)
| [
"torchvision.transforms.CenterCrop",
"json.loads",
"torch.nn.Sequential",
"PIL.Image.new",
"torch.stack",
"os.path.join",
"collections.Counter",
"os.path.dirname",
"imblearn.over_sampling.RandomOverSampler",
"torchvision.models.resnet152",
"numpy.array",
"torch.nn.AdaptiveAvgPool2d",
"torchv... | [((3986, 4033), 'torch.zeros', 'torch.zeros', (['bsz', 'max_seq_len'], {'dtype': 'torch.long'}), '(bsz, max_seq_len, dtype=torch.long)\n', (3997, 4033), False, 'import torch\n'), ((4052, 4099), 'torch.zeros', 'torch.zeros', (['bsz', 'max_seq_len'], {'dtype': 'torch.long'}), '(bsz, max_seq_len, dtype=torch.long)\n', (4063, 4099), False, 'import torch\n'), ((4292, 4336), 'torch.stack', 'torch.stack', (["[row['image'] for row in batch]"], {}), "([row['image'] for row in batch])\n", (4303, 4336), False, 'import torch\n'), ((4354, 4398), 'torch.stack', 'torch.stack', (["[row['label'] for row in batch]"], {}), "([row['label'] for row in batch])\n", (4365, 4398), False, 'import torch\n'), ((4421, 4477), 'torch.stack', 'torch.stack', (["[row['image_start_token'] for row in batch]"], {}), "([row['image_start_token'] for row in batch])\n", (4432, 4477), False, 'import torch\n'), ((4498, 4552), 'torch.stack', 'torch.stack', (["[row['image_end_token'] for row in batch]"], {}), "([row['image_end_token'] for row in batch])\n", (4509, 4552), False, 'import torch\n'), ((1183, 1228), 'torchvision.models.resnet152', 'torchvision.models.resnet152', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1211, 1228), False, 'import torchvision\n'), ((1296, 1319), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (1309, 1319), True, 'import torch.nn as nn\n'), ((1728, 1759), 'torch.flatten', 'torch.flatten', (['out'], {'start_dim': '(2)'}), '(out, start_dim=2)\n', (1741, 1759), False, 'import torch\n'), ((2474, 2500), 'os.path.dirname', 'os.path.dirname', (['data_path'], {}), '(data_path)\n', (2489, 2500), False, 'import os\n'), ((3037, 3064), 'torch.zeros', 'torch.zeros', (['self.n_classes'], {}), '(self.n_classes)\n', (3048, 3064), False, 'import torch\n'), ((3707, 3716), 'collections.Counter', 'Counter', ([], {}), '()\n', (3714, 3716), False, 'from collections import Counter\n'), ((1525, 1587), 'torch.nn.AdaptiveAvgPool2d', 
'nn.AdaptiveAvgPool2d', (['POOLING_BREAKDOWN[args.num_image_embeds]'], {}), '(POOLING_BREAKDOWN[args.num_image_embeds])\n', (1545, 1587), True, 'import torch.nn as nn\n'), ((2080, 2127), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'sampling_strategy': '"""minority"""'}), "(sampling_strategy='minority')\n", (2097, 2127), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((4797, 4819), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (4814, 4819), True, 'import torchvision.transforms as transforms\n'), ((4833, 4859), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4854, 4859), True, 'import torchvision.transforms as transforms\n'), ((4873, 4894), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4892, 4894), True, 'import torchvision.transforms as transforms\n'), ((4908, 5018), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.46777044, 0.44531429, 0.40661017]', 'std': '[0.12221994, 0.12145835, 0.14380469]'}), '(mean=[0.46777044, 0.44531429, 0.40661017], std=[\n 0.12221994, 0.12145835, 0.14380469])\n', (4928, 5018), True, 'import torchvision.transforms as transforms\n'), ((2287, 2301), 'numpy.array', 'np.array', (['Data'], {}), '(Data)\n', (2295, 2301), True, 'import numpy as np\n'), ((2409, 2422), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (2419, 2422), False, 'import json\n'), ((3356, 3401), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(600, 600)', '(255, 255, 255)'], {}), "('RGB', (600, 600), (255, 255, 255))\n", (3365, 3401), False, 'from PIL import Image\n'), ((2014, 2027), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (2024, 2027), False, 'import json\n'), ((2340, 2359), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (2348, 2359), True, 'import numpy as np\n'), ((3215, 3267), 'os.path.join', 'os.path.join', (['self.data_dir', "self.data[index]['img']"], {}), 
"(self.data_dir, self.data[index]['img'])\n", (3227, 3267), False, 'import os\n')] |
import numpy as np
from scipy.ndimage import morphological_gradient
def _is_iterable(x):
try:
iter(x)
except TypeError:
return False
else:
return True
def _norm_along_last_axis(x):
"""Compute the norm of x along the last axis.
"""
return np.sqrt(np.sum(np.square(x), axis=x.ndim - 1))
def _compute_set_distances(nonzeros_1, nonzeros_2):
"""Compute all surface distances from one set to the other.
"""
distances = np.zeros(len(nonzeros_1))
for i, _ in enumerate(distances):
distances[i] = np.min(
_norm_along_last_axis(nonzeros_1[i].reshape(1, -1) - nonzeros_2)
)
return distances
def compute_surface_distances(mask1, mask2, voxel_dimensions=1):
"""Return the surface distances for all points on the surface of mask1 to the surface of mask2.
Arguments
---------
mask1 : np.ndarray
Boolean mask to compute distances from
mask2 : np.ndarray
Boolean mask to compute distances to
voxel_dimensions : iterable or numeric
Voxel size, for anisotropic voxels, use an iterable of same length as the image dimensions.
"""
structuring_el_size = tuple(3 for _ in mask1.shape)
grad1 = morphological_gradient(mask1.astype(int), size=structuring_el_size)
grad2 = morphological_gradient(mask2.astype(int), size=structuring_el_size)
if not _is_iterable(voxel_dimensions):
voxel_dimensions = [voxel_dimensions for _ in mask1.shape]
voxel_dimensions = np.array(voxel_dimensions).reshape(1, -1)
nonzeros_1 = np.array(np.nonzero(grad1)).T * voxel_dimensions
nonzeros_2 = np.array(np.nonzero(grad2)).T * voxel_dimensions
return np.sort(_compute_set_distances(nonzeros_1, nonzeros_2))
def compute_labelled_surface_distances(
labelled_1, labelled_2, num_labels_1, num_labels_2, voxel_dimensions=1
):
"""Compute the surface distances for for all connected components in one mask to the whole second mask.
"""
mask1 = labelled_1 != 0
mask2 = labelled_2 != 0
surface_distance_label_1 = []
for idx in range(num_labels_1):
surface_distance_label_1.append(
compute_surface_distances(labelled_1 == idx + 1, mask2, voxel_dimensions)
)
surface_distance_label_2 = []
for idx in range(num_labels_2):
surface_distance_label_2.append(
compute_surface_distances(labelled_2 == idx + 1, mask1, voxel_dimensions)
)
return surface_distance_label_1, surface_distance_label_2
def compute_object_percentile_surface_distances(
labelled_surface_distances_1, labelled_surface_distances_2, percentile
):
"""Compute the Hausdorff distance for for all connected components in one mask to the whole second mask.
"""
hausdorffs_label_1 = []
for surface_distance in labelled_surface_distances_1:
hausdorffs_label_1.append(np.percentile(surface_distance, percentile))
hausdorffs_label_2 = []
for surface_distance in labelled_surface_distances_2:
hausdorffs_label_2.append(np.percentile(surface_distance, percentile))
return np.array(hausdorffs_label_1), np.array(hausdorffs_label_2)
def compute_overall_percentile_surface_distances(
labelled_surface_distances_1, labelled_surface_distances_2, percentile
):
hausdorff_1 = np.percentile(
np.concatenate(labelled_surface_distances_1), percentile
)
hausdorff_2 = np.percentile(
np.concatenate(labelled_surface_distances_2), percentile
)
return hausdorff_1, hausdorff_2
def compute_object_average_surface_distances(labelled_surface_distances_1, labelled_surface_distances_2):
"""Compute the Hausdorff distance for for all connected components in one mask to the whole second mask.
"""
asd_label_1 = []
for surface_distance in labelled_surface_distances_1:
asd_label_1.append(np.mean(surface_distance))
asd_label_2 = []
for surface_distance in labelled_surface_distances_2:
asd_label_2.append(np.mean(surface_distance))
return (
np.array(asd_label_1),
np.array(asd_label_2),
)
def compute_overall_average_surface_distances(labelled_surface_distances_1, labelled_surface_distances_2):
asd_1 = np.mean(np.concatenate(labelled_surface_distances_1))
asd_2 = np.mean(np.concatenate(labelled_surface_distances_2))
return asd_1, asd_2
| [
"numpy.mean",
"numpy.square",
"numpy.array",
"numpy.concatenate",
"numpy.nonzero",
"numpy.percentile"
] | [((3119, 3147), 'numpy.array', 'np.array', (['hausdorffs_label_1'], {}), '(hausdorffs_label_1)\n', (3127, 3147), True, 'import numpy as np\n'), ((3149, 3177), 'numpy.array', 'np.array', (['hausdorffs_label_2'], {}), '(hausdorffs_label_2)\n', (3157, 3177), True, 'import numpy as np\n'), ((3349, 3393), 'numpy.concatenate', 'np.concatenate', (['labelled_surface_distances_1'], {}), '(labelled_surface_distances_1)\n', (3363, 3393), True, 'import numpy as np\n'), ((3453, 3497), 'numpy.concatenate', 'np.concatenate', (['labelled_surface_distances_2'], {}), '(labelled_surface_distances_2)\n', (3467, 3497), True, 'import numpy as np\n'), ((4067, 4088), 'numpy.array', 'np.array', (['asd_label_1'], {}), '(asd_label_1)\n', (4075, 4088), True, 'import numpy as np\n'), ((4098, 4119), 'numpy.array', 'np.array', (['asd_label_2'], {}), '(asd_label_2)\n', (4106, 4119), True, 'import numpy as np\n'), ((4256, 4300), 'numpy.concatenate', 'np.concatenate', (['labelled_surface_distances_1'], {}), '(labelled_surface_distances_1)\n', (4270, 4300), True, 'import numpy as np\n'), ((4322, 4366), 'numpy.concatenate', 'np.concatenate', (['labelled_surface_distances_2'], {}), '(labelled_surface_distances_2)\n', (4336, 4366), True, 'import numpy as np\n'), ((305, 317), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (314, 317), True, 'import numpy as np\n'), ((1518, 1544), 'numpy.array', 'np.array', (['voxel_dimensions'], {}), '(voxel_dimensions)\n', (1526, 1544), True, 'import numpy as np\n'), ((2897, 2940), 'numpy.percentile', 'np.percentile', (['surface_distance', 'percentile'], {}), '(surface_distance, percentile)\n', (2910, 2940), True, 'import numpy as np\n'), ((3063, 3106), 'numpy.percentile', 'np.percentile', (['surface_distance', 'percentile'], {}), '(surface_distance, percentile)\n', (3076, 3106), True, 'import numpy as np\n'), ((3884, 3909), 'numpy.mean', 'np.mean', (['surface_distance'], {}), '(surface_distance)\n', (3891, 3909), True, 'import numpy as np\n'), ((4018, 4043), 
'numpy.mean', 'np.mean', (['surface_distance'], {}), '(surface_distance)\n', (4025, 4043), True, 'import numpy as np\n'), ((1587, 1604), 'numpy.nonzero', 'np.nonzero', (['grad1'], {}), '(grad1)\n', (1597, 1604), True, 'import numpy as np\n'), ((1653, 1670), 'numpy.nonzero', 'np.nonzero', (['grad2'], {}), '(grad2)\n', (1663, 1670), True, 'import numpy as np\n')] |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Random samplers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
# Dependency imports
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import prefer_static as ps
# ** See PRNGS.md for more detailed discussion about this packge. **
__all__ = [
'categorical',
'fold_in',
'gamma',
'is_stateful_seed',
'normal',
'poisson',
'sanitize_seed',
'split_seed',
'shuffle',
'uniform',
'zeros_seed',
]
JAX_MODE = False
SEED_DTYPE = np.uint32 if JAX_MODE else np.int32
def zeros_seed():
return tf.constant([0, 0], dtype=SEED_DTYPE)
def is_stateful_seed(seed):
return seed is None or isinstance(seed, six.integer_types)
def sanitize_seed(seed, salt=None, name=None):
"""Map various types to a seed `Tensor`."""
if callable(seed): # e.g. SeedStream.
seed = seed()
if salt is not None and not isinstance(salt, str):
raise TypeError('`salt` must be a python `str`, got {}'.format(repr(salt)))
with tf.name_scope(name or 'sanitize_seed'):
if is_stateful_seed(seed):
if JAX_MODE:
raise ValueError(
'TFP-on-JAX requires a `jax.random.PRNGKey` `seed` arg.')
# TODO(b/147874898): Deprecate `int` seeds, migrate ints to stateless?
if salt is not None:
# Prefer to incorporate salt as a constant.
if seed is not None:
seed = int(hashlib.sha512(
str((seed, salt)).encode('utf-8')).hexdigest(), 16) % (2**31 - 1)
salt = None
# Convert "stateful-indicating" `int`/`None` seed to stateless Tensor seed
# by way of a stateful sampler.
seed = tf.random.uniform([2], seed=seed, minval=np.iinfo(SEED_DTYPE).min,
maxval=np.iinfo(SEED_DTYPE).max,
dtype=SEED_DTYPE, name='seed')
# TODO(b/159209541): Consider ignoring salts for stateless seeds, for
# performance and because using stateless seeds already requires the
# discipline of splitting.
if salt is not None:
salt = int(hashlib.sha512(str(salt).encode('utf-8')).hexdigest(), 16)
seed = fold_in(seed, salt)
return tf.convert_to_tensor(seed, dtype=SEED_DTYPE, name='seed')
def fold_in(seed, salt):
"""Folds salt into seed to form a new seed."""
if JAX_MODE:
from jax import random as jaxrand # pylint: disable=g-import-not-at-top
return jaxrand.fold_in(seed, salt & (2**32 - 1))
if isinstance(salt, (six.integer_types)):
seed = tf.bitwise.bitwise_xor(
seed, np.uint64([salt & (2**64 - 1)]).view(np.int32))
else:
seed = tf.random.experimental.stateless_fold_in(seed, salt)
return seed
def split_seed(seed, n=2, salt=None, name=None):
"""Splits a seed into `n` derived seeds.
Args:
seed: The seed to split; may be an `int`, an `(int, int) tuple`, or a
`Tensor`. `int` seeds are converted to `Tensor` seeds using
`tf.random.uniform` stateful sampling. Tuples are converted to `Tensor`.
n: The number of splits to return.
salt: Optional `str` salt to mix with the seed.
name: Optional name to scope related ops.
Returns:
seeds: If `n` is a Python `int`, a `tuple` of seed values is returned. If
`n` is an int `Tensor`, a single `Tensor` of shape `[n, 2]` is returned. A
single such seed is suitable to pass as the `seed` argument of the
`tf.random.stateless_*` ops.
"""
if not (isinstance(n, int)
or isinstance(n, np.ndarray)
or tf.is_tensor(n)): # avoid confusion with salt.
raise TypeError(
'`n` must be a python `int` or an int Tensor, got {}'.format(repr(n)))
with tf.name_scope(name or 'split_seed'):
seed = sanitize_seed(seed, salt=salt)
if JAX_MODE:
from jax import random as jaxrand # pylint: disable=g-import-not-at-top
return jaxrand.split(seed, n)
seeds = tf.random.stateless_uniform(
[n, 2], seed=seed, minval=None, maxval=None, dtype=SEED_DTYPE)
if isinstance(n, six.integer_types):
seeds = tf.unstack(seeds)
return seeds
def categorical(
logits,
num_samples,
dtype=None,
seed=None,
name=None):
"""As `tf.random.categorical`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'categorical'):
seed = sanitize_seed(seed)
return tf.random.stateless_categorical(
logits=logits, num_samples=num_samples, seed=seed, dtype=dtype)
def gamma(
shape,
alpha,
beta=None,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.gamma`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'gamma'):
seed = sanitize_seed(seed)
alpha = tf.convert_to_tensor(alpha, dtype=dtype)
beta = None if beta is None else tf.convert_to_tensor(beta, dtype=dtype)
params_shape = ps.shape(alpha)
if beta is not None:
params_shape = ps.broadcast_shape(params_shape, ps.shape(beta))
shape = ps.convert_to_shape_tensor(
shape,
dtype=getattr(params_shape, 'dtype', np.int32)) # May be TensorShape.
samples_shape = ps.concat([shape, params_shape], axis=0)
return tf.random.stateless_gamma(
shape=samples_shape, seed=seed, alpha=alpha, beta=beta, dtype=dtype)
def normal(
shape,
mean=0.0,
stddev=1.0,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.normal`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'normal'):
# TODO(b/147874898): Remove workaround for seed-sensitive tests.
if is_stateful_seed(seed):
return tf.random.normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
seed = sanitize_seed(seed)
return tf.random.stateless_normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
def poisson(
shape,
lam,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.poisson`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'poisson'):
seed = sanitize_seed(seed)
lam_shape = ps.shape(lam)
sample_shape = ps.concat([shape, lam_shape], axis=0)
return tf.random.stateless_poisson(
shape=sample_shape, seed=seed, lam=lam, dtype=dtype)
def shuffle(
value,
seed=None,
name=None):
"""As `tf.random.shuffle`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'shuffle'):
seed = sanitize_seed(seed)
sortkey = tf.random.stateless_uniform(shape=[ps.shape(value)[0]], seed=seed)
return tf.gather(value, tf.argsort(sortkey))
def uniform(
shape,
minval=0,
maxval=None,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.uniform`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'uniform'):
seed = sanitize_seed(seed)
return tf.random.stateless_uniform(
shape=shape, seed=seed, minval=minval, maxval=maxval, dtype=dtype)
| [
"numpy.iinfo",
"jax.random.fold_in",
"jax.random.split",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.random.stateless_normal",
"tensorflow_probability.python.internal.prefer_static.concat",
"numpy.uint64",
"tensorflow.compat.v2.random.stateless_categorical",
"tensorflow_probabili... | [((1357, 1394), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0]'], {'dtype': 'SEED_DTYPE'}), '([0, 0], dtype=SEED_DTYPE)\n', (1368, 1394), True, 'import tensorflow.compat.v2 as tf\n'), ((1780, 1818), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'sanitize_seed')"], {}), "(name or 'sanitize_seed')\n", (1793, 1818), True, 'import tensorflow.compat.v2 as tf\n'), ((2939, 2996), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['seed'], {'dtype': 'SEED_DTYPE', 'name': '"""seed"""'}), "(seed, dtype=SEED_DTYPE, name='seed')\n", (2959, 2996), True, 'import tensorflow.compat.v2 as tf\n'), ((3176, 3217), 'jax.random.fold_in', 'jaxrand.fold_in', (['seed', '(salt & 2 ** 32 - 1)'], {}), '(seed, salt & 2 ** 32 - 1)\n', (3191, 3217), True, 'from jax import random as jaxrand\n'), ((3378, 3430), 'tensorflow.compat.v2.random.experimental.stateless_fold_in', 'tf.random.experimental.stateless_fold_in', (['seed', 'salt'], {}), '(seed, salt)\n', (3418, 3430), True, 'import tensorflow.compat.v2 as tf\n'), ((4425, 4460), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'split_seed')"], {}), "(name or 'split_seed')\n", (4438, 4460), True, 'import tensorflow.compat.v2 as tf\n'), ((4648, 4742), 'tensorflow.compat.v2.random.stateless_uniform', 'tf.random.stateless_uniform', (['[n, 2]'], {'seed': 'seed', 'minval': 'None', 'maxval': 'None', 'dtype': 'SEED_DTYPE'}), '([n, 2], seed=seed, minval=None, maxval=None,\n dtype=SEED_DTYPE)\n', (4675, 4742), True, 'import tensorflow.compat.v2 as tf\n'), ((5017, 5053), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'categorical')"], {}), "(name or 'categorical')\n", (5030, 5053), True, 'import tensorflow.compat.v2 as tf\n'), ((5097, 5196), 'tensorflow.compat.v2.random.stateless_categorical', 'tf.random.stateless_categorical', ([], {'logits': 'logits', 'num_samples': 'num_samples', 'seed': 'seed', 'dtype': 'dtype'}), '(logits=logits, 
num_samples=num_samples,\n seed=seed, dtype=dtype)\n', (5128, 5196), True, 'import tensorflow.compat.v2 as tf\n'), ((5383, 5413), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'gamma')"], {}), "(name or 'gamma')\n", (5396, 5413), True, 'import tensorflow.compat.v2 as tf\n'), ((5458, 5498), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['alpha'], {'dtype': 'dtype'}), '(alpha, dtype=dtype)\n', (5478, 5498), True, 'import tensorflow.compat.v2 as tf\n'), ((5595, 5610), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', (['alpha'], {}), '(alpha)\n', (5603, 5610), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((5860, 5900), 'tensorflow_probability.python.internal.prefer_static.concat', 'ps.concat', (['[shape, params_shape]'], {'axis': '(0)'}), '([shape, params_shape], axis=0)\n', (5869, 5900), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((5912, 6011), 'tensorflow.compat.v2.random.stateless_gamma', 'tf.random.stateless_gamma', ([], {'shape': 'samples_shape', 'seed': 'seed', 'alpha': 'alpha', 'beta': 'beta', 'dtype': 'dtype'}), '(shape=samples_shape, seed=seed, alpha=alpha, beta\n =beta, dtype=dtype)\n', (5937, 6011), True, 'import tensorflow.compat.v2 as tf\n'), ((6203, 6234), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'normal')"], {}), "(name or 'normal')\n", (6216, 6234), True, 'import tensorflow.compat.v2 as tf\n'), ((6483, 6576), 'tensorflow.compat.v2.random.stateless_normal', 'tf.random.stateless_normal', ([], {'shape': 'shape', 'seed': 'seed', 'mean': 'mean', 'stddev': 'stddev', 'dtype': 'dtype'}), '(shape=shape, seed=seed, mean=mean, stddev=stddev,\n dtype=dtype)\n', (6509, 6576), True, 'import tensorflow.compat.v2 as tf\n'), ((6750, 6782), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'poisson')"], {}), "(name or 'poisson')\n", (6763, 6782), True, 'import tensorflow.compat.v2 as tf\n'), 
((6831, 6844), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', (['lam'], {}), '(lam)\n', (6839, 6844), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((6864, 6901), 'tensorflow_probability.python.internal.prefer_static.concat', 'ps.concat', (['[shape, lam_shape]'], {'axis': '(0)'}), '([shape, lam_shape], axis=0)\n', (6873, 6901), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((6913, 6998), 'tensorflow.compat.v2.random.stateless_poisson', 'tf.random.stateless_poisson', ([], {'shape': 'sample_shape', 'seed': 'seed', 'lam': 'lam', 'dtype': 'dtype'}), '(shape=sample_shape, seed=seed, lam=lam, dtype=dtype\n )\n', (6940, 6998), True, 'import tensorflow.compat.v2 as tf\n'), ((7140, 7172), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'shuffle')"], {}), "(name or 'shuffle')\n", (7153, 7172), True, 'import tensorflow.compat.v2 as tf\n'), ((7525, 7557), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (["(name or 'uniform')"], {}), "(name or 'uniform')\n", (7538, 7557), True, 'import tensorflow.compat.v2 as tf\n'), ((7601, 7700), 'tensorflow.compat.v2.random.stateless_uniform', 'tf.random.stateless_uniform', ([], {'shape': 'shape', 'seed': 'seed', 'minval': 'minval', 'maxval': 'maxval', 'dtype': 'dtype'}), '(shape=shape, seed=seed, minval=minval, maxval=\n maxval, dtype=dtype)\n', (7628, 7700), True, 'import tensorflow.compat.v2 as tf\n'), ((4270, 4285), 'tensorflow.compat.v2.is_tensor', 'tf.is_tensor', (['n'], {}), '(n)\n', (4282, 4285), True, 'import tensorflow.compat.v2 as tf\n'), ((4613, 4635), 'jax.random.split', 'jaxrand.split', (['seed', 'n'], {}), '(seed, n)\n', (4626, 4635), True, 'from jax import random as jaxrand\n'), ((4803, 4820), 'tensorflow.compat.v2.unstack', 'tf.unstack', (['seeds'], {}), '(seeds)\n', (4813, 4820), True, 'import tensorflow.compat.v2 as tf\n'), ((5536, 5575), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', 
(['beta'], {'dtype': 'dtype'}), '(beta, dtype=dtype)\n', (5556, 5575), True, 'import tensorflow.compat.v2 as tf\n'), ((6349, 6428), 'tensorflow.compat.v2.random.normal', 'tf.random.normal', ([], {'shape': 'shape', 'seed': 'seed', 'mean': 'mean', 'stddev': 'stddev', 'dtype': 'dtype'}), '(shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)\n', (6365, 6428), True, 'import tensorflow.compat.v2 as tf\n'), ((7314, 7333), 'tensorflow.compat.v2.argsort', 'tf.argsort', (['sortkey'], {}), '(sortkey)\n', (7324, 7333), True, 'import tensorflow.compat.v2 as tf\n'), ((5690, 5704), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', (['beta'], {}), '(beta)\n', (5698, 5704), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((3311, 3342), 'numpy.uint64', 'np.uint64', (['[salt & 2 ** 64 - 1]'], {}), '([salt & 2 ** 64 - 1])\n', (3320, 3342), True, 'import numpy as np\n'), ((2461, 2481), 'numpy.iinfo', 'np.iinfo', (['SEED_DTYPE'], {}), '(SEED_DTYPE)\n', (2469, 2481), True, 'import numpy as np\n'), ((2525, 2545), 'numpy.iinfo', 'np.iinfo', (['SEED_DTYPE'], {}), '(SEED_DTYPE)\n', (2533, 2545), True, 'import numpy as np\n'), ((7254, 7269), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', (['value'], {}), '(value)\n', (7262, 7269), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n')] |
import glob
import cv2
import numpy as np
import torch
import pandas as pd
import queue
from pathlib import Path
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2 import model_zoo
from detectron2.data.datasets import register_coco_instances
from detectron2.data.datasets import builtin_meta
from detectron2.data import get_detection_dataset_dicts
from annolid.postprocessing.quality_control import pred_dict_to_labelme
import pycocotools.mask as mask_util
from annolid.annotation.keypoints import save_labels
from annolid.postprocessing.quality_control import TracksResults
from annolid.annotation.masks import mask_iou
from annolid.data.videos import key_frames
from torchvision.ops import nms
from annolid.data import videos
class Segmentor():
    """Mask R-CNN based instance segmentor / tracker built on detectron2.

    Loads a ``DefaultPredictor`` trained on a COCO-format dataset and runs
    it over single images, folders of images, or whole videos.  Detections
    are filtered with class-agnostic NMS and cross-instance overlap checks
    (against recent high-confidence detections held in small priority
    queues), then written out as labelme JSON files and a tracking CSV.
    """

    def __init__(self,
                 dataset_dir=None,
                 model_pth_path=None,
                 score_threshold=0.15,
                 overlap_threshold=0.95
                 ) -> None:
        """Register the COCO datasets and configure the predictor.

        :param dataset_dir: dataset root containing ``train/`` and
            ``valid/`` COCO subsets, each with an ``annotations.json``
        :param model_pth_path: path to the trained model weights (.pth)
        :param score_threshold: minimum class score kept at test time
        :param overlap_threshold: IoU threshold used for NMS (ROI heads
            and RPN proposals)
        """
        self.dataset_dir = dataset_dir
        self.score_threshold = score_threshold
        self.overlap_threshold = overlap_threshold
        dataset_name = Path(self.dataset_dir).stem
        # Small priority queues (keyed by 1 - class_score) hold the most
        # confident recent detections of each tracked instance.
        self.subject_queue = queue.PriorityQueue(3)
        self.left_object_queue = queue.PriorityQueue(3)
        self.right_object_queue = queue.PriorityQueue(3)
        self.right_interact_queue = queue.PriorityQueue(3)
        self.left_interact_queue = queue.PriorityQueue(3)
        # Instance names expected in the trained model's label set.
        self.subject_instance_name = 'Mouse'
        self.left_object_name = 'LeftTeaball'
        self.right_object_name = 'RightTeaball'
        self.left_interact_name = 'LeftInteract'
        self.right_interact_name = 'RightInteract'
        self.tracking_results = []
        try:
            register_coco_instances(f"{dataset_name}_train", {
            }, f"{self.dataset_dir}/train/annotations.json", f"{self.dataset_dir}/train/")
            register_coco_instances(f"{dataset_name}_valid", {
            }, f"{self.dataset_dir}/valid/annotations.json", f"{self.dataset_dir}/valid/")
        except AssertionError as e:
            # The datasets may already be registered from a previous run.
            print(e)
        # Loading the dicts populates the dataset metadata (thing_classes).
        dataset_dicts = get_detection_dataset_dicts([f"{dataset_name}_train"])
        _dataset_metadata = MetadataCatalog.get(f"{dataset_name}_train")
        _dataset_metadata.thing_colors = [cc['color']
                                          for cc in builtin_meta.COCO_CATEGORIES]
        num_classes = len(_dataset_metadata.thing_classes)
        self.class_names = _dataset_metadata.thing_classes
        self.cfg = get_cfg()
        # load model config and pretrained model
        self.cfg.merge_from_file(model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        ))
        self.cfg.MODEL.WEIGHTS = model_pth_path
        self.cfg.DATASETS.TRAIN = (f"{dataset_name}_train",)
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.score_threshold
        self.cfg.MODEL.DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes
        self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = self.overlap_threshold
        # NMS threshold used on RPN proposals
        self.cfg.MODEL.RPN.NMS_THRESH = self.overlap_threshold
        self.predictor = DefaultPredictor(self.cfg)

    def to_labelme(self,
                   instances,
                   image_path,
                   height,
                   width):
        """Convert predicted instances into a labelme JSON file.

        Keeps only the highest-scoring detection per instance name.

        :param instances: detectron2 ``Instances`` (on CPU)
        :param image_path: path of the source image
        :param height: image height in pixels
        :param width: image width in pixels
        :return: path of the written JSON file
        """
        results = self._process_instances(instances, width=width)
        df_res = pd.DataFrame(results)
        # One (best) prediction per instance name, preserving order.
        df_res = df_res.groupby(['instance_name'], sort=False).head(1)
        results = df_res.to_dict(orient='records')
        frame_label_list = []
        for res in results:
            label_list = pred_dict_to_labelme(res)
            frame_label_list += label_list
        img_ext = Path(image_path).suffix
        json_path = image_path.replace(img_ext, ".json")
        save_labels(json_path,
                    str(Path(image_path).name),
                    frame_label_list,
                    height,
                    width,
                    imageData=None,
                    save_image_to_json=False
                    )
        return json_path

    def on_image(self, image_path, display=True):
        """Predict instances on a single image and write a labelme JSON.

        :param image_path: path of the image file
        :param display: if True, show the annotated frame in a window
            (blocks until a key is pressed)
        """
        image = cv2.imread(image_path)
        height, width, _ = image.shape
        preds = self.predictor(image)
        instances = preds["instances"].to('cpu')
        # save json format for at least one predicted instance
        if len(instances) >= 1:
            self.to_labelme(instances, image_path, height, width)
        if display:
            viz = Visualizer(image[:, :, ::-1],
                             metadata=MetadataCatalog.get(
                                 self.cfg.DATASETS.TRAIN[0]),
                             instance_mode=ColorMode.SEGMENTATION
                             )
            output = viz.draw_instance_predictions(
                instances
            )
            cv2.imshow("Frame", output.get_image()[:, :, ::-1])
            cv2.waitKey(0)

    def _save_pred_history(self,
                           out_dict,
                           instance_name,
                           instance_queue
                           ):
        """Record a detection for ``instance_name`` in its priority queue.

        Entries are keyed by ``1 - class_score`` so ``get()`` returns the
        most confident detection first.  When the queue is full, the two
        best entries are popped and only the best is re-inserted, which
        frees a slot; NOTE(review): the current detection is *not* added
        on that same call — confirm this eviction policy is intended.
        """
        if out_dict['instance_name'] == instance_name:
            if instance_queue.full():
                instance_high_score = instance_queue.get()
                instance_queue.get()
                instance_queue.put(instance_high_score)
            else:
                instance_queue.put(
                    (1-out_dict['class_score'], out_dict))

    def _overlap_with_subject_instance(self, out_dict):
        """Return True if ``out_dict`` overlaps the best recent subject
        detection (or if no subject detection has been recorded yet)."""
        if self.subject_queue.qsize() == 0:
            return True
        subject_instance_best_score = self.subject_queue.get()
        _iou = mask_iou(
            subject_instance_best_score[1]['segmentation'],
            out_dict['segmentation']
        )
        # Put the best detection back so the history is preserved.
        self.subject_queue.put(subject_instance_best_score)
        if _iou <= 0:
            return False
        return True

    def _overlap_with_left_object(self,
                                  out_dict):
        """Return True if ``out_dict`` overlaps the best recent left
        object detection (or if none has been recorded yet)."""
        if self.left_object_queue.qsize() == 0:
            return True
        left_object_best_score = self.left_object_queue.get()
        _iou = mask_iou(
            left_object_best_score[1]['segmentation'],
            out_dict['segmentation']
        )
        self.left_object_queue.put(left_object_best_score)
        return _iou > 0

    def _overlap_with_right_object(self,
                                   out_dict):
        """Return True if ``out_dict`` overlaps the best recent right
        object detection (or if none has been recorded yet)."""
        if self.right_object_queue.qsize() == 0:
            return True
        right_object_best_score = self.right_object_queue.get()
        _iou = mask_iou(
            right_object_best_score[1]['segmentation'],
            out_dict['segmentation']
        )
        self.right_object_queue.put(right_object_best_score)
        return _iou > 0

    def subject_overlap_with_right_object(self):
        """Return True if the best subject and right-object detections
        overlap.

        NOTE(review): blocks if ``subject_queue`` is empty; currently only
        referenced from commented-out code in ``_process_instances``.
        """
        if self.right_object_queue.qsize() == 0:
            return True
        right_object_best_score = self.right_object_queue.get()
        subject_best_score = self.subject_queue.get()
        _iou = mask_iou(
            right_object_best_score[1]['segmentation'],
            subject_best_score[1]['segmentation']
        )
        self.right_object_queue.put(right_object_best_score)
        self.subject_queue.put(subject_best_score)
        return _iou > 0

    def subject_overlap_with_left_object(self):
        """Return True if the best subject and left-object detections
        overlap.

        NOTE(review): blocks if ``subject_queue`` is empty; currently only
        referenced from commented-out code in ``_process_instances``.
        """
        if self.left_object_queue.qsize() == 0:
            return True
        left_object_best_score = self.left_object_queue.get()
        subject_best_score = self.subject_queue.get()
        _iou = mask_iou(
            left_object_best_score[1]['segmentation'],
            subject_best_score[1]['segmentation']
        )
        self.left_object_queue.put(left_object_best_score)
        self.subject_queue.put(subject_best_score)
        return _iou > 0

    def _process_instances(self,
                           instances,
                           frame_number=0,
                           width=None
                           ):
        """Filter and serialize predicted instances for one frame.

        Applies class-agnostic NMS, encodes masks as COCO RLE, updates the
        per-instance history queues, and drops interact detections that do
        not overlap the expected companion instances.

        :param instances: detectron2 ``Instances`` (on CPU)
        :param frame_number: frame index recorded with every detection
        :param width: frame width, used to disambiguate left/right labels
        :return: list of per-detection result dicts
        """
        results = []
        out_dict = {}
        num_instance = len(instances)
        boxes = instances.pred_boxes.tensor
        scores = instances.scores
        classes = instances.pred_classes
        # apply nms for all the class
        _keep = nms(boxes, scores, self.overlap_threshold)
        boxes = boxes[_keep]
        scores = scores[_keep]
        classes = classes[_keep]
        boxes = boxes.numpy()
        boxes = boxes.tolist()
        scores = scores.tolist()
        classes = classes.tolist()
        has_mask = instances.has("pred_masks")
        # NOTE(review): the loop below reads rles[k] unconditionally, so
        # masks are assumed to always be predicted (Mask R-CNN head on).
        if has_mask:
            pred_masks = instances.pred_masks
            pred_masks = pred_masks[_keep]
            rles = [
                mask_util.encode(
                    np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
                for mask in pred_masks
            ]
            for rle in rles:
                rle["counts"] = rle["counts"].decode("utf-8")
            assert len(rles) == len(boxes)
        if num_instance != len(rles):
            num_instance = len(rles)
            print(f"{num_instance} filtered instances ")

        for k in range(num_instance):
            box = boxes[k]
            out_dict['frame_number'] = frame_number
            out_dict['x1'] = box[0]
            out_dict['y1'] = box[1]
            out_dict['x2'] = box[2]
            out_dict['y2'] = box[3]
            out_dict['cx'] = (out_dict['x1'] + out_dict['x2']) / 2
            out_dict['cy'] = (out_dict['y1'] + out_dict['y2']) / 2
            out_dict['instance_name'] = self.class_names[classes[k]]
            out_dict['class_score'] = scores[k]
            out_dict['segmentation'] = rles[k]
            if scores[k] >= self.score_threshold:
                out_dict['instance_name'] = TracksResults.switch_left_right(
                    out_dict, width=width)
                if out_dict['instance_name'] == self.subject_instance_name:
                    self._save_pred_history(out_dict,
                                            self.subject_instance_name,
                                            self.subject_queue)
                elif out_dict['instance_name'] == self.left_object_name:
                    self._save_pred_history(out_dict,
                                            self.left_object_name,
                                            self.left_object_queue)
                elif out_dict['instance_name'] == self.right_object_name:
                    self._save_pred_history(out_dict,
                                            self.right_object_name,
                                            self.right_object_queue)
                elif out_dict['instance_name'] == self.left_interact_name:
                    self._save_pred_history(out_dict,
                                            self.left_interact_name,
                                            self.left_interact_queue)
                    # check overlap with subject animal
                    if not self._overlap_with_subject_instance(out_dict):
                        out_dict = {}
                        continue
                    # check overlap with left object
                    if not self._overlap_with_left_object(out_dict):
                        out_dict = {}
                        continue
                    # if not self.subject_overlap_with_left_object():
                    #     out_dict = {}
                    #     continue
                elif out_dict['instance_name'] == self.right_interact_name:
                    # BUGFIX: was saving into left_interact_queue, leaving
                    # right_interact_queue永 unused (copy-paste error).
                    self._save_pred_history(out_dict,
                                            self.right_interact_name,
                                            self.right_interact_queue)
                    if not self._overlap_with_subject_instance(out_dict):
                        out_dict = {}
                        continue
                    if not self._overlap_with_right_object(out_dict):
                        out_dict = {}
                        continue
                    # if not self.subject_overlap_with_right_object():
                    #     out_dict = {}
                    #     continue
            results.append(out_dict)
            out_dict = {}
        return results

    def on_image_folder(self,
                        image_folder
                        ):
        """Run :meth:`on_image` over every .jpg (or .png) in a folder."""
        imgs = glob.glob(str(Path(image_folder) / '*.jpg'))
        if len(imgs) <= 0:
            imgs = glob.glob(str(Path(image_folder) / '*.png'))
        for img_path in imgs:
            self.on_image(img_path, display=False)

    def on_video(self, video_path):
        """Run the predictor over a whole video and save a tracking CSV.

        Key frames are extracted and annotated first; full per-frame
        tracking only runs for short videos or when a GPU is available.

        :param video_path: path of the video file
        :return: folder holding the extracted key frames (None if the
            video does not exist)
        """
        if not Path(video_path).exists():
            return
        self.cap = cv2.VideoCapture(video_path)
        num_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        out_img_dir = key_frames(video_path)
        self.on_image_folder(out_img_dir)
        if num_frames <= 1000 or torch.cuda.is_available():
            frame_number = 0
            for frame in videos.frame_from_video(self.cap, num_frames):
                outputs = self.predictor(frame)
                out_dict = {}
                instances = outputs["instances"].to("cpu")
                num_instance = len(instances)
                if num_instance == 0:
                    # Record an empty row so every frame appears in the CSV.
                    out_dict['frame_number'] = frame_number
                    out_dict['x1'] = None
                    out_dict['y1'] = None
                    out_dict['x2'] = None
                    out_dict['y2'] = None
                    out_dict['instance_name'] = None
                    out_dict['class_score'] = None
                    out_dict['segmentation'] = None
                    self.tracking_results.append(out_dict)
                    out_dict = {}
                else:
                    _res = self._process_instances(
                        instances, frame_number, width)
                    self.tracking_results += _res
                frame_number += 1
                if frame_number % 100 == 0:
                    print("Processing frame number: ", frame_number)
        df = pd.DataFrame(self.tracking_results)
        # Keep the top detection per (frame, instance) pair.
        df_top = df.groupby(
            ['frame_number', 'instance_name'], sort=False).head(1)
        tracking_results_dir = Path(self.dataset_dir).parent
        tracking_results_csv = f"{str(Path(self.dataset_dir).stem)}"
        tracking_results_csv += f"_{str(Path(video_path).stem)}"
        tracking_results_csv += "_mask_rcnn_tracking_results_with_segmenation.csv"
        df_top.to_csv(str(tracking_results_dir / tracking_results_csv))
        print(f"Done. Please check you results in folder: {out_img_dir}")
        return out_img_dir
| [
"numpy.array",
"detectron2.model_zoo.get_config_file",
"torch.cuda.is_available",
"annolid.data.videos.key_frames",
"detectron2.config.get_cfg",
"detectron2.data.datasets.register_coco_instances",
"pathlib.Path",
"annolid.annotation.masks.mask_iou",
"torchvision.ops.nms",
"annolid.data.videos.fram... | [((1326, 1348), 'queue.PriorityQueue', 'queue.PriorityQueue', (['(3)'], {}), '(3)\n', (1345, 1348), False, 'import queue\n'), ((1382, 1404), 'queue.PriorityQueue', 'queue.PriorityQueue', (['(3)'], {}), '(3)\n', (1401, 1404), False, 'import queue\n'), ((1439, 1461), 'queue.PriorityQueue', 'queue.PriorityQueue', (['(3)'], {}), '(3)\n', (1458, 1461), False, 'import queue\n'), ((1498, 1520), 'queue.PriorityQueue', 'queue.PriorityQueue', (['(3)'], {}), '(3)\n', (1517, 1520), False, 'import queue\n'), ((1556, 1578), 'queue.PriorityQueue', 'queue.PriorityQueue', (['(3)'], {}), '(3)\n', (1575, 1578), False, 'import queue\n'), ((2256, 2310), 'detectron2.data.get_detection_dataset_dicts', 'get_detection_dataset_dicts', (["[f'{dataset_name}_train']"], {}), "([f'{dataset_name}_train'])\n", (2283, 2310), False, 'from detectron2.data import get_detection_dataset_dicts\n'), ((2340, 2384), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['f"""{dataset_name}_train"""'], {}), "(f'{dataset_name}_train')\n", (2359, 2384), False, 'from detectron2.data import MetadataCatalog\n'), ((2659, 2668), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (2666, 2668), False, 'from detectron2.config import get_cfg\n'), ((3387, 3413), 'detectron2.engine.DefaultPredictor', 'DefaultPredictor', (['self.cfg'], {}), '(self.cfg)\n', (3403, 3413), False, 'from detectron2.engine import DefaultPredictor\n'), ((3638, 3659), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (3650, 3659), True, 'import pandas as pd\n'), ((4400, 4422), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4410, 4422), False, 'import cv2\n'), ((5881, 5968), 'annolid.annotation.masks.mask_iou', 'mask_iou', (["subject_instance_best_score[1]['segmentation']", "out_dict['segmentation']"], {}), "(subject_instance_best_score[1]['segmentation'], out_dict[\n 'segmentation'])\n", (5889, 5968), False, 'from annolid.annotation.masks import mask_iou\n'), 
((6360, 6437), 'annolid.annotation.masks.mask_iou', 'mask_iou', (["left_object_best_score[1]['segmentation']", "out_dict['segmentation']"], {}), "(left_object_best_score[1]['segmentation'], out_dict['segmentation'])\n", (6368, 6437), False, 'from annolid.annotation.masks import mask_iou\n'), ((6795, 6873), 'annolid.annotation.masks.mask_iou', 'mask_iou', (["right_object_best_score[1]['segmentation']", "out_dict['segmentation']"], {}), "(right_object_best_score[1]['segmentation'], out_dict['segmentation'])\n", (6803, 6873), False, 'from annolid.annotation.masks import mask_iou\n'), ((7249, 7345), 'annolid.annotation.masks.mask_iou', 'mask_iou', (["right_object_best_score[1]['segmentation']", "subject_best_score[1]['segmentation']"], {}), "(right_object_best_score[1]['segmentation'], subject_best_score[1][\n 'segmentation'])\n", (7257, 7345), False, 'from annolid.annotation.masks import mask_iou\n'), ((7763, 7858), 'annolid.annotation.masks.mask_iou', 'mask_iou', (["left_object_best_score[1]['segmentation']", "subject_best_score[1]['segmentation']"], {}), "(left_object_best_score[1]['segmentation'], subject_best_score[1][\n 'segmentation'])\n", (7771, 7858), False, 'from annolid.annotation.masks import mask_iou\n'), ((8459, 8501), 'torchvision.ops.nms', 'nms', (['boxes', 'scores', 'self.overlap_threshold'], {}), '(boxes, scores, self.overlap_threshold)\n', (8462, 8501), False, 'from torchvision.ops import nms\n'), ((12907, 12935), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (12923, 12935), False, 'import cv2\n'), ((13084, 13106), 'annolid.data.videos.key_frames', 'key_frames', (['video_path'], {}), '(video_path)\n', (13094, 13106), False, 'from annolid.data.videos import key_frames\n'), ((1269, 1291), 'pathlib.Path', 'Path', (['self.dataset_dir'], {}), '(self.dataset_dir)\n', (1273, 1291), False, 'from pathlib import Path\n'), ((1879, 2011), 'detectron2.data.datasets.register_coco_instances', 'register_coco_instances', 
(['f"""{dataset_name}_train"""', '{}', 'f"""{self.dataset_dir}/train/annotations.json"""', 'f"""{self.dataset_dir}/train/"""'], {}), "(f'{dataset_name}_train', {},\n f'{self.dataset_dir}/train/annotations.json', f'{self.dataset_dir}/train/')\n", (1902, 2011), False, 'from detectron2.data.datasets import register_coco_instances\n'), ((2033, 2165), 'detectron2.data.datasets.register_coco_instances', 'register_coco_instances', (['f"""{dataset_name}_valid"""', '{}', 'f"""{self.dataset_dir}/valid/annotations.json"""', 'f"""{self.dataset_dir}/valid/"""'], {}), "(f'{dataset_name}_valid', {},\n f'{self.dataset_dir}/valid/annotations.json', f'{self.dataset_dir}/valid/')\n", (2056, 2165), False, 'from detectron2.data.datasets import register_coco_instances\n'), ((2751, 2837), 'detectron2.model_zoo.get_config_file', 'model_zoo.get_config_file', (['"""COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"""'], {}), "(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n", (2776, 2837), False, 'from detectron2 import model_zoo\n'), ((3081, 3106), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3104, 3106), False, 'import torch\n'), ((3865, 3890), 'annolid.postprocessing.quality_control.pred_dict_to_labelme', 'pred_dict_to_labelme', (['res'], {}), '(res)\n', (3885, 3890), False, 'from annolid.postprocessing.quality_control import pred_dict_to_labelme\n'), ((3952, 3968), 'pathlib.Path', 'Path', (['image_path'], {}), '(image_path)\n', (3956, 3968), False, 'from pathlib import Path\n'), ((5119, 5133), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5130, 5133), False, 'import cv2\n'), ((13183, 13208), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13206, 13208), False, 'import torch\n'), ((13264, 13309), 'annolid.data.videos.frame_from_video', 'videos.frame_from_video', (['self.cap', 'num_frames'], {}), '(self.cap, num_frames)\n', (13287, 13309), False, 'from annolid.data import videos\n'), ((14354, 14389), 
'pandas.DataFrame', 'pd.DataFrame', (['self.tracking_results'], {}), '(self.tracking_results)\n', (14366, 14389), True, 'import pandas as pd\n'), ((9987, 10041), 'annolid.postprocessing.quality_control.TracksResults.switch_left_right', 'TracksResults.switch_left_right', (['out_dict'], {'width': 'width'}), '(out_dict, width=width)\n', (10018, 10041), False, 'from annolid.postprocessing.quality_control import TracksResults\n'), ((14529, 14551), 'pathlib.Path', 'Path', (['self.dataset_dir'], {}), '(self.dataset_dir)\n', (14533, 14551), False, 'from pathlib import Path\n'), ((4088, 4104), 'pathlib.Path', 'Path', (['image_path'], {}), '(image_path)\n', (4092, 4104), False, 'from pathlib import Path\n'), ((4818, 4865), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['self.cfg.DATASETS.TRAIN[0]'], {}), '(self.cfg.DATASETS.TRAIN[0])\n', (4837, 4865), False, 'from detectron2.data import MetadataCatalog\n'), ((12587, 12605), 'pathlib.Path', 'Path', (['image_folder'], {}), '(image_folder)\n', (12591, 12605), False, 'from pathlib import Path\n'), ((12842, 12858), 'pathlib.Path', 'Path', (['video_path'], {}), '(video_path)\n', (12846, 12858), False, 'from pathlib import Path\n'), ((8959, 9011), 'numpy.array', 'np.array', (['mask[:, :, None]'], {'order': '"""F"""', 'dtype': '"""uint8"""'}), "(mask[:, :, None], order='F', dtype='uint8')\n", (8967, 9011), True, 'import numpy as np\n'), ((12678, 12696), 'pathlib.Path', 'Path', (['image_folder'], {}), '(image_folder)\n', (12682, 12696), False, 'from pathlib import Path\n'), ((14601, 14623), 'pathlib.Path', 'Path', (['self.dataset_dir'], {}), '(self.dataset_dir)\n', (14605, 14623), False, 'from pathlib import Path\n'), ((14676, 14692), 'pathlib.Path', 'Path', (['video_path'], {}), '(video_path)\n', (14680, 14692), False, 'from pathlib import Path\n')] |
""" Module implementing GAN which will be trained using the Progressive growing
technique -> https://arxiv.org/abs/1710.10196
"""
import datetime
import os
import time
import timeit
import copy
import numpy as np
import torch as th
class Generator(th.nn.Module):
    """Multi-scale generator that emits an RGB image at every resolution.

    A ``GenInitialBlock`` maps the latent vector to the coarsest feature
    map; each subsequent ``GenGeneralConvBlock`` raises the resolution,
    and a per-scale 1x1 "to RGB" convolution converts features to images.
    """

    def __init__(self, depth=7, latent_size=512, use_eql=True):
        """
        Build the generator.

        :param depth: number of output scales of the network
        :param latent_size: dimensionality of the latent manifold
        :param use_eql: use equalized-learning-rate convolutions if True
        """
        from torch.nn import ModuleList, Conv2d
        from MSG_GAN.CustomLayers import GenGeneralConvBlock, \
            GenInitialBlock, _equalized_conv2d
        super().__init__()

        assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            assert latent_size >= np.power(2, depth - 4), "latent size will diminish to zero"

        # state of the generator:
        self.use_eql = use_eql
        self.depth = depth
        self.latent_size = latent_size

        # factory for the per-scale 1x1 feature -> RGB converters
        if self.use_eql:
            def to_rgb(in_channels):
                return _equalized_conv2d(in_channels, 3, (1, 1), bias=True)
        else:
            def to_rgb(in_channels):
                return Conv2d(in_channels, 3, (1, 1), bias=True)

        # coarsest block plus its RGB converter
        self.layers = ModuleList(
            [GenInitialBlock(self.latent_size, use_eql=self.use_eql)])
        self.rgb_converters = ModuleList([to_rgb(self.latent_size)])

        # remaining stages: channel width starts halving (powers of two)
        # only after the third stage
        for stage in range(self.depth - 1):
            if stage <= 2:
                in_ch, out_ch = self.latent_size, self.latent_size
            else:
                in_ch = int(self.latent_size // np.power(2, stage - 3))
                out_ch = int(self.latent_size // np.power(2, stage - 2))
            self.layers.append(
                GenGeneralConvBlock(in_ch, out_ch, use_eql=self.use_eql))
            self.rgb_converters.append(to_rgb(out_ch))

    def forward(self, x):
        """
        Run the full multi-scale pipeline.

        :param x: input noise
        :return: list of RGB outputs, one per scale (coarsest first)
        """
        rgbs = []
        features = x
        for block, converter in zip(self.layers, self.rgb_converters):
            features = block(features)
            rgbs.append(converter(features))
        return rgbs

    @staticmethod
    def adjust_dynamic_range(data, drange_in=(-1, 1), drange_out=(0, 1)):
        """
        Linearly remap ``data`` from ``drange_in`` to ``drange_out`` and
        clamp the result to [0, 1].

        :param data: input image data
        :param drange_in: original range of the input
        :param drange_out: required output range
        :return: colour-range adjusted images
        """
        if drange_in != drange_out:
            in_lo, in_hi = np.float32(drange_in[0]), np.float32(drange_in[1])
            out_lo, out_hi = np.float32(drange_out[0]), np.float32(drange_out[1])
            scale = (out_hi - out_lo) / (in_hi - in_lo)
            data = data * scale + (out_lo - in_lo * scale)
        return th.clamp(data, min=0, max=1)
class Discriminator(th.nn.Module):
    """Multi-scale discriminator consuming one image per resolution.

    Each scale has a 1x1 "from RGB" converter whose output is concatenated
    with the running feature map before the matching conv block, so every
    resolution of the generator's output is critiqued.
    """

    def __init__(self, depth=7, feature_size=512,
                 use_eql=True, gpu_parallelize=False):
        """
        Build the discriminator.

        :param depth: total depth (must equal the Generator depth)
        :param feature_size: deepest extracted feature width
            (must equal the Generator latent_size)
        :param use_eql: use equalized-learning-rate layers if True
        :param gpu_parallelize: wrap conv blocks in ``DataParallel``;
            the final block contains the StdDev layer and is left as-is
        """
        from torch.nn import ModuleList
        from MSG_GAN.CustomLayers import DisGeneralConvBlock, \
            DisFinalBlock, _equalized_conv2d
        from torch.nn import Conv2d
        super().__init__()

        assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            assert feature_size >= np.power(2, depth - 4), \
                "feature size cannot be produced"

        # create state of the object
        self.gpu_parallelize = gpu_parallelize
        self.use_eql = use_eql
        self.depth = depth
        self.feature_size = feature_size

        # factory for the per-scale RGB -> feature converters
        if self.use_eql:
            def from_rgb(out_channels):
                return _equalized_conv2d(3, out_channels, (1, 1), bias=True)
        else:
            def from_rgb(out_channels):
                return Conv2d(3, out_channels, (1, 1), bias=True)

        self.rgb_to_features = ModuleList()
        self.final_converter = from_rgb(self.feature_size)

        self.layers = ModuleList()
        self.final_block = DisFinalBlock(self.feature_size * 2, use_eql=self.use_eql)

        # shallow stages (index > 2) use progressively fewer channels;
        # the three deepest stages run at full width
        for stage in range(self.depth - 1):
            if stage > 2:
                channels = int(self.feature_size // np.power(2, stage - 3))
                block = DisGeneralConvBlock(channels, channels,
                                            use_eql=self.use_eql)
                converter = from_rgb(
                    int(self.feature_size // np.power(2, stage - 2)))
            else:
                block = DisGeneralConvBlock(self.feature_size * 2, self.feature_size,
                                            use_eql=self.use_eql)
                converter = from_rgb(self.feature_size)
            self.layers.append(block)
            self.rgb_to_features.append(converter)

        # the converter feeding the highest-resolution input must match
        # the channel count expected by the last registered conv block
        # (exponent depth - 5 equals the final loop index minus 3)
        if self.depth > 4:
            self.rgb_to_features[self.depth - 2] = \
                from_rgb(self.feature_size // np.power(2, self.depth - 5))
        else:
            self.rgb_to_features[self.depth - 2] = \
                from_rgb(self.feature_size * 2)

        # optionally parallelize all modules except the final block
        # (the StdDev layer makes the final block hard to parallelize here)
        if self.gpu_parallelize:
            for idx in range(len(self.layers)):
                self.layers[idx] = th.nn.DataParallel(self.layers[idx])
                self.rgb_to_features[idx] = th.nn.DataParallel(
                    self.rgb_to_features[idx])

    def forward(self, inputs):
        """
        Score a multi-scale batch of images.

        :param inputs: list of image tensors, one per scale
        :return: raw prediction values
        """
        assert len(inputs) == self.depth, \
            "Mismatch between input and Network scales"

        # start at the finest scale and work down, concatenating the
        # converted image with the running feature map at every step
        y = self.rgb_to_features[self.depth - 2](inputs[self.depth - 1])
        y = self.layers[self.depth - 2](y)
        for x, block, converter in \
                zip(reversed(inputs[1:-1]),
                    reversed(self.layers[:-1]),
                    reversed(self.rgb_to_features[:-1])):
            y = block(th.cat((converter(x), y), dim=1))

        # final block consumes the coarsest input
        y = th.cat((self.final_converter(inputs[0]), y), dim=1)
        return self.final_block(y)
class MSG_GAN:
""" Unconditional MSG-GAN
args:
depth: depth of the GAN (will be used for each generator and discriminator)
latent_size: latent size of the manifold used by the GAN
use_eql: whether to use the equalized learning rate
use_ema: whether to use exponential moving averages.
ema_decay: value of ema decay. Used only if use_ema is True
device: device to run the GAN on (GPU / CPU)
"""
def __init__(self, depth=7, latent_size=512,
use_eql=True, use_ema=True, ema_decay=0.999,
device=th.device("cpu")):
""" constructor for the class """
from torch.nn import DataParallel
self.gen = Generator(depth, latent_size, use_eql=use_eql).to(device)
# Parallelize them if required:
if device == th.device("cuda"):
self.gen = DataParallel(self.gen)
self.dis = Discriminator(depth, latent_size,
use_eql=use_eql, gpu_parallelize=True).to(device)
else:
self.dis = Discriminator(depth, latent_size, use_eql=True).to(device)
# state of the object
self.use_ema = use_ema
self.ema_decay = ema_decay
self.use_eql = use_eql
self.latent_size = latent_size
self.depth = depth
self.device = device
if self.use_ema:
from MSG_GAN.CustomLayers import update_average
# create a shadow copy of the generator
self.gen_shadow = copy.deepcopy(self.gen)
# updater function:
self.ema_updater = update_average
# initialize the gen_shadow weights equal to the
# weights of gen
self.ema_updater(self.gen_shadow, self.gen, beta=0)
# by default the generator and discriminator are in eval mode
self.gen.eval()
self.dis.eval()
if self.use_ema:
self.gen_shadow.eval()
def generate_samples(self, num_samples):
"""
generate samples using this gan
:param num_samples: number of samples to be generated
:return: generated samples tensor: list[ Tensor(B x H x W x C)]
"""
noise = th.randn(num_samples, self.latent_size).to(self.device)
generated_images = self.gen(noise)
# reshape the generated images
generated_images = list(map(lambda x: (x.detach().permute(0, 2, 3, 1) / 2) + 0.5,
generated_images))
return generated_images
def optimize_discriminator(self, dis_optim, noise,
real_batch, loss_fn,
accumulate=False, zero_grad=True,
num_accumulations=1):
"""
performs one step of weight update on discriminator using the batch of data
:param dis_optim: discriminator optimizer
:param noise: input noise of sample generation
:param real_batch: real samples batch
should contain a list of tensors at different scales
:param loss_fn: loss function to be used (object of GANLoss)
:param accumulate: whether to accumulate or make a step
:param zero_grad: used to control the behaviour of grad buffers
:param num_accumulations: number of accumulation steps performed
(required to scale the loss function)
:return: current loss (accumulation scaled value)
"""
# generate a batch of samples
fake_samples = self.gen(noise)
fake_samples = list(map(lambda x: x.detach(), fake_samples))
# scale the loss by the number of accumulation steps performed
# (if not performed, it is 1)
loss = loss_fn.dis_loss(real_batch, fake_samples) / num_accumulations
# optimize discriminator according to the accumulation dynamics
# zero the grad of the discriminator weights if required
if zero_grad:
dis_optim.zero_grad()
# perform the backward pass to accumulate the gradients
loss.backward()
# if not running in accumulate mode, (make a step)
if not accumulate:
dis_optim.step()
return loss.item()
def optimize_generator(self, gen_optim, noise,
real_batch, loss_fn,
accumulate=False, zero_grad=True,
num_accumulations=1):
"""
performs one step of weight update on generator using the batch of data
:param gen_optim: generator optimizer
:param noise: input noise of sample generation
:param real_batch: real samples batch
should contain a list of tensors at different scales
:param loss_fn: loss function to be used (object of GANLoss)
:param accumulate: whether to accumulate or make a step
:param zero_grad: used to control the behaviour of grad buffers
:param num_accumulations: number of accumulation steps performed
(required to scale the loss function)
:return: current loss (accumulation scaled value)
"""
# generate a batch of samples
fake_samples = self.gen(noise)
loss = loss_fn.gen_loss(real_batch, fake_samples) / num_accumulations
# optimize the generator according the accumulation dynamics
if zero_grad:
gen_optim.zero_grad()
# perform backward pass for gradient accumulation
loss.backward()
# perform an update step if not running in the accumulate mode
if not accumulate:
gen_optim.step()
# if self.use_ema is true, apply the moving average here:
# Note that ema update will also be done only during the update
# pass of the function (not during accumulation).
if self.use_ema:
self.ema_updater(self.gen_shadow, self.gen, self.ema_decay)
return loss.item()
def create_grid(self, samples, img_files,
sum_writer=None, reses=None, step=None):
"""
utility function to create a grid of GAN samples
:param samples: generated samples for storing list[Tensors]
:param img_files: list of names of files to write
:param sum_writer: summary writer object
:param reses: resolution strings (used only if sum_writer is not None)
:param step: global step (used only if sum_writer is not None)
:return: None (saves multiple files)
"""
from torchvision.utils import save_image, make_grid
from torch.nn.functional import interpolate
from numpy import sqrt, power, ceil
# dynamically adjust the colour range of the images:
samples = [Generator.adjust_dynamic_range(sample) for sample in samples]
# resize the samples to have same resolution:
for i in range(len(samples)):
samples[i] = interpolate(samples[i],
scale_factor=power(2, self.depth - 1 - i))
# save the images:
for sample, img_file in zip(samples, img_files):
save_image(sample, img_file, nrow=int(sqrt(sample.shape[0])),
normalize=True, scale_each=True, padding=0)
if sum_writer is not None:
for sample, res in zip(samples, reses):
image = make_grid(sample, nrow=int(ceil(sqrt(sample.shape[0]))),
normalize=True, scale_each=True, padding=0)
sum_writer.add_image(res, image, step)
def _downsampled_images(self, images):
"""
private utility function to compute list of downsampled images
:param images: Original sized images
:return: images => list of downsampled images
"""
from torch.nn.functional import avg_pool2d
# create a list of downsampled images from the real images:
images = [images] + [avg_pool2d(images, int(np.power(2, i)))
for i in range(1, self.depth)]
images = list(reversed(images))
return images
def _get_images_and_latents(self, data_store, normalize_latents):
"""
private utility function to obtain random latent_points and
downsampled images from the datastore
:param data_store: object containing the data
:param normalize_latents: boolean for hyper-sphere normalization
:return: images, latents => images and random latent points
"""
# extract current batch of data for training
batch = next(data_store)
images = batch.to(self.device)
extracted_batch_size = images.shape[0]
# list of downsampled versions of images
images = self._downsampled_images(images)
# sample some random latent points
gan_input = th.randn(
extracted_batch_size, self.latent_size).to(self.device)
# normalize them if asked
if normalize_latents:
gan_input = ((gan_input
/ gan_input.norm(dim=-1, keepdim=True))
* (self.latent_size ** 0.5))
return images, gan_input
def train(self, data, gen_optim, dis_optim, loss_fn, normalize_latents=True,
start=1, num_epochs=12, spoofing_factor=1,
feedback_factor=10, checkpoint_factor=1,
data_percentage=100, num_samples=36,
log_dir=None, sample_dir="./samples",
log_fid_values=False, num_fid_images=50000,
save_dir="./models", fid_temp_folder="./samples/fid_imgs/",
fid_real_stats=None, fid_batch_size=64):
"""
Method for training the network
:param data: pytorch dataloader which iterates over images
:param gen_optim: Optimizer for generator.
please wrap this inside a Scheduler if you want to
:param dis_optim: Optimizer for discriminator.
please wrap this inside a Scheduler if you want to
:param loss_fn: Object of GANLoss
:param normalize_latents: whether to normalize the latent vectors during training
:param start: starting epoch number
:param num_epochs: total number of epochs to run for (ending epoch number)
note this is absolute and not relative to start
:param spoofing_factor: number of actual batches used to spoof a bigger batch
for instance, actual batch size is 16 and
spoofing factor is 4, then virtual batch_size is 64
:param feedback_factor: number of logs generated and samples generated
during training per epoch
:param checkpoint_factor: save model after these many epochs
:param data_percentage: amount of data to be used
:param num_samples: number of samples to be drawn for feedback grid
:param log_dir: path to directory for saving the loss.log file
:param sample_dir: path to directory for saving generated samples' grids
:param log_fid_values: boolean for whether to log fid values during training or not
:param num_fid_images: number of images to generate for calculating the FID
:param save_dir: path to directory for saving the trained models
:param fid_temp_folder: path to save the generated images
:param fid_real_stats: path to the npz stats file for real images
:param fid_batch_size: batch size used for generating fid images
Same will be used for calculating the fid too.
:return: None (writes multiple files to disk)
"""
from tensorboardX import SummaryWriter
from shutil import rmtree
from tqdm import tqdm
from scipy.misc import imsave
from MSG_GAN.FID import fid_score
from math import ceil
from MSG_GAN.utils.iter_utils import hn_wrapper
# turn the generator and discriminator into train mode
self.gen.train()
self.dis.train()
assert isinstance(gen_optim, th.optim.Optimizer), \
"gen_optim is not an Optimizer"
assert isinstance(dis_optim, th.optim.Optimizer), \
"dis_optim is not an Optimizer"
print("Starting the training process ... ")
# create the summary writer
sum_writer = SummaryWriter(os.path.join(log_dir, "tensorboard"))
# create a grid of samples and save it
reses = [str(int(np.power(2, dep))) + "_x_"
+ str(int(np.power(2, dep)))
for dep in range(2, self.depth + 2)]
# create fixed_input for debugging
fixed_input = th.randn(num_samples, self.latent_size).to(self.device)
if normalize_latents:
fixed_input = (fixed_input
/ fixed_input.norm(dim=-1, keepdim=True)
* (self.latent_size ** 0.5))
viter_samples = 2 * data.batch_size * spoofing_factor
total_imgs = len(data.dataset)
total_batches = int(total_imgs / viter_samples)
limit = int(ceil((data_percentage / 100) * total_batches))
# create a global time counter
global_time = time.time()
global_step = 0
for epoch in range(start, num_epochs + 1):
start_time = timeit.default_timer() # record time at the start of epoch
print("\nEpoch: %d" % epoch)
# setup the dataloader (where the real images are sampled from)
real_data_store = iter(hn_wrapper(data))
batch_counter = 0 # counter for number of batches completed
# this includes the two Generator passes and spoofing adjusted
# batch_sizes
# Note that this might lose the last batch (samples less than batch_size)
# but the subsequent epochs perform random shuffling prior to starting the
# training for that epoch
while real_data_store.hasnext() and batch_counter < limit:
# perform batch spoofing via gradient accumulation
dis_loss, gen_loss = 0, 0 # losses initialized to zeros
# =================================================================
# discriminator iterations:
# =================================================================
for spoofing_iter in range(spoofing_factor - 1):
# =============================================================
# Discriminator spoofing pass
# =============================================================
# sample images and latents for discriminator pass
images, gan_input = self._get_images_and_latents(
real_data_store, normalize_latents)
# accumulate gradients in the discriminator:
dis_loss += self.optimize_discriminator(
dis_optim, gan_input,
images, loss_fn,
accumulate=True,
zero_grad=(spoofing_iter == 0),
num_accumulations=spoofing_factor)
# =============================================================
# Discriminator update pass
# (note values for accumulate and zero_grad)
# =============================================================
# sample images and latents for discriminator pass
images, gan_input = self._get_images_and_latents(
real_data_store, normalize_latents)
# accumulate final gradients in the discriminator and make a step:
dis_loss += self.optimize_discriminator(
dis_optim, gan_input,
images, loss_fn,
accumulate=False, # perform update
zero_grad=spoofing_factor == 1, # make gradient buffers zero if spoofing_factor is 1
num_accumulations=spoofing_factor)
# =================================================================
# =================================================================
# generator iterations:
# =================================================================
for spoofing_iter in range(spoofing_factor - 1):
# =============================================================
# Generator spoofing pass
# =============================================================
# re-sample images and latents for generator pass
images, gan_input = self._get_images_and_latents(
real_data_store, normalize_latents)
# accumulate gradients in the generator
gen_loss += self.optimize_generator(
gen_optim, gan_input,
images, loss_fn,
accumulate=True,
zero_grad=(spoofing_iter == 0),
num_accumulations=spoofing_factor)
# =============================================================
# Generator update pass
# (note values for accumulate and zero_grad)
# =============================================================
# sample images and latents for generator pass
images, gan_input = self._get_images_and_latents(
real_data_store, normalize_latents)
# accumulate final gradients in the generator and make a step:
gen_loss += self.optimize_generator(
gen_optim, gan_input,
images, loss_fn,
accumulate=False, # perform update
zero_grad=spoofing_factor == 1, # make gradient buffers zero if spoofing_factor is 1
num_accumulations=spoofing_factor)
# =================================================================
# increment the global_step and the batch_counter:
global_step += 1
batch_counter += 1
# provide a loss feedback
if batch_counter % int(limit / feedback_factor) == 0 or \
batch_counter == 1:
elapsed = time.time() - global_time
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Elapsed [%s] batch: %d d_loss: %f g_loss: %f"
% (elapsed, batch_counter, dis_loss, gen_loss))
# add summary of the losses
sum_writer.add_scalar("dis_loss", dis_loss, global_step)
sum_writer.add_scalar("gen_loss", gen_loss, global_step)
# also write the losses to the log file:
if log_dir is not None:
log_file = os.path.join(log_dir, "loss.log")
os.makedirs(os.path.dirname(log_file), exist_ok=True)
with open(log_file, "a") as log:
log.write(str(global_step) + "\t" + str(dis_loss) +
"\t" + str(gen_loss) + "\n")
# create a grid of samples and save it
gen_img_files = [os.path.join(sample_dir, res, "gen_" +
str(epoch) + "_" +
str(batch_counter) + ".png")
for res in reses]
# Make sure all the required directories exist
# otherwise make them
os.makedirs(sample_dir, exist_ok=True)
for gen_img_file in gen_img_files:
os.makedirs(os.path.dirname(gen_img_file), exist_ok=True)
# following zero_grads are required to allow pytorch
# adjust buffers properly on the GPU.
# This causes lesser GPU memory consumption
dis_optim.zero_grad()
gen_optim.zero_grad()
with th.no_grad(): # this makes the training faster.
self.create_grid(
self.gen(fixed_input) if not self.use_ema
else self.gen_shadow(fixed_input),
gen_img_files,
sum_writer,
reses,
global_step)
# calculate the time required for the epoch
stop_time = timeit.default_timer()
print("Time taken for epoch: %.3f secs" % (stop_time - start_time))
if epoch % checkpoint_factor == 0 or epoch == 1 or epoch == num_epochs:
os.makedirs(save_dir, exist_ok=True)
gen_save_file = os.path.join(save_dir, "GAN_GEN_" + str(epoch) + ".pth")
dis_save_file = os.path.join(save_dir, "GAN_DIS_" + str(epoch) + ".pth")
gen_optim_save_file = os.path.join(save_dir,
"GAN_GEN_OPTIM_" + str(epoch) + ".pth")
dis_optim_save_file = os.path.join(save_dir,
"GAN_DIS_OPTIM_" + str(epoch) + ".pth")
th.save(self.gen.state_dict(), gen_save_file)
th.save(self.dis.state_dict(), dis_save_file)
th.save(gen_optim.state_dict(), gen_optim_save_file)
th.save(dis_optim.state_dict(), dis_optim_save_file)
if self.use_ema:
gen_shadow_save_file = os.path.join(save_dir, "GAN_GEN_SHADOW_"
+ str(epoch) + ".pth")
th.save(self.gen_shadow.state_dict(), gen_shadow_save_file)
print("log_fid_values:", log_fid_values)
if log_fid_values: # perform the following fid calculations during training
# if the boolean is set to true
# ==================================================================
# Perform the FID calculation during training for estimating
# the quality of the training
# ==================================================================
# setup the directory for generating the images
if os.path.isdir(fid_temp_folder):
rmtree(fid_temp_folder)
os.makedirs(fid_temp_folder, exist_ok=True)
# generate the images:
print("generating images for fid calculation ...")
pbar = tqdm(total=num_fid_images)
generated_images = 0
while generated_images < num_fid_images:
b_size = min(fid_batch_size, num_fid_images - generated_images)
points = th.randn(b_size, self.latent_size).to(self.device)
if normalize_latents:
points = (points / points.norm(dim=1, keepdim=True)) \
* (self.latent_size ** 0.5)
imgs = self.gen(points)[-1].detach()
for i in range(len(imgs)):
imgs[i] = Generator.adjust_dynamic_range(imgs[i])
pbar.update(b_size)
for img in imgs:
imsave(os.path.join(fid_temp_folder,
str(generated_images) + ".jpg"),
img.permute(1, 2, 0).cpu())
generated_images += 1
pbar.close()
# compute the fid now:
fid = fid_score.calculate_fid_given_paths(
(fid_real_stats, fid_temp_folder),
fid_batch_size,
True if self.device == th.device("cuda") else False,
2048 # using he default value
)
# print the compute fid value:
print("FID at epoch %d: %.6f" % (epoch, fid))
# log the fid value in tensorboard:
sum_writer.add_scalar("FID", fid, epoch)
# note that for fid value, the global step is the epoch number.
# it is not the global step. This makes the fid graph more informative
# ==================================================================
print("Training completed ...")
# return the generator and discriminator back to eval mode
self.gen.eval()
self.dis.eval()
| [
"MSG_GAN.CustomLayers.DisFinalBlock",
"numpy.sqrt",
"MSG_GAN.CustomLayers.DisGeneralConvBlock",
"MSG_GAN.utils.iter_utils.hn_wrapper",
"copy.deepcopy",
"datetime.timedelta",
"torch.nn.ModuleList",
"MSG_GAN.CustomLayers._equalized_conv2d",
"os.path.isdir",
"MSG_GAN.CustomLayers.GenInitialBlock",
... | [((3641, 3669), 'torch.clamp', 'th.clamp', (['data'], {'min': '(0)', 'max': '(1)'}), '(data, min=0, max=1)\n', (3649, 3669), True, 'import torch as th\n'), ((5466, 5478), 'torch.nn.ModuleList', 'ModuleList', ([], {}), '()\n', (5476, 5478), False, 'from torch.nn import ModuleList\n'), ((5641, 5653), 'torch.nn.ModuleList', 'ModuleList', ([], {}), '()\n', (5651, 5653), False, 'from torch.nn import ModuleList\n'), ((5681, 5739), 'MSG_GAN.CustomLayers.DisFinalBlock', 'DisFinalBlock', (['(self.feature_size * 2)'], {'use_eql': 'self.use_eql'}), '(self.feature_size * 2, use_eql=self.use_eql)\n', (5694, 5739), False, 'from MSG_GAN.CustomLayers import DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\n'), ((8275, 8305), 'torch.cat', 'th.cat', (['(input_part, y)'], {'dim': '(1)'}), '((input_part, y), dim=1)\n', (8281, 8305), True, 'import torch as th\n'), ((9007, 9023), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (9016, 9023), True, 'import torch as th\n'), ((21831, 21842), 'time.time', 'time.time', ([], {}), '()\n', (21840, 21842), False, 'import time\n'), ((8070, 8100), 'torch.cat', 'th.cat', (['(input_part, y)'], {'dim': '(1)'}), '((input_part, y), dim=1)\n', (8076, 8100), True, 'import torch as th\n'), ((9250, 9267), 'torch.device', 'th.device', (['"""cuda"""'], {}), "('cuda')\n", (9259, 9267), True, 'import torch as th\n'), ((9292, 9314), 'torch.nn.DataParallel', 'DataParallel', (['self.gen'], {}), '(self.gen)\n', (9304, 9314), False, 'from torch.nn import DataParallel\n'), ((9947, 9970), 'copy.deepcopy', 'copy.deepcopy', (['self.gen'], {}), '(self.gen)\n', (9960, 9970), False, 'import copy\n'), ((20991, 21027), 'os.path.join', 'os.path.join', (['log_dir', '"""tensorboard"""'], {}), "(log_dir, 'tensorboard')\n", (21003, 21027), False, 'import os\n'), ((21722, 21765), 'math.ceil', 'ceil', (['(data_percentage / 100 * total_batches)'], {}), '(data_percentage / 100 * total_batches)\n', (21726, 21765), False, 'from math import ceil\n'), ((21944, 
21966), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21964, 21966), False, 'import timeit\n'), ((29480, 29502), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (29500, 29502), False, 'import timeit\n'), ((982, 1004), 'numpy.power', 'np.power', (['(2)', '(depth - 4)'], {}), '(2, depth - 4)\n', (990, 1004), True, 'import numpy as np\n'), ((1383, 1435), 'MSG_GAN.CustomLayers._equalized_conv2d', '_equalized_conv2d', (['in_channels', '(3)', '(1, 1)'], {'bias': '(True)'}), '(in_channels, 3, (1, 1), bias=True)\n', (1400, 1435), False, 'from MSG_GAN.CustomLayers import DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\n'), ((1510, 1551), 'torch.nn.Conv2d', 'Conv2d', (['in_channels', '(3)', '(1, 1)'], {'bias': '(True)'}), '(in_channels, 3, (1, 1), bias=True)\n', (1516, 1551), False, 'from torch.nn import Conv2d\n'), ((1667, 1722), 'MSG_GAN.CustomLayers.GenInitialBlock', 'GenInitialBlock', (['self.latent_size'], {'use_eql': 'self.use_eql'}), '(self.latent_size, use_eql=self.use_eql)\n', (1682, 1722), False, 'from MSG_GAN.CustomLayers import GenGeneralConvBlock, GenInitialBlock, _equalized_conv2d\n'), ((1920, 1997), 'MSG_GAN.CustomLayers.GenGeneralConvBlock', 'GenGeneralConvBlock', (['self.latent_size', 'self.latent_size'], {'use_eql': 'self.use_eql'}), '(self.latent_size, self.latent_size, use_eql=self.use_eql)\n', (1939, 1997), False, 'from MSG_GAN.CustomLayers import GenGeneralConvBlock, GenInitialBlock, _equalized_conv2d\n'), ((3525, 3550), 'numpy.float32', 'np.float32', (['drange_out[0]'], {}), '(drange_out[0])\n', (3535, 3550), True, 'import numpy as np\n'), ((4855, 4877), 'numpy.power', 'np.power', (['(2)', '(depth - 4)'], {}), '(2, depth - 4)\n', (4863, 4877), True, 'import numpy as np\n'), ((5260, 5313), 'MSG_GAN.CustomLayers._equalized_conv2d', '_equalized_conv2d', (['(3)', 'out_channels', '(1, 1)'], {'bias': '(True)'}), '(3, out_channels, (1, 1), bias=True)\n', (5277, 5313), False, 'from MSG_GAN.CustomLayers import 
DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\n'), ((5391, 5433), 'torch.nn.Conv2d', 'Conv2d', (['(3)', 'out_channels', '(1, 1)'], {'bias': '(True)'}), '(3, out_channels, (1, 1), bias=True)\n', (5397, 5433), False, 'from torch.nn import Conv2d\n'), ((6196, 6284), 'MSG_GAN.CustomLayers.DisGeneralConvBlock', 'DisGeneralConvBlock', (['(self.feature_size * 2)', 'self.feature_size'], {'use_eql': 'self.use_eql'}), '(self.feature_size * 2, self.feature_size, use_eql=self.\n use_eql)\n', (6215, 6284), False, 'from MSG_GAN.CustomLayers import DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\n'), ((6971, 7005), 'torch.nn.DataParallel', 'th.nn.DataParallel', (['self.layers[i]'], {}), '(self.layers[i])\n', (6989, 7005), True, 'import torch as th\n'), ((7048, 7091), 'torch.nn.DataParallel', 'th.nn.DataParallel', (['self.rgb_to_features[i]'], {}), '(self.rgb_to_features[i])\n', (7066, 7091), True, 'import torch as th\n'), ((10644, 10683), 'torch.randn', 'th.randn', (['num_samples', 'self.latent_size'], {}), '(num_samples, self.latent_size)\n', (10652, 10683), True, 'import torch as th\n'), ((17393, 17441), 'torch.randn', 'th.randn', (['extracted_batch_size', 'self.latent_size'], {}), '(extracted_batch_size, self.latent_size)\n', (17401, 17441), True, 'import torch as th\n'), ((21295, 21334), 'torch.randn', 'th.randn', (['num_samples', 'self.latent_size'], {}), '(num_samples, self.latent_size)\n', (21303, 21334), True, 'import torch as th\n'), ((22157, 22173), 'MSG_GAN.utils.iter_utils.hn_wrapper', 'hn_wrapper', (['data'], {}), '(data)\n', (22167, 22173), False, 'from MSG_GAN.utils.iter_utils import hn_wrapper\n'), ((29684, 29720), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (29695, 29720), False, 'import os\n'), ((3373, 3398), 'numpy.float32', 'np.float32', (['drange_out[1]'], {}), '(drange_out[1])\n', (3383, 3398), True, 'import numpy as np\n'), ((3401, 3426), 'numpy.float32', 'np.float32', 
(['drange_out[0]'], {}), '(drange_out[0])\n', (3411, 3426), True, 'import numpy as np\n'), ((3452, 3476), 'numpy.float32', 'np.float32', (['drange_in[1]'], {}), '(drange_in[1])\n', (3462, 3476), True, 'import numpy as np\n'), ((3479, 3503), 'numpy.float32', 'np.float32', (['drange_in[0]'], {}), '(drange_in[0])\n', (3489, 3503), True, 'import numpy as np\n'), ((3553, 3577), 'numpy.float32', 'np.float32', (['drange_in[0]'], {}), '(drange_in[0])\n', (3563, 3577), True, 'import numpy as np\n'), ((6654, 6672), 'numpy.power', 'np.power', (['(2)', '(i - 3)'], {}), '(2, i - 3)\n', (6662, 6672), True, 'import numpy as np\n'), ((15546, 15574), 'numpy.power', 'power', (['(2)', '(self.depth - 1 - i)'], {}), '(2, self.depth - 1 - i)\n', (15551, 15574), False, 'from numpy import sqrt, power, ceil\n'), ((28535, 28573), 'os.makedirs', 'os.makedirs', (['sample_dir'], {'exist_ok': '(True)'}), '(sample_dir, exist_ok=True)\n', (28546, 28573), False, 'import os\n'), ((31348, 31378), 'os.path.isdir', 'os.path.isdir', (['fid_temp_folder'], {}), '(fid_temp_folder)\n', (31361, 31378), False, 'import os\n'), ((31448, 31491), 'os.makedirs', 'os.makedirs', (['fid_temp_folder'], {'exist_ok': '(True)'}), '(fid_temp_folder, exist_ok=True)\n', (31459, 31491), False, 'import os\n'), ((31634, 31660), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_fid_images'}), '(total=num_fid_images)\n', (31638, 31660), False, 'from tqdm import tqdm\n'), ((15711, 15732), 'numpy.sqrt', 'sqrt', (['sample.shape[0]'], {}), '(sample.shape[0])\n', (15715, 15732), False, 'from numpy import sqrt, power, ceil\n'), ((16513, 16527), 'numpy.power', 'np.power', (['(2)', 'i'], {}), '(2, i)\n', (16521, 16527), True, 'import numpy as np\n'), ((21156, 21172), 'numpy.power', 'np.power', (['(2)', 'dep'], {}), '(2, dep)\n', (21164, 21172), True, 'import numpy as np\n'), ((27160, 27171), 'time.time', 'time.time', ([], {}), '()\n', (27169, 27171), False, 'import time\n'), ((27220, 27255), 'datetime.timedelta', 'datetime.timedelta', ([], 
{'seconds': 'elapsed'}), '(seconds=elapsed)\n', (27238, 27255), False, 'import datetime\n'), ((27750, 27783), 'os.path.join', 'os.path.join', (['log_dir', '"""loss.log"""'], {}), "(log_dir, 'loss.log')\n", (27762, 27783), False, 'import os\n'), ((29016, 29028), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (29026, 29028), True, 'import torch as th\n'), ((31404, 31427), 'shutil.rmtree', 'rmtree', (['fid_temp_folder'], {}), '(fid_temp_folder)\n', (31410, 31427), False, 'from shutil import rmtree\n'), ((2196, 2214), 'numpy.power', 'np.power', (['(2)', '(i - 3)'], {}), '(2, i - 3)\n', (2204, 2214), True, 'import numpy as np\n'), ((2261, 2279), 'numpy.power', 'np.power', (['(2)', '(i - 2)'], {}), '(2, i - 2)\n', (2269, 2279), True, 'import numpy as np\n'), ((2394, 2412), 'numpy.power', 'np.power', (['(2)', '(i - 2)'], {}), '(2, i - 2)\n', (2402, 2412), True, 'import numpy as np\n'), ((5931, 5949), 'numpy.power', 'np.power', (['(2)', '(i - 3)'], {}), '(2, i - 3)\n', (5939, 5949), True, 'import numpy as np\n'), ((5997, 6015), 'numpy.power', 'np.power', (['(2)', '(i - 3)'], {}), '(2, i - 3)\n', (6005, 6015), True, 'import numpy as np\n'), ((6133, 6151), 'numpy.power', 'np.power', (['(2)', '(i - 2)'], {}), '(2, i - 2)\n', (6141, 6151), True, 'import numpy as np\n'), ((21102, 21118), 'numpy.power', 'np.power', (['(2)', 'dep'], {}), '(2, dep)\n', (21110, 21118), True, 'import numpy as np\n'), ((27820, 27845), 'os.path.dirname', 'os.path.dirname', (['log_file'], {}), '(log_file)\n', (27835, 27845), False, 'import os\n'), ((28665, 28694), 'os.path.dirname', 'os.path.dirname', (['gen_img_file'], {}), '(gen_img_file)\n', (28680, 28694), False, 'import os\n'), ((15946, 15967), 'numpy.sqrt', 'sqrt', (['sample.shape[0]'], {}), '(sample.shape[0])\n', (15950, 15967), False, 'from numpy import sqrt, power, ceil\n'), ((31885, 31919), 'torch.randn', 'th.randn', (['b_size', 'self.latent_size'], {}), '(b_size, self.latent_size)\n', (31893, 31919), True, 'import torch as th\n'), ((32954, 
32971), 'torch.device', 'th.device', (['"""cuda"""'], {}), "('cuda')\n", (32963, 32971), True, 'import torch as th\n')] |
"""
Frontend for xESMF, exposed to users.
"""
import warnings
import cf_xarray as cfxr
import numpy as np
import scipy.sparse as sps
import xarray as xr
from xarray import DataArray
from .backend import Grid, LocStream, Mesh, add_corner, esmf_regrid_build, esmf_regrid_finalize
from .smm import _combine_weight_multipoly, add_nans_to_weights, apply_weights, read_weights
from .util import split_polygons_and_holes
try:
    import dask.array as da
    dask_array_type = (da.Array,) # for isinstance checks
except ImportError:
    # Dask is optional: an empty tuple makes every isinstance() check
    # against dask_array_type fail, so the dask code paths are skipped.
    dask_array_type = ()
def as_2d_mesh(lon, lat):
    """Return ``lon`` and ``lat`` as a matching pair of 2D arrays.

    1D inputs are expanded with ``np.meshgrid``; 2D inputs are returned
    unchanged after a shape-consistency check.
    """
    ndims = (lon.ndim, lat.ndim)
    if ndims == (1, 1):
        lon, lat = np.meshgrid(lon, lat)
    elif ndims == (2, 2):
        assert lon.shape == lat.shape, 'lon and lat should have same shape'
    else:
        raise ValueError('lon and lat should be both 1D or 2D')
    return lon, lat
def _get_lon_lat(ds):
"""Return lon and lat extracted from ds."""
try:
lon = ds.cf['longitude']
lat = ds.cf['latitude']
except (KeyError, AttributeError):
# KeyError if cfxr doesn't detect the coords
# AttributeError if ds is a dict
lon = ds['lon']
lat = ds['lat']
return lon, lat
def _get_lon_lat_bounds(ds):
"""Return bounds of lon and lat extracted from ds."""
if 'lat_b' in ds and 'lon_b' in ds:
# Old way.
return ds['lon_b'], ds['lat_b']
# else : cf-xarray way
try:
lon_bnds = ds.cf.get_bounds('longitude')
lat_bnds = ds.cf.get_bounds('latitude')
except KeyError: # bounds are not already present
if ds.cf['longitude'].ndim > 1:
# We cannot infer 2D bounds, raise KeyError as custom "lon_b" is missing.
raise KeyError('lon_b')
lon_name = ds.cf['longitude'].name
lat_name = ds.cf['latitude'].name
ds2 = ds.cf.add_bounds([lon_name, lat_name])
lon_bnds = ds2.cf.get_bounds('longitude')
lat_bnds = ds2.cf.get_bounds('latitude')
# Convert from CF bounds to xESMF bounds.
# order=None is because we don't want to assume the dimension order for 2D bounds.
lon_b = cfxr.bounds_to_vertices(lon_bnds, 'bounds', order=None)
lat_b = cfxr.bounds_to_vertices(lat_bnds, 'bounds', order=None)
return lon_b, lat_b
def ds_to_ESMFgrid(ds, need_bounds=False, periodic=None, append=None):
    """
    Build an ESMF.Grid object from an xarray Dataset or a dictionary.

    Parameters
    ----------
    ds : xarray DataSet or dictionary
        Contains variables ``lon``, ``lat``, optionally ``mask``,
        and ``lon_b``, ``lat_b`` (or CF bounds) if need_bounds=True.
        Arrays are C-ordered ``(n_lat, n_lon)`` / ``(n_y, n_x)`` and are
        transposed to the Fortran ordering ESMF expects.
    need_bounds : bool, optional
        Attach cell-corner coordinates to the grid?
    periodic : bool, optional
        Periodic in longitude?
    append : optional
        Unused; kept for backward compatibility.

    Returns
    -------
    grid : ESMF.Grid object
    shape : tuple
        Shape of the 2D center coordinates.
    """
    lon, lat = _get_lon_lat(ds)
    # np.asarray (rather than .values) also works for dictionary input
    lon, lat = as_2d_mesh(np.asarray(lon), np.asarray(lat))
    mask = np.asarray(ds['mask']) if 'mask' in ds else None
    # transpose so ESMF receives Fortran-ordered arrays
    if mask is None:
        grid = Grid.from_xarray(lon.T, lat.T, periodic=periodic, mask=None)
    else:
        grid = Grid.from_xarray(lon.T, lat.T, periodic=periodic, mask=mask.T)
    if need_bounds:
        lon_b, lat_b = _get_lon_lat_bounds(ds)
        lon_b, lat_b = as_2d_mesh(np.asarray(lon_b), np.asarray(lat_b))
        add_corner(grid, lon_b.T, lat_b.T)
    return grid, lon.shape
def ds_to_ESMFlocstream(ds):
    """
    Build an ESMF.LocStream (an unstructured list of points) from an
    xarray Dataset or a dictionary.

    Parameters
    ----------
    ds : xarray DataSet or dictionary
        Contains 1D variables ``lon``, ``lat`` of equal length.

    Returns
    -------
    locstream : ESMF.LocStream object
    shape : tuple
        (1, n_points), the pseudo-2D shape of the point list.
    """
    lon, lat = _get_lon_lat(ds)
    lon, lat = np.asarray(lon), np.asarray(lat)
    if lon.ndim > 1:
        raise ValueError('lon can only be 1d')
    if lat.ndim > 1:
        raise ValueError('lat can only be 1d')
    assert lon.shape == lat.shape
    locstream = LocStream.from_xarray(lon, lat)
    return locstream, (1,) + lon.shape
def polys_to_ESMFmesh(polys):
    """
    Build an ESMF.Mesh from a sequence of shapely polygons.

    MultiPolygons are split into their constituent polygons; interior
    rings (holes) are dropped, with a warning when any are found.

    Parameters
    ----------
    polys : sequence of shapely Polygon or MultiPolygon

    Returns
    -------
    exterior : ESMF.Mesh
        A mesh where elements are the exterior rings of the polygons.
    tuple
        The shape of the mesh : (1, N_elements)
    """
    exteriors, interiors, _, _ = split_polygons_and_holes(polys)
    if len(interiors) > 0:
        warnings.warn(
            'Some passed polygons have holes, those are not represented in the returned Mesh.'
        )
    return Mesh.from_polygons(exteriors), (1, len(exteriors))
class BaseRegridder(object):
    def __init__(
        self,
        grid_in,
        grid_out,
        method,
        filename=None,
        reuse_weights=False,
        extrap_method=None,
        extrap_dist_exponent=None,
        extrap_num_src_pnts=None,
        weights=None,
        ignore_degenerate=None,
    ):
        """
        Base xESMF regridding class supporting ESMF objects: `Grid`, `Mesh` and `LocStream`.
        Create or use existing subclasses to support other types of input objects. See for example `Regridder`
        to regrid `xarray.DataArray` objects, or `SpatialAverager` to average grids over regions defined by polygons.
        Parameters
        ----------
        grid_in, grid_out : ESMF Grid or Locstream or Mesh
            Input and output grid structures as ESMFpy objects.
        method : str
            Regridding method. Options are
            - 'bilinear'
            - 'conservative', **need grid corner information**
            - 'conservative_normed', **need grid corner information**
            - 'patch'
            - 'nearest_s2d'
            - 'nearest_d2s'
        filename : str, optional
            Name for the weight file. The default naming scheme is::
                {method}_{Ny_in}x{Nx_in}_{Ny_out}x{Nx_out}.nc
            e.g. bilinear_400x600_300x400.nc
        reuse_weights : bool, optional
            Whether to read existing weight file to save computing time.
            False by default (i.e. re-compute, not reuse).
        extrap_method : str, optional
            Extrapolation method. Options are
            - 'inverse_dist'
            - 'nearest_s2d'
        extrap_dist_exponent : float, optional
            The exponent to raise the distance to when calculating weights for the
            extrapolation method. If none are specified, defaults to 2.0
        extrap_num_src_pnts : int, optional
            The number of source points to use for the extrapolation methods
            that use more than one source point. If none are specified, defaults to 8
        weights : None, coo_matrix, dict, str, Dataset, Path,
            Regridding weights, stored as
            - a scipy.sparse COO matrix,
            - a dictionary with keys `row_dst`, `col_src` and `weights`,
            - an xarray Dataset with data variables `col`, `row` and `S`,
            - or a path to a netCDF file created by ESMF.
            If None, compute the weights.
        ignore_degenerate : bool, optional
            If False (default), raise error if grids contain degenerated cells
            (i.e. triangles or lines, instead of quadrilaterals)
        Returns
        -------
        baseregridder : xESMF BaseRegridder object
        """
        self.grid_in = grid_in
        self.grid_out = grid_out
        self.method = method
        self.reuse_weights = reuse_weights
        self.extrap_method = extrap_method
        self.extrap_dist_exponent = extrap_dist_exponent
        self.extrap_num_src_pnts = extrap_num_src_pnts
        self.ignore_degenerate = ignore_degenerate
        # periodic iff the input grid carries a periodic dimension attribute
        self.periodic = getattr(self.grid_in, 'periodic_dim', None) is not None
        # LocStream and Mesh inputs are handled as flat point/element sequences
        self.sequence_in = isinstance(self.grid_in, (LocStream, Mesh))
        self.sequence_out = isinstance(self.grid_out, (LocStream, Mesh))
        # record grid shape information
        # We need to invert Grid shapes to respect xESMF's convention (y, x).
        self.shape_in = self.grid_in.get_shape()[::-1]
        self.shape_out = self.grid_out.get_shape()[::-1]
        self.n_in = self.shape_in[0] * self.shape_in[1]
        self.n_out = self.shape_out[0] * self.shape_out[1]
        # some logic about reusing weights with either filename or weights args
        if reuse_weights and (filename is None) and (weights is None):
            raise ValueError('To reuse weights, you need to provide either filename or weights.')
        if not reuse_weights and weights is None:
            weights = self._compute_weights()  # Dictionary of weights
        else:
            # reuse path: the filename takes precedence over in-memory weights
            weights = filename if filename is not None else weights
        assert weights is not None
        # Convert weights, whatever their format, to a sparse coo matrix
        self.weights = read_weights(weights, self.n_in, self.n_out)
        # replace zeros by NaN in mask
        if self.grid_out.mask is not None and self.grid_out.mask[0] is not None:
            self.weights = add_nans_to_weights(self.weights)
        # follows legacy logic of writing weights if filename is provided
        if filename is not None and not reuse_weights:
            self.to_netcdf(filename=filename)
        # set default weights filename if none given
        self.filename = self._get_default_filename() if filename is None else filename
@property
def A(self):
message = (
'regridder.A is deprecated and will be removed in future versions. '
'Use regridder.weights instead.'
)
warnings.warn(message, DeprecationWarning)
# DeprecationWarning seems to be ignored by certain Python environments
# Also print to make sure users notice this.
print(message)
return self.weights
def _get_default_filename(self):
# e.g. bilinear_400x600_300x400.nc
filename = '{0}_{1}x{2}_{3}x{4}'.format(
self.method,
self.shape_in[0],
self.shape_in[1],
self.shape_out[0],
self.shape_out[1],
)
if self.periodic:
filename += '_peri.nc'
else:
filename += '.nc'
return filename
def _compute_weights(self):
regrid = esmf_regrid_build(
self.grid_in,
self.grid_out,
self.method,
extrap_method=self.extrap_method,
extrap_dist_exponent=self.extrap_dist_exponent,
extrap_num_src_pnts=self.extrap_num_src_pnts,
ignore_degenerate=self.ignore_degenerate,
)
w = regrid.get_weights_dict(deep_copy=True)
esmf_regrid_finalize(regrid) # only need weights, not regrid object
return w
def __call__(self, indata, keep_attrs=False):
"""
Apply regridding to input data.
Parameters
----------
indata : numpy array, dask array, xarray DataArray or Dataset.
The rightmost two dimensions must be the same as ``ds_in``.
Can have arbitrary additional dimensions.
Examples of valid shapes
- (n_lat, n_lon), if ``ds_in`` has shape (n_lat, n_lon)
- (n_time, n_lev, n_y, n_x), if ``ds_in`` has shape (Ny, n_x)
Transpose your input data if the horizontal dimensions are not
the rightmost two dimensions.
keep_attrs : bool, optional
Keep attributes for xarray DataArrays or Datasets.
Defaults to False.
Returns
-------
outdata : Data type is the same as input data type.
On the same horizontal grid as ``ds_out``,
with extra dims in ``dr_in``.
Assuming ``ds_out`` has the shape of (n_y_out, n_x_out),
examples of returning shapes are
- (n_y_out, n_x_out), if ``dr_in`` is 2D
- (n_time, n_lev, n_y_out, n_x_out), if ``dr_in`` has shape
(n_time, n_lev, n_y, n_x)
"""
if isinstance(indata, np.ndarray):
return self.regrid_numpy(indata)
elif isinstance(indata, dask_array_type):
return self.regrid_dask(indata)
elif isinstance(indata, xr.DataArray):
return self.regrid_dataarray(indata, keep_attrs=keep_attrs)
elif isinstance(indata, xr.Dataset):
return self.regrid_dataset(indata, keep_attrs=keep_attrs)
else:
raise TypeError(
'input must be numpy array, dask array, ' 'xarray DataArray or Dataset!'
)
@staticmethod
def _regrid_array(indata, *, weights, shape_in, shape_out, sequence_in):
if sequence_in:
indata = np.expand_dims(indata, axis=-2)
return apply_weights(weights, indata, shape_in, shape_out)
@property
def _regrid_kwargs(self):
return {
'weights': self.weights,
'sequence_in': self.sequence_in,
'shape_in': self.shape_in,
'shape_out': self.shape_out,
}
def regrid_numpy(self, indata):
"""See __call__()."""
outdata = self._regrid_array(indata, **self._regrid_kwargs)
return outdata
def regrid_dask(self, indata):
"""See __call__()."""
extra_chunk_shape = indata.chunksize[0:-2]
output_chunk_shape = extra_chunk_shape + self.shape_out
outdata = da.map_blocks(
self._regrid_array,
indata,
dtype=float,
chunks=output_chunk_shape,
**self._regrid_kwargs,
)
return outdata
def regrid_dataarray(self, dr_in, keep_attrs=False):
"""See __call__()."""
input_horiz_dims, temp_horiz_dims = self._parse_xrinput(dr_in)
dr_out = xr.apply_ufunc(
self._regrid_array,
dr_in,
kwargs=self._regrid_kwargs,
input_core_dims=[input_horiz_dims],
output_core_dims=[temp_horiz_dims],
dask='parallelized',
output_dtypes=[float],
output_sizes={
temp_horiz_dims[0]: self.shape_out[0],
temp_horiz_dims[1]: self.shape_out[1],
},
keep_attrs=keep_attrs,
)
return self._format_xroutput(dr_out, temp_horiz_dims)
def regrid_dataset(self, ds_in, keep_attrs=False):
"""See __call__()."""
# most logic is the same as regrid_dataarray()
# the major caution is that some data variables might not contain
# the correct horizontal dimension names.
# get the first data variable to infer input_core_dims
name, dr_in = next(iter(ds_in.items()))
input_horiz_dims, temp_horiz_dims = self._parse_xrinput(dr_in)
# help user debugging invalid horizontal dimensions
print(
'using dimensions {} from data variable {} '
'as the horizontal dimensions for this dataset.'.format(input_horiz_dims, name)
)
ds_out = xr.apply_ufunc(
self._regrid_array,
ds_in,
kwargs=self._regrid_kwargs,
input_core_dims=[input_horiz_dims],
output_core_dims=[temp_horiz_dims],
dask='parallelized',
output_dtypes=[float],
output_sizes={
temp_horiz_dims[0]: self.shape_out[0],
temp_horiz_dims[1]: self.shape_out[1],
},
keep_attrs=keep_attrs,
)
return self._format_xroutput(ds_out, temp_horiz_dims)
def _parse_xrinput(self, dr_in):
# Get input horiz dim names and set output horiz dim names
if self.sequence_in:
input_horiz_dims = dr_in.dims[-1:]
else:
input_horiz_dims = dr_in.dims[-2:]
if self.sequence_out:
temp_horiz_dims = ['dummy', 'locations']
else:
temp_horiz_dims = [s + '_new' for s in input_horiz_dims]
if self.sequence_in and not self.sequence_out:
temp_horiz_dims = ['dummy_new'] + temp_horiz_dims
return input_horiz_dims, temp_horiz_dims
def _format_xroutput(self, out, new_dims=None):
out.attrs['regrid_method'] = self.method
return out
def __repr__(self):
info = (
'xESMF Regridder \n'
'Regridding algorithm: {} \n'
'Weight filename: {} \n'
'Reuse pre-computed weights? {} \n'
'Input grid shape: {} \n'
'Output grid shape: {} \n'
'Periodic in longitude? {}'.format(
self.method,
self.filename,
self.reuse_weights,
self.shape_in,
self.shape_out,
self.periodic,
)
)
return info
def to_netcdf(self, filename=None):
"""Save weights to disk as a netCDF file."""
if filename is None:
filename = self.filename
w = self.weights
dim = 'n_s'
ds = xr.Dataset({'S': (dim, w.data), 'col': (dim, w.col + 1), 'row': (dim, w.row + 1)})
ds.to_netcdf(filename)
return filename
class Regridder(BaseRegridder):
    """Regridder between two fixed grids/locstreams described by xarray
    Datasets (or dicts); adds output-coordinate bookkeeping on top of
    BaseRegridder so outputs carry the destination grid's dims and coords."""
    def __init__(
        self,
        ds_in,
        ds_out,
        method,
        locstream_in=False,
        locstream_out=False,
        periodic=False,
        **kwargs,
    ):
        """
        Make xESMF regridder
        Parameters
        ----------
        ds_in, ds_out : xarray DataSet, or dictionary
            Contain input and output grid coordinates.
            All variables that the cf-xarray accessor understand are accepted.
            Otherwise, look for ``lon``, ``lat``,
            optionally ``lon_b``, ``lat_b`` for conservative methods,
            and ``mask``. Note that for `mask`, the ESMF convention is used,
            where masked values are identified by 0, and non-masked values by 1.
            For conservative methods, if bounds are not present, they will be
            computed using `cf-xarray` (only 1D coordinates are currently supported).
            Shape can be 1D (n_lon,) and (n_lat,) for rectilinear grids,
            or 2D (n_y, n_x) for general curvilinear grids.
            Shape of bounds should be (n+1,) or (n_y+1, n_x+1).
            CF-bounds (shape (n, 2) or (n, m, 4)) are also accepted if they are
            accessible through the cf-xarray accessor.
            If either dataset includes a 2d mask variable, that will also be
            used to inform the regridding.
        method : str
            Regridding method. Options are
            - 'bilinear'
            - 'conservative', **need grid corner information**
            - 'conservative_normed', **need grid corner information**
            - 'patch'
            - 'nearest_s2d'
            - 'nearest_d2s'
        periodic : bool, optional
            Periodic in longitude? Default to False.
            Only useful for global grids with non-conservative regridding.
            Will be forced to False for conservative regridding.
        filename : str, optional
            Name for the weight file. The default naming scheme is::
                {method}_{Ny_in}x{Nx_in}_{Ny_out}x{Nx_out}.nc
            e.g. bilinear_400x600_300x400.nc
        reuse_weights : bool, optional
            Whether to read existing weight file to save computing time.
            False by default (i.e. re-compute, not reuse).
        extrap_method : str, optional
            Extrapolation method. Options are
            - 'inverse_dist'
            - 'nearest_s2d'
        extrap_dist_exponent : float, optional
            The exponent to raise the distance to when calculating weights for the
            extrapolation method. If none are specified, defaults to 2.0
        extrap_num_src_pnts : int, optional
            The number of source points to use for the extrapolation methods
            that use more than one source point. If none are specified, defaults to 8
        weights : None, coo_matrix, dict, str, Dataset, Path,
            Regridding weights, stored as
            - a scipy.sparse COO matrix,
            - a dictionary with keys `row_dst`, `col_src` and `weights`,
            - an xarray Dataset with data variables `col`, `row` and `S`,
            - or a path to a netCDF file created by ESMF.
            If None, compute the weights.
        ignore_degenerate : bool, optional
            If False (default), raise error if grids contain degenerated cells
            (i.e. triangles or lines, instead of quadrilaterals)
        Returns
        -------
        regridder : xESMF regridder object
        """
        # Locstream (unstructured point list) support is method-dependent.
        methods_avail_ls_in = ['nearest_s2d', 'nearest_d2s']
        methods_avail_ls_out = ['bilinear', 'patch'] + methods_avail_ls_in
        if locstream_in and method not in methods_avail_ls_in:
            raise ValueError(
                f'locstream input is only available for method in {methods_avail_ls_in}'
            )
        if locstream_out and method not in methods_avail_ls_out:
            raise ValueError(
                f'locstream output is only available for method in {methods_avail_ls_out}'
            )
        # record basic switches
        if method in ['conservative', 'conservative_normed']:
            need_bounds = True
            periodic = False  # bound shape will not be N+1 for periodic grid
        else:
            need_bounds = False
        # construct ESMF grid, with some shape checking
        if locstream_in:
            grid_in, shape_in = ds_to_ESMFlocstream(ds_in)
        else:
            grid_in, shape_in = ds_to_ESMFgrid(ds_in, need_bounds=need_bounds, periodic=periodic)
        if locstream_out:
            grid_out, shape_out = ds_to_ESMFlocstream(ds_out)
        else:
            grid_out, shape_out = ds_to_ESMFgrid(ds_out, need_bounds=need_bounds)
        # Create the BaseRegridder (computes or loads the weights)
        super().__init__(grid_in, grid_out, method, **kwargs)
        # record output grid and metadata
        lon_out, lat_out = _get_lon_lat(ds_out)
        self._lon_out, self._lat_out = np.asarray(lon_out), np.asarray(lat_out)
        # Preserve the user's coordinate names when ds_out used DataArrays.
        self._coord_names = dict(
            lon=lon_out.name if isinstance(lon_out, DataArray) else 'lon',
            lat=lat_out.name if isinstance(lat_out, DataArray) else 'lat',
        )
        self._lon_out_attrs = lon_out.attrs if isinstance(lon_out, DataArray) else {}
        self._lat_out_attrs = lat_out.attrs if isinstance(lat_out, DataArray) else {}
        if self._lon_out.ndim == 2:
            # 2D curvilinear output grid: lat and lon share the same dim pair.
            try:
                self.lon_dim = self.lat_dim = lon_out.dims
            except Exception:
                self.lon_dim = self.lat_dim = ('y', 'x')
            self.out_horiz_dims = self.lon_dim
        elif self._lon_out.ndim == 1:
            # 1D rectilinear output grid: one dim per coordinate.
            try:
                (self.lon_dim,) = lon_out.dims
                (self.lat_dim,) = lat_out.dims
            except Exception:
                self.lon_dim = 'lon'
                self.lat_dim = 'lat'
            self.out_horiz_dims = (self.lat_dim, self.lon_dim)
    def _format_xroutput(self, out, new_dims=None):
        """Rename temp dims to the output grid's dims and attach coords/attrs."""
        if not self.sequence_out and new_dims is not None:
            # rename dimension name to match output grid
            out = out.rename(
                {new_dims[0]: self.out_horiz_dims[0], new_dims[1]: self.out_horiz_dims[1]}
            )
        # append output horizontal coordinate values
        # extra coordinates are automatically tracked by apply_ufunc
        lon_args = dict(data=self._lon_out, attrs=self._lon_out_attrs)
        lat_args = dict(data=self._lat_out, attrs=self._lat_out_attrs)
        if self.sequence_out:
            out.coords['lon'] = xr.DataArray(**lon_args, dims=('locations',))
            out.coords['lat'] = xr.DataArray(**lat_args, dims=('locations',))
        else:
            out.coords['lon'] = xr.DataArray(**lon_args, dims=self.lon_dim)
            out.coords['lat'] = xr.DataArray(**lat_args, dims=self.lat_dim)
        out.attrs['regrid_method'] = self.method
        if self.sequence_out:
            # drop the length-1 dummy dim added for locstream handling
            out = out.squeeze(dim='dummy')
            # Use ds_out coordinates
            out = out.rename(self._coord_names)
        return out
class SpatialAverager(BaseRegridder):
    """Exact areal averages of a gridded field over shapely (Multi)Polygons,
    implemented as a conservative regridding onto per-polygon ESMF meshes."""
    def __init__(
        self,
        ds_in,
        polys,
        ignore_holes=False,
        periodic=False,
        filename=None,
        reuse_weights=False,
        weights=None,
        ignore_degenerate=False,
        geom_dim_name='geom',
    ):
        """Compute the exact average of a gridded array over a geometry.
        This uses the ESMF `conservative` regridding method to compute and apply weights
        mapping a 2D field unto geometries defined by polygons. The `conservative` method
        preserves the areal average of the input field. That is, *the value at each output
        grid cell is the average input value over the output grid area*. Here, the output
        grid cells are not rectangles defined by four corners, but polygons defined by
        multiple vertices (`ESMF.Mesh` objects). The regridding weights thus compute the
        areal-average of the input grid over each polygon.
        For multi-parts geometries (shapely.MultiPolygon), weights are computed for each
        geometry, then added, to compute the average over all geometries.
        When polygons include holes, the weights over the holes can either be substracted,
        or ignored.
        Parameters
        ----------
        ds_in : xr.DataArray or xr.Dataset or dictionary
            Contain input and output grid coordinates. Look for variables
            ``lon``, ``lat``, ``lon_b`` and ``lat_b``.
            Optionaly looks for ``mask``, in which case the ESMF convention is used,
            where masked values are identified by 0, and non-masked values by 1.
            Shape can be 1D (n_lon,) and (n_lat,) for rectilinear grids,
            or 2D (n_y, n_x) for general curvilinear grids.
            Shape of bounds should be (n+1,) or (n_y+1, n_x+1).
        polys : sequence of shapely Polygons and MultiPolygons
            Sequence of polygons over which to average `ds_in`.
        ignore_holes : bool
            Whether to ignore holes in polygons.
            Default (True) is to substract the weight of holes from the weight of the polygon.
        filename : str, optional
            Name for the weight file. The default naming scheme is::
                spatialavg_{Ny_in}x{Nx_in}_{Npoly_out}.nc
            e.g. spatialavg_400x600_30.nc
        reuse_weights : bool, optional
            Whether to read existing weight file to save computing time.
            False by default (i.e. re-compute, not reuse).
        weights : None, coo_matrix, dict, str, Dataset, Path,
            Regridding weights, stored as
            - a scipy.sparse COO matrix,
            - a dictionary with keys `row_dst`, `col_src` and `weights`,
            - an xarray Dataset with data variables `col`, `row` and `S`,
            - or a path to a netCDF file created by ESMF.
            If None, compute the weights.
        ignore_degenerate : bool, optional
            If False (default), raise error if grids contain degenerated cells
            (i.e. triangles or lines, instead of quadrilaterals)
        self.geom_dim_name : str
            Name of dimension along which averages for each polygon are stored.
        Returns
        -------
        xarray.DataArray
            Average over polygons along `geom_dim_name` dimension. The `lon` and
            `lat` coordinates are the polygon centroid coordinates.
        References
        ----------
        This approach is inspired by `OCGIS <https://github.com/NCPP/ocgis>`_.
        """
        self.ignore_holes = ignore_holes
        self.polys = polys
        self.ignore_degenerate = ignore_degenerate
        self.geom_dim_name = geom_dim_name
        # Conservative averaging always needs cell bounds on the input grid.
        grid_in, shape_in = ds_to_ESMFgrid(ds_in, need_bounds=True, periodic=periodic)
        self.grid_in = grid_in
        # Create an output locstream so that the regridder knows the output shape and coords.
        # Latitude and longitude coordinates are the polygon centroid.
        lon_out, lat_out = _get_lon_lat(ds_in)
        if hasattr(lon_out, 'name'):
            self._lon_out_name = lon_out.name
            self._lat_out_name = lat_out.name
        else:
            self._lon_out_name = 'lon'
            self._lat_out_name = 'lat'
        poly_centers = [poly.centroid.xy for poly in polys]
        self._lon_out = np.asarray([c[0][0] for c in poly_centers])
        self._lat_out = np.asarray([c[1][0] for c in poly_centers])
        # We put names 'lon' and 'lat' so ds_to_ESMFlocstream finds them easily.
        # _lon_out_name and _lat_out_name are used on the output anyway.
        ds_out = {'lon': self._lon_out, 'lat': self._lat_out}
        locstream_out, shape_out = ds_to_ESMFlocstream(ds_out)
        # BaseRegridder with custom-computed weights and dummy out grid
        super().__init__(
            grid_in,
            locstream_out,
            'conservative',
            weights=weights,
            filename=filename,
            reuse_weights=reuse_weights,
            ignore_degenerate=ignore_degenerate,
        )
    def _compute_weights(self):
        """Return weight sparse matrix."""
        # Split all (multi-)polygons into single polygons and holes. Keep track of owners.
        exts, holes, i_ext, i_hol = split_polygons_and_holes(self.polys)
        owners = np.array(i_ext + i_hol)
        mesh_ext, shape_ext = polys_to_ESMFmesh(exts)
        # Get weights for single polygons and holes
        # Stack everything together
        reg_ext = BaseRegridder(
            mesh_ext, self.grid_in, 'conservative', ignore_degenerate=self.ignore_degenerate
        )
        if len(holes) > 0 and not self.ignore_holes:
            mesh_holes, shape_holes = polys_to_ESMFmesh(holes)
            reg_holes = BaseRegridder(
                mesh_holes, self.grid_in, 'conservative', ignore_degenerate=self.ignore_degenerate
            )
            # Hole weights are negated so they subtract from their owner polygon.
            w_all = sps.hstack((reg_ext.weights.tocsc(), -reg_holes.weights.tocsc()))
        else:
            w_all = reg_ext.weights.tocsc()
        # Combine weights of same owner and normalize
        weights = _combine_weight_multipoly(w_all, owners)
        weights = weights.multiply(1 / weights.sum(axis=0))
        return weights.tocoo().T
    def _get_default_filename(self):
        # e.g. spatialavg_400x600_30.nc
        filename = 'spatialavg_{0}x{1}_{2}.nc'.format(
            self.shape_in[0], self.shape_in[1], self.n_out
        )
        return filename
    def __repr__(self):
        """Summarize the averager configuration."""
        info = (
            'xESMF SpatialAverager \n'
            'Weight filename: {} \n'
            'Reuse pre-computed weights? {} \n'
            'Input grid shape: {} \n'
            'Output list length: {} \n'.format(
                self.filename, self.reuse_weights, self.shape_in, self.n_out
            )
        )
        return info
    def _format_xroutput(self, out, new_dims=None):
        """Drop the dummy dim and attach polygon-centroid coords/attrs."""
        out = out.squeeze(dim='dummy')
        # rename dimension name to match output grid
        out = out.rename(locations=self.geom_dim_name)
        # append output horizontal coordinate values
        # extra coordinates are automatically tracked by apply_ufunc
        out.coords[self._lon_out_name] = xr.DataArray(self._lon_out, dims=(self.geom_dim_name,))
        out.coords[self._lat_out_name] = xr.DataArray(self._lat_out, dims=(self.geom_dim_name,))
        out.attrs['regrid_method'] = self.method
        return out
| [
"dask.array.map_blocks",
"numpy.asarray",
"xarray.Dataset",
"numpy.array",
"xarray.DataArray",
"numpy.expand_dims",
"warnings.warn",
"numpy.meshgrid",
"xarray.apply_ufunc",
"cf_xarray.bounds_to_vertices"
] | [((2146, 2201), 'cf_xarray.bounds_to_vertices', 'cfxr.bounds_to_vertices', (['lon_bnds', '"""bounds"""'], {'order': 'None'}), "(lon_bnds, 'bounds', order=None)\n", (2169, 2201), True, 'import cf_xarray as cfxr\n'), ((2214, 2269), 'cf_xarray.bounds_to_vertices', 'cfxr.bounds_to_vertices', (['lat_bnds', '"""bounds"""'], {'order': 'None'}), "(lat_bnds, 'bounds', order=None)\n", (2237, 2269), True, 'import cf_xarray as cfxr\n'), ((3088, 3103), 'numpy.asarray', 'np.asarray', (['lon'], {}), '(lon)\n', (3098, 3103), True, 'import numpy as np\n'), ((3105, 3120), 'numpy.asarray', 'np.asarray', (['lat'], {}), '(lat)\n', (3115, 3120), True, 'import numpy as np\n'), ((3159, 3181), 'numpy.asarray', 'np.asarray', (["ds['mask']"], {}), "(ds['mask'])\n", (3169, 3181), True, 'import numpy as np\n'), ((4010, 4025), 'numpy.asarray', 'np.asarray', (['lon'], {}), '(lon)\n', (4020, 4025), True, 'import numpy as np\n'), ((4027, 4042), 'numpy.asarray', 'np.asarray', (['lat'], {}), '(lat)\n', (4037, 4042), True, 'import numpy as np\n'), ((4856, 4963), 'warnings.warn', 'warnings.warn', (['"""Some passed polygons have holes, those are not represented in the returned Mesh."""'], {}), "(\n 'Some passed polygons have holes, those are not represented in the returned Mesh.'\n )\n", (4869, 4963), False, 'import warnings\n'), ((10014, 10056), 'warnings.warn', 'warnings.warn', (['message', 'DeprecationWarning'], {}), '(message, DeprecationWarning)\n', (10027, 10056), False, 'import warnings\n'), ((13830, 13939), 'dask.array.map_blocks', 'da.map_blocks', (['self._regrid_array', 'indata'], {'dtype': 'float', 'chunks': 'output_chunk_shape'}), '(self._regrid_array, indata, dtype=float, chunks=\n output_chunk_shape, **self._regrid_kwargs)\n', (13843, 13939), True, 'import dask.array as da\n'), ((14208, 14527), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['self._regrid_array', 'dr_in'], {'kwargs': 'self._regrid_kwargs', 'input_core_dims': '[input_horiz_dims]', 'output_core_dims': '[temp_horiz_dims]', 
'dask': '"""parallelized"""', 'output_dtypes': '[float]', 'output_sizes': '{temp_horiz_dims[0]: self.shape_out[0], temp_horiz_dims[1]: self.shape_out[1]}', 'keep_attrs': 'keep_attrs'}), "(self._regrid_array, dr_in, kwargs=self._regrid_kwargs,\n input_core_dims=[input_horiz_dims], output_core_dims=[temp_horiz_dims],\n dask='parallelized', output_dtypes=[float], output_sizes={\n temp_horiz_dims[0]: self.shape_out[0], temp_horiz_dims[1]: self.\n shape_out[1]}, keep_attrs=keep_attrs)\n", (14222, 14527), True, 'import xarray as xr\n'), ((15441, 15760), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['self._regrid_array', 'ds_in'], {'kwargs': 'self._regrid_kwargs', 'input_core_dims': '[input_horiz_dims]', 'output_core_dims': '[temp_horiz_dims]', 'dask': '"""parallelized"""', 'output_dtypes': '[float]', 'output_sizes': '{temp_horiz_dims[0]: self.shape_out[0], temp_horiz_dims[1]: self.shape_out[1]}', 'keep_attrs': 'keep_attrs'}), "(self._regrid_array, ds_in, kwargs=self._regrid_kwargs,\n input_core_dims=[input_horiz_dims], output_core_dims=[temp_horiz_dims],\n dask='parallelized', output_dtypes=[float], output_sizes={\n temp_horiz_dims[0]: self.shape_out[0], temp_horiz_dims[1]: self.\n shape_out[1]}, keep_attrs=keep_attrs)\n", (15455, 15760), True, 'import xarray as xr\n'), ((17490, 17576), 'xarray.Dataset', 'xr.Dataset', (["{'S': (dim, w.data), 'col': (dim, w.col + 1), 'row': (dim, w.row + 1)}"], {}), "({'S': (dim, w.data), 'col': (dim, w.col + 1), 'row': (dim, w.row +\n 1)})\n", (17500, 17576), True, 'import xarray as xr\n'), ((29070, 29113), 'numpy.asarray', 'np.asarray', (['[c[0][0] for c in poly_centers]'], {}), '([c[0][0] for c in poly_centers])\n', (29080, 29113), True, 'import numpy as np\n'), ((29138, 29181), 'numpy.asarray', 'np.asarray', (['[c[1][0] for c in poly_centers]'], {}), '([c[1][0] for c in poly_centers])\n', (29148, 29181), True, 'import numpy as np\n'), ((30055, 30078), 'numpy.array', 'np.array', (['(i_ext + i_hol)'], {}), '(i_ext + i_hol)\n', (30063, 
30078), True, 'import numpy as np\n'), ((31976, 32031), 'xarray.DataArray', 'xr.DataArray', (['self._lon_out'], {'dims': '(self.geom_dim_name,)'}), '(self._lon_out, dims=(self.geom_dim_name,))\n', (31988, 32031), True, 'import xarray as xr\n'), ((32073, 32128), 'xarray.DataArray', 'xr.DataArray', (['self._lat_out'], {'dims': '(self.geom_dim_name,)'}), '(self._lat_out, dims=(self.geom_dim_name,))\n', (32085, 32128), True, 'import xarray as xr\n'), ((760, 781), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (771, 781), True, 'import numpy as np\n'), ((3561, 3578), 'numpy.asarray', 'np.asarray', (['lon_b'], {}), '(lon_b)\n', (3571, 3578), True, 'import numpy as np\n'), ((3580, 3597), 'numpy.asarray', 'np.asarray', (['lat_b'], {}), '(lat_b)\n', (3590, 3597), True, 'import numpy as np\n'), ((13137, 13168), 'numpy.expand_dims', 'np.expand_dims', (['indata'], {'axis': '(-2)'}), '(indata, axis=-2)\n', (13151, 13168), True, 'import numpy as np\n'), ((22606, 22625), 'numpy.asarray', 'np.asarray', (['lon_out'], {}), '(lon_out)\n', (22616, 22625), True, 'import numpy as np\n'), ((22627, 22646), 'numpy.asarray', 'np.asarray', (['lat_out'], {}), '(lat_out)\n', (22637, 22646), True, 'import numpy as np\n'), ((24210, 24255), 'xarray.DataArray', 'xr.DataArray', ([], {'dims': "('locations',)"}), "(**lon_args, dims=('locations',))\n", (24222, 24255), True, 'import xarray as xr\n'), ((24288, 24333), 'xarray.DataArray', 'xr.DataArray', ([], {'dims': "('locations',)"}), "(**lat_args, dims=('locations',))\n", (24300, 24333), True, 'import xarray as xr\n'), ((24380, 24423), 'xarray.DataArray', 'xr.DataArray', ([], {'dims': 'self.lon_dim'}), '(**lon_args, dims=self.lon_dim)\n', (24392, 24423), True, 'import xarray as xr\n'), ((24456, 24499), 'xarray.DataArray', 'xr.DataArray', ([], {'dims': 'self.lat_dim'}), '(**lat_args, dims=self.lat_dim)\n', (24468, 24499), True, 'import xarray as xr\n')] |
import os
import platform
import numpy as np
from simtk import unit
import time
import pytest
from testsystems.relative import hif2a_ligand_pair
from md.builders import build_water_system
from md.minimizer import minimize_host_4d
from fe.free_energy import AbsoluteFreeEnergy
from md.states import CoordsVelBox
from md.ensembles import PotentialEnergyModel, NPTEnsemble
from md.thermostat.moves import UnadjustedLangevinMove
from md.barostat.moves import MonteCarloBarostat, CentroidRescaler
from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center
from md.utils import simulate_npt_traj
from md.thermostat.utils import sample_velocities
from timemachine.lib import LangevinIntegrator, custom_ops
from functools import partial
from timemachine.constants import BOLTZ, ENERGY_UNIT, DISTANCE_UNIT
def test_barostat_zero_interval():
    """A MonteCarloBarostat interval of 0 must be rejected, both at
    construction time and via set_interval(); an interval of 1 is valid."""
    pressure = 1.0 * unit.atmosphere
    temperature = 300.0 * unit.kelvin
    initial_waterbox_width = 2.5 * unit.nanometer
    # NOTE(review): barostat_interval is unused below; the interval is passed
    # as a literal to the constructor calls.
    barostat_interval = 0
    seed = 2021
    np.random.seed(seed)
    mol_a = hif2a_ligand_pair.mol_a
    ff = hif2a_ligand_pair.ff
    complex_system, complex_coords, complex_box, complex_top = build_water_system(
        initial_waterbox_width.value_in_unit(unit.nanometer)
    )
    afe = AbsoluteFreeEnergy(mol_a, ff)
    unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
        ff.get_ordered_params(), complex_system, complex_coords
    )
    # get list of molecules for barostat by looking at bond table
    harmonic_bond_potential = unbound_potentials[0]
    bond_list = get_bond_list(harmonic_bond_potential)
    group_indices = get_group_indices(bond_list)
    bound_potentials = []
    for params, unbound_pot in zip(sys_params, unbound_potentials):
        bp = unbound_pot.bind(np.asarray(params))
        bound_potentials.append(bp)
    u_impls = []
    for bp in bound_potentials:
        bp_impl = bp.bound_impl(precision=np.float32)
        u_impls.append(bp_impl)
    # interval of 0 is rejected at construction time
    with pytest.raises(RuntimeError):
        custom_ops.MonteCarloBarostat(
            coords.shape[0],
            pressure.value_in_unit(unit.bar),
            temperature.value_in_unit(unit.kelvin),
            group_indices,
            0,
            u_impls,
            seed,
        )
    # Setting it to 1 should be valid.
    baro = custom_ops.MonteCarloBarostat(
        coords.shape[0],
        pressure.value_in_unit(unit.bar),
        temperature.value_in_unit(unit.kelvin),
        group_indices,
        1,
        u_impls,
        seed,
    )
    # Setting back to 0 should raise another error
    with pytest.raises(RuntimeError):
        baro.set_interval(0)
def get_platform_version() -> str:
    """Return a lower-cased description of the OS platform.

    On Linux systems that provide ``/etc/os-release`` (e.g. AWS Ubuntu 20.04,
    whose uname does not carry the release name), the ``PRETTY_NAME=`` entry
    is returned; otherwise falls back to ``platform.version()``.

    Returns
    -------
    str
        Lower-cased platform description.
    """
    release_path = "/etc/os-release"
    # Default to platform.version(). This also fixes a bug in the original:
    # if /etc/os-release existed but contained no PRETTY_NAME= line,
    # platform_version was never assigned and a NameError was raised.
    platform_version = platform.version()
    if os.path.isfile(release_path):
        # AWS Ubuntu 20.04 doesn't have version in uname...
        with open(release_path, "r") as ifs:
            for line in ifs.readlines():
                if line.startswith("PRETTY_NAME="):
                    platform_version = line.strip()
                    break  # first match wins; no need to scan the rest
    return platform_version.lower()
def test_barostat_partial_group_idxs():
    """Verify that the barostat can handle a subset of the molecules
    rather than all of them. This test only verifies that it runs, not the behavior."""
    temperature = 300.0 * unit.kelvin
    initial_waterbox_width = 3.0 * unit.nanometer
    timestep = 1.5 * unit.femtosecond
    barostat_interval = 3
    collision_rate = 1.0 / unit.picosecond
    seed = 2021
    np.random.seed(seed)
    pressure = 1.0 * unit.atmosphere
    mol_a = hif2a_ligand_pair.mol_a
    ff = hif2a_ligand_pair.ff
    complex_system, complex_coords, complex_box, complex_top = build_water_system(
        initial_waterbox_width.value_in_unit(unit.nanometer)
    )
    min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
    afe = AbsoluteFreeEnergy(mol_a, ff)
    unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
        ff.get_ordered_params(), complex_system, min_complex_coords
    )
    # get list of molecules for barostat by looking at bond table
    harmonic_bond_potential = unbound_potentials[0]
    bond_list = get_bond_list(harmonic_bond_potential)
    group_indices = get_group_indices(bond_list)
    # Cut the number of groups in half so the barostat only scales a subset
    group_indices = group_indices[len(group_indices) // 2 :]
    lam = 1.0
    bound_potentials = []
    for params, unbound_pot in zip(sys_params, unbound_potentials):
        bp = unbound_pot.bind(np.asarray(params))
        bound_potentials.append(bp)
    u_impls = []
    for bp in bound_potentials:
        bp_impl = bp.bound_impl(precision=np.float32)
        u_impls.append(bp_impl)
    integrator = LangevinIntegrator(
        temperature.value_in_unit(unit.kelvin),
        timestep.value_in_unit(unit.picosecond),
        collision_rate.value_in_unit(unit.picosecond ** -1),
        masses,
        seed,
    )
    integrator_impl = integrator.impl()
    v_0 = sample_velocities(masses * unit.amu, temperature)
    baro = custom_ops.MonteCarloBarostat(
        coords.shape[0],
        pressure.value_in_unit(unit.bar),
        temperature.value_in_unit(unit.kelvin),
        group_indices,
        barostat_interval,
        u_impls,
        seed,
    )
    ctxt = custom_ops.Context(coords, v_0, complex_box, integrator_impl, u_impls, barostat=baro)
    # just has to run without raising
    ctxt.multiple_steps(np.ones(1000) * lam)
def test_barostat_is_deterministic():
    """Verify that the barostat results in the same box size shift after 1000
    steps. This is important to debugging as well as providing the ability to
    replicate simulations.
    """
    platform_version = get_platform_version()
    lam = 1.0
    temperature = 300.0 * unit.kelvin
    initial_waterbox_width = 3.0 * unit.nanometer
    timestep = 1.5 * unit.femtosecond
    barostat_interval = 3
    collision_rate = 1.0 / unit.picosecond
    seed = 2021
    np.random.seed(seed)
    # OpenEye's AM1 Charging values are OS platform dependent. To ensure that we have deterministic values
    # we check against our two most common OS versions, Ubuntu 18.04 and 20.04.
    box_vol = 26.869380588831582
    lig_charge_vals = np.array(
        [1.4572377542719206, -0.37011462071257184, 1.1478267014520305, -4.920284514559682, 0.16985194917937935]
    )
    if "ubuntu" not in platform_version:
        # warn but keep running with the 20.04 reference values
        print(f"Test expected to run under ubuntu 20.04 or 18.04, got {platform_version}")
    if "18.04" in platform_version:
        box_vol = 26.711716908713402
        lig_charge_vals[3] = -4.920166483601927
    pressure = 1.0 * unit.atmosphere
    mol_a = hif2a_ligand_pair.mol_a
    ff = hif2a_ligand_pair.ff
    complex_system, complex_coords, complex_box, complex_top = build_water_system(
        initial_waterbox_width.value_in_unit(unit.nanometer)
    )
    min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
    afe = AbsoluteFreeEnergy(mol_a, ff)
    unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
        ff.get_ordered_params(), complex_system, min_complex_coords
    )
    # get list of molecules for barostat by looking at bond table
    harmonic_bond_potential = unbound_potentials[0]
    bond_list = get_bond_list(harmonic_bond_potential)
    group_indices = get_group_indices(bond_list)
    u_impls = []
    # Look at the first five atoms and their assigned charges
    ligand_charges = sys_params[-1][:, 0][len(min_complex_coords) :][:5]
    np.testing.assert_array_almost_equal(lig_charge_vals, ligand_charges, decimal=5)
    for params, unbound_pot in zip(sys_params, unbound_potentials):
        bp = unbound_pot.bind(np.asarray(params))
        bp_impl = bp.bound_impl(precision=np.float32)
        u_impls.append(bp_impl)
    integrator = LangevinIntegrator(
        temperature.value_in_unit(unit.kelvin),
        timestep.value_in_unit(unit.picosecond),
        collision_rate.value_in_unit(unit.picosecond ** -1),
        masses,
        seed,
    )
    integrator_impl = integrator.impl()
    v_0 = sample_velocities(masses * unit.amu, temperature)
    baro = custom_ops.MonteCarloBarostat(
        coords.shape[0],
        pressure.value_in_unit(unit.bar),
        temperature.value_in_unit(unit.kelvin),
        group_indices,
        barostat_interval,
        u_impls,
        seed,
    )
    ctxt = custom_ops.Context(coords, v_0, complex_box, integrator_impl, u_impls, barostat=baro)
    ctxt.multiple_steps(np.ones(1000) * lam)
    atm_box = ctxt.get_box()
    # final box volume must match the platform-specific reference exactly
    np.testing.assert_almost_equal(compute_box_volume(atm_box), box_vol, decimal=5)
def test_barostat_varying_pressure():
    """Changing the barostat pressure mid-simulation should change the box:
    the box shrinks under very high pressure, then grows again after the
    pressure is lowered to 1 atm."""
    temperature = 300.0 * unit.kelvin
    initial_waterbox_width = 3.0 * unit.nanometer
    timestep = 1.5 * unit.femtosecond
    barostat_interval = 3
    collision_rate = 1.0 / unit.picosecond
    seed = 2021
    np.random.seed(seed)
    # Start out with a very large pressure
    pressure = 1000.0 * unit.atmosphere
    mol_a = hif2a_ligand_pair.mol_a
    ff = hif2a_ligand_pair.ff
    complex_system, complex_coords, complex_box, complex_top = build_water_system(
        initial_waterbox_width.value_in_unit(unit.nanometer)
    )
    min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
    afe = AbsoluteFreeEnergy(mol_a, ff)
    unbound_potentials, sys_params, masses, coords = afe.prepare_host_edge(
        ff.get_ordered_params(), complex_system, min_complex_coords
    )
    # get list of molecules for barostat by looking at bond table
    harmonic_bond_potential = unbound_potentials[0]
    bond_list = get_bond_list(harmonic_bond_potential)
    group_indices = get_group_indices(bond_list)
    lam = 1.0
    u_impls = []
    for params, unbound_pot in zip(sys_params, unbound_potentials):
        bp = unbound_pot.bind(np.asarray(params))
        bp_impl = bp.bound_impl(precision=np.float32)
        u_impls.append(bp_impl)
    integrator = LangevinIntegrator(
        temperature.value_in_unit(unit.kelvin),
        timestep.value_in_unit(unit.picosecond),
        collision_rate.value_in_unit(unit.picosecond ** -1),
        masses,
        seed,
    )
    integrator_impl = integrator.impl()
    v_0 = sample_velocities(masses * unit.amu, temperature)
    baro = custom_ops.MonteCarloBarostat(
        coords.shape[0],
        pressure.value_in_unit(unit.bar),
        temperature.value_in_unit(unit.kelvin),
        group_indices,
        barostat_interval,
        u_impls,
        seed,
    )
    ctxt = custom_ops.Context(coords, v_0, complex_box, integrator_impl, u_impls, barostat=baro)
    ctxt.multiple_steps(np.ones(1000) * lam)
    ten_atm_box = ctxt.get_box()
    ten_atm_box_vol = compute_box_volume(ten_atm_box)
    # Expect the box to shrink thanks to the barostat
    assert compute_box_volume(complex_box) - ten_atm_box_vol > 0.4
    # Set the pressure to 1 bar
    baro.set_pressure((1 * unit.atmosphere).value_in_unit(unit.bar))
    # Changing the barostat interval resets the barostat step.
    baro.set_interval(2)
    ctxt.multiple_steps(np.ones(2000) * lam)
    atm_box = ctxt.get_box()
    # Box will grow thanks to the lower pressure
    assert compute_box_volume(atm_box) > ten_atm_box_vol
def test_molecular_ideal_gas():
"""
References
----------
OpenMM testIdealGas
https://github.com/openmm/openmm/blob/d8ef57fed6554ec95684e53768188e1f666405c9/tests/TestMonteCarloBarostat.h#L86-L140
"""
# NOTE(review): indentation was lost in extraction; loop bodies below are
# inferred from the code's semantics — confirm against the original file.
# simulation parameters
initial_waterbox_width = 3.0 * unit.nanometer
timestep = 1.5 * unit.femtosecond
collision_rate = 1.0 / unit.picosecond
n_moves = 10000
barostat_interval = 5
seed = 2021
# thermodynamic parameters
temperatures = np.array([300, 600, 1000]) * unit.kelvin
pressure = 100.0 * unit.bar  # very high pressure, to keep the expected volume small
# generate an alchemical system of a waterbox + alchemical ligand:
# effectively discard ligands by running in AbsoluteFreeEnergy mode at lambda = 1.0
mol_a = hif2a_ligand_pair.mol_a
ff = hif2a_ligand_pair.ff
complex_system, complex_coords, complex_box, complex_top = build_water_system(
initial_waterbox_width.value_in_unit(unit.nanometer)
)
# relax the combined host + ligand system before production
min_complex_coords = minimize_host_4d([mol_a], complex_system, complex_coords, ff, complex_box)
afe = AbsoluteFreeEnergy(mol_a, ff)
_unbound_potentials, _sys_params, masses, coords = afe.prepare_host_edge(
ff.get_ordered_params(), complex_system, min_complex_coords
)
# drop the nonbonded potential
unbound_potentials = _unbound_potentials[:-1]
sys_params = _sys_params[:-1]
# get list of molecules for barostat by looking at bond table
harmonic_bond_potential = unbound_potentials[0]
bond_list = get_bond_list(harmonic_bond_potential)
group_indices = get_group_indices(bond_list)
volume_trajs = []
lam = 1.0
relative_tolerance = 1e-2
initial_relative_box_perturbation = 2 * relative_tolerance
n_molecules = complex_top.getNumResidues()
# bind concrete parameters to each remaining (bonded-only) potential
bound_potentials = []
for params, unbound_pot in zip(sys_params, unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
u_impls = []
for bp in bound_potentials:
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
# expected volume
md_pressure_unit = ENERGY_UNIT / DISTANCE_UNIT ** 3
pressure_in_md = (pressure * unit.AVOGADRO_CONSTANT_NA).value_in_unit(md_pressure_unit)
# ideal-gas law V = N k T / p; the "+ 1" matches the OpenMM reference test's
# expected-volume formula linked in the docstring
expected_volume_in_md = (n_molecules + 1) * BOLTZ * temperatures.value_in_unit(unit.kelvin) / pressure_in_md
# run a barostatted simulation at each temperature and record volumes
for i, temperature in enumerate(temperatures):
# define a thermostat
integrator = LangevinIntegrator(
temperature.value_in_unit(unit.kelvin),
timestep.value_in_unit(unit.picosecond),
collision_rate.value_in_unit(unit.picosecond ** -1),
masses,
seed,
)
integrator_impl = integrator.impl()
v_0 = sample_velocities(masses * unit.amu, temperature)
# rescale the box to be approximately the desired box volume already
rescaler = CentroidRescaler(group_indices)
initial_volume = compute_box_volume(complex_box)
initial_center = compute_box_center(complex_box)
length_scale = ((1 + initial_relative_box_perturbation) * expected_volume_in_md[i] / initial_volume) ** (
1.0 / 3
)
new_coords = rescaler.scale_centroids(coords, initial_center, length_scale)
new_box = complex_box * length_scale
baro = custom_ops.MonteCarloBarostat(
new_coords.shape[0],
pressure.value_in_unit(unit.bar),
temperature.value_in_unit(unit.kelvin),
group_indices,
barostat_interval,
u_impls,
seed,
)
ctxt = custom_ops.Context(new_coords, v_0, new_box, integrator_impl, u_impls, barostat=baro)
vols = []
# sample the box volume once per barostat cycle
for move in range(n_moves // barostat_interval):
ctxt.multiple_steps(np.ones(barostat_interval))
new_box = ctxt.get_box()
volume = np.linalg.det(new_box)
vols.append(volume)
volume_trajs.append(vols)
# discard the first half of each trajectory as equilibration
equil_time = len(volume_trajs[0]) // 2  # TODO: don't hard-code this?
actual_volume_in_md = np.array([np.mean(volume_traj[equil_time:]) for volume_traj in volume_trajs])
# equilibrated mean volume must match the ideal-gas prediction within 1%
np.testing.assert_allclose(actual=actual_volume_in_md, desired=expected_volume_in_md, rtol=relative_tolerance)
| [
"numpy.array",
"md.barostat.utils.get_bond_list",
"platform.version",
"numpy.mean",
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_allclose",
"numpy.asarray",
"fe.free_energy.AbsoluteFreeEnergy",
"numpy.random.seed",
"md.thermostat.utils.sample_velocities",
"timemachine.lib.cus... | [((1057, 1077), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1071, 1077), True, 'import numpy as np\n'), ((1306, 1335), 'fe.free_energy.AbsoluteFreeEnergy', 'AbsoluteFreeEnergy', (['mol_a', 'ff'], {}), '(mol_a, ff)\n', (1324, 1335), False, 'from fe.free_energy import AbsoluteFreeEnergy\n'), ((1618, 1656), 'md.barostat.utils.get_bond_list', 'get_bond_list', (['harmonic_bond_potential'], {}), '(harmonic_bond_potential)\n', (1631, 1656), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((1677, 1705), 'md.barostat.utils.get_group_indices', 'get_group_indices', (['bond_list'], {}), '(bond_list)\n', (1694, 1705), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((2785, 2813), 'os.path.isfile', 'os.path.isfile', (['release_path'], {}), '(release_path)\n', (2799, 2813), False, 'import os\n'), ((3568, 3588), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3582, 3588), True, 'import numpy as np\n'), ((3869, 3943), 'md.minimizer.minimize_host_4d', 'minimize_host_4d', (['[mol_a]', 'complex_system', 'complex_coords', 'ff', 'complex_box'], {}), '([mol_a], complex_system, complex_coords, ff, complex_box)\n', (3885, 3943), False, 'from md.minimizer import minimize_host_4d\n'), ((3954, 3983), 'fe.free_energy.AbsoluteFreeEnergy', 'AbsoluteFreeEnergy', (['mol_a', 'ff'], {}), '(mol_a, ff)\n', (3972, 3983), False, 'from fe.free_energy import AbsoluteFreeEnergy\n'), ((4270, 4308), 'md.barostat.utils.get_bond_list', 'get_bond_list', (['harmonic_bond_potential'], {}), '(harmonic_bond_potential)\n', (4283, 4308), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((4329, 4357), 'md.barostat.utils.get_group_indices', 'get_group_indices', (['bond_list'], {}), '(bond_list)\n', (4346, 4357), False, 'from md.barostat.utils import 
get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((5073, 5122), 'md.thermostat.utils.sample_velocities', 'sample_velocities', (['(masses * unit.amu)', 'temperature'], {}), '(masses * unit.amu, temperature)\n', (5090, 5122), False, 'from md.thermostat.utils import sample_velocities\n'), ((5380, 5469), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['coords', 'v_0', 'complex_box', 'integrator_impl', 'u_impls'], {'barostat': 'baro'}), '(coords, v_0, complex_box, integrator_impl, u_impls,\n barostat=baro)\n', (5398, 5469), False, 'from timemachine.lib import LangevinIntegrator, custom_ops\n'), ((6016, 6036), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6030, 6036), True, 'import numpy as np\n'), ((6280, 6398), 'numpy.array', 'np.array', (['[1.4572377542719206, -0.37011462071257184, 1.1478267014520305, -\n 4.920284514559682, 0.16985194917937935]'], {}), '([1.4572377542719206, -0.37011462071257184, 1.1478267014520305, -\n 4.920284514559682, 0.16985194917937935])\n', (6288, 6398), True, 'import numpy as np\n'), ((6942, 7016), 'md.minimizer.minimize_host_4d', 'minimize_host_4d', (['[mol_a]', 'complex_system', 'complex_coords', 'ff', 'complex_box'], {}), '([mol_a], complex_system, complex_coords, ff, complex_box)\n', (6958, 7016), False, 'from md.minimizer import minimize_host_4d\n'), ((7027, 7056), 'fe.free_energy.AbsoluteFreeEnergy', 'AbsoluteFreeEnergy', (['mol_a', 'ff'], {}), '(mol_a, ff)\n', (7045, 7056), False, 'from fe.free_energy import AbsoluteFreeEnergy\n'), ((7343, 7381), 'md.barostat.utils.get_bond_list', 'get_bond_list', (['harmonic_bond_potential'], {}), '(harmonic_bond_potential)\n', (7356, 7381), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((7402, 7430), 'md.barostat.utils.get_group_indices', 'get_group_indices', (['bond_list'], {}), '(bond_list)\n', (7419, 7430), False, 'from md.barostat.utils import get_bond_list, 
get_group_indices, compute_box_volume, compute_box_center\n'), ((7588, 7673), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['lig_charge_vals', 'ligand_charges'], {'decimal': '(5)'}), '(lig_charge_vals, ligand_charges, decimal=5\n )\n', (7624, 7673), True, 'import numpy as np\n'), ((8156, 8205), 'md.thermostat.utils.sample_velocities', 'sample_velocities', (['(masses * unit.amu)', 'temperature'], {}), '(masses * unit.amu, temperature)\n', (8173, 8205), False, 'from md.thermostat.utils import sample_velocities\n'), ((8463, 8552), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['coords', 'v_0', 'complex_box', 'integrator_impl', 'u_impls'], {'barostat': 'baro'}), '(coords, v_0, complex_box, integrator_impl, u_impls,\n barostat=baro)\n', (8481, 8552), False, 'from timemachine.lib import LangevinIntegrator, custom_ops\n'), ((8962, 8982), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8976, 8982), True, 'import numpy as np\n'), ((9309, 9383), 'md.minimizer.minimize_host_4d', 'minimize_host_4d', (['[mol_a]', 'complex_system', 'complex_coords', 'ff', 'complex_box'], {}), '([mol_a], complex_system, complex_coords, ff, complex_box)\n', (9325, 9383), False, 'from md.minimizer import minimize_host_4d\n'), ((9394, 9423), 'fe.free_energy.AbsoluteFreeEnergy', 'AbsoluteFreeEnergy', (['mol_a', 'ff'], {}), '(mol_a, ff)\n', (9412, 9423), False, 'from fe.free_energy import AbsoluteFreeEnergy\n'), ((9710, 9748), 'md.barostat.utils.get_bond_list', 'get_bond_list', (['harmonic_bond_potential'], {}), '(harmonic_bond_potential)\n', (9723, 9748), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((9769, 9797), 'md.barostat.utils.get_group_indices', 'get_group_indices', (['bond_list'], {}), '(bond_list)\n', (9786, 9797), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((10318, 10367), 
'md.thermostat.utils.sample_velocities', 'sample_velocities', (['(masses * unit.amu)', 'temperature'], {}), '(masses * unit.amu, temperature)\n', (10335, 10367), False, 'from md.thermostat.utils import sample_velocities\n'), ((10625, 10714), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['coords', 'v_0', 'complex_box', 'integrator_impl', 'u_impls'], {'barostat': 'baro'}), '(coords, v_0, complex_box, integrator_impl, u_impls,\n barostat=baro)\n', (10643, 10714), False, 'from timemachine.lib import LangevinIntegrator, custom_ops\n'), ((10811, 10842), 'md.barostat.utils.compute_box_volume', 'compute_box_volume', (['ten_atm_box'], {}), '(ten_atm_box)\n', (10829, 10842), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((12369, 12443), 'md.minimizer.minimize_host_4d', 'minimize_host_4d', (['[mol_a]', 'complex_system', 'complex_coords', 'ff', 'complex_box'], {}), '([mol_a], complex_system, complex_coords, ff, complex_box)\n', (12385, 12443), False, 'from md.minimizer import minimize_host_4d\n'), ((12454, 12483), 'fe.free_energy.AbsoluteFreeEnergy', 'AbsoluteFreeEnergy', (['mol_a', 'ff'], {}), '(mol_a, ff)\n', (12472, 12483), False, 'from fe.free_energy import AbsoluteFreeEnergy\n'), ((12892, 12930), 'md.barostat.utils.get_bond_list', 'get_bond_list', (['harmonic_bond_potential'], {}), '(harmonic_bond_potential)\n', (12905, 12930), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((12951, 12979), 'md.barostat.utils.get_group_indices', 'get_group_indices', (['bond_list'], {}), '(bond_list)\n', (12968, 12979), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((15581, 15696), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', ([], {'actual': 'actual_volume_in_md', 'desired': 'expected_volume_in_md', 'rtol': 'relative_tolerance'}), 
'(actual=actual_volume_in_md, desired=\n expected_volume_in_md, rtol=relative_tolerance)\n', (15607, 15696), True, 'import numpy as np\n'), ((2033, 2060), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2046, 2060), False, 'import pytest\n'), ((2646, 2673), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2659, 2673), False, 'import pytest\n'), ((3102, 3120), 'platform.version', 'platform.version', ([], {}), '()\n', (3118, 3120), False, 'import platform\n'), ((8658, 8685), 'md.barostat.utils.compute_box_volume', 'compute_box_volume', (['atm_box'], {}), '(atm_box)\n', (8676, 8685), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((11289, 11316), 'md.barostat.utils.compute_box_volume', 'compute_box_volume', (['atm_box'], {}), '(atm_box)\n', (11307, 11316), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((11837, 11863), 'numpy.array', 'np.array', (['[300, 600, 1000]'], {}), '([300, 600, 1000])\n', (11845, 11863), True, 'import numpy as np\n'), ((14162, 14211), 'md.thermostat.utils.sample_velocities', 'sample_velocities', (['(masses * unit.amu)', 'temperature'], {}), '(masses * unit.amu, temperature)\n', (14179, 14211), False, 'from md.thermostat.utils import sample_velocities\n'), ((14309, 14340), 'md.barostat.moves.CentroidRescaler', 'CentroidRescaler', (['group_indices'], {}), '(group_indices)\n', (14325, 14340), False, 'from md.barostat.moves import MonteCarloBarostat, CentroidRescaler\n'), ((14366, 14397), 'md.barostat.utils.compute_box_volume', 'compute_box_volume', (['complex_box'], {}), '(complex_box)\n', (14384, 14397), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((14423, 14454), 'md.barostat.utils.compute_box_center', 'compute_box_center', (['complex_box'], {}), '(complex_box)\n', (14441, 
14454), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((15029, 15118), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['new_coords', 'v_0', 'new_box', 'integrator_impl', 'u_impls'], {'barostat': 'baro'}), '(new_coords, v_0, new_box, integrator_impl, u_impls,\n barostat=baro)\n', (15047, 15118), False, 'from timemachine.lib import LangevinIntegrator, custom_ops\n'), ((1831, 1849), 'numpy.asarray', 'np.asarray', (['params'], {}), '(params)\n', (1841, 1849), True, 'import numpy as np\n'), ((4598, 4616), 'numpy.asarray', 'np.asarray', (['params'], {}), '(params)\n', (4608, 4616), True, 'import numpy as np\n'), ((5490, 5503), 'numpy.ones', 'np.ones', (['(1000)'], {}), '(1000)\n', (5497, 5503), True, 'import numpy as np\n'), ((7767, 7785), 'numpy.asarray', 'np.asarray', (['params'], {}), '(params)\n', (7777, 7785), True, 'import numpy as np\n'), ((8573, 8586), 'numpy.ones', 'np.ones', (['(1000)'], {}), '(1000)\n', (8580, 8586), True, 'import numpy as np\n'), ((9929, 9947), 'numpy.asarray', 'np.asarray', (['params'], {}), '(params)\n', (9939, 9947), True, 'import numpy as np\n'), ((10735, 10748), 'numpy.ones', 'np.ones', (['(1000)'], {}), '(1000)\n', (10742, 10748), True, 'import numpy as np\n'), ((10908, 10939), 'md.barostat.utils.compute_box_volume', 'compute_box_volume', (['complex_box'], {}), '(complex_box)\n', (10926, 10939), False, 'from md.barostat.utils import get_bond_list, get_group_indices, compute_box_volume, compute_box_center\n'), ((11179, 11192), 'numpy.ones', 'np.ones', (['(2000)'], {}), '(2000)\n', (11186, 11192), True, 'import numpy as np\n'), ((13285, 13303), 'numpy.asarray', 'np.asarray', (['params'], {}), '(params)\n', (13295, 13303), True, 'import numpy as np\n'), ((15308, 15330), 'numpy.linalg.det', 'np.linalg.det', (['new_box'], {}), '(new_box)\n', (15321, 15330), True, 'import numpy as np\n'), ((15508, 15541), 'numpy.mean', 'np.mean', (['volume_traj[equil_time:]'], 
{}), '(volume_traj[equil_time:])\n', (15515, 15541), True, 'import numpy as np\n'), ((15222, 15248), 'numpy.ones', 'np.ones', (['barostat_interval'], {}), '(barostat_interval)\n', (15229, 15248), True, 'import numpy as np\n')] |
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from harold import (staircase, minimal_realization, hessenberg_realization,
State, Transfer, matrix_slice, cancellation_distance)
import numpy as np
from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like
from numpy.testing import assert_almost_equal, assert_, assert_raises
def test_staircase():
    """Observable staircase form: zero pattern, similarity transform, validation."""
    data = array([[-6.5, 0.5, 6.5, -6.5, 0., 1., 0.],
                  [-0.5, -5.5, -5.5, 5.5, 2., 1., 2.],
                  [-0.5, 0.5, 0.5, -6.5, 3., 4., 3.],
                  [-0.5, 0.5, -5.5, -0.5, 3., 2., 3.],
                  [1., 1., 0., 0., 0., 0., 0.]])
    A, B, C, D = matrix_slice(data, (1, 4), corner='sw')
    # Observable form with inverted block ordering.
    a, b, c, T = staircase(A, B, C, form='o', invert=True)
    # Unknown form strings must be rejected.
    assert_raises(ValueError, staircase, A, B, C, form='zzz')
    # Lower-left block of the transformed A matrix is zeroed out ...
    assert_almost_equal(a[2:, :2], zeros((2, 2)))
    # ... and T is the similarity transformation producing it.
    assert_almost_equal(T.T @ A @ T, a)
    # A zero input matrix stays zero through the transformation.
    a, b, c, T = staircase(A, zeros_like(B), C, form='o', invert=True)
    assert_almost_equal(b, zeros_like(B))
def test_cancellation_distance():
    """cancellation_distance() must reject incompatibly shaped arguments."""
    # A non-square first argument is invalid.
    assert_raises(ValueError, cancellation_distance, empty((4, 3)), 1)
    # Square matrices of mismatched sizes are invalid as a pair.
    lhs, rhs = eye(4), eye(3)
    assert_raises(ValueError, cancellation_distance, lhs, rhs)
def test_minimal_realization_State():
    """minimal_realization() strips non-minimal modes from State models."""
    # Four-state model where only two states are both controllable and observable.
    data = array([[-6.5, 0.5, 6.5, -6.5, 0., 1., 0.],
                  [-0.5, -5.5, -5.5, 5.5, 2., 1., 2.],
                  [-0.5, 0.5, 0.5, -6.5, 3., 4., 3.],
                  [-0.5, 0.5, -5.5, -0.5, 3., 2., 3.],
                  [1., 1., 0., 0., 0., 0., 0.]])
    sys = State(*matrix_slice(data, (1, 4), corner='sw'))
    assert minimal_realization(sys).a.shape == (2, 2)
    # Five-state SISO model; exactly one state is removable.
    sys = State(array([[0., 1., 0., 0., 0.],
                       [-0.1, -0.5, 1., -1., 0.],
                       [0., 0., 0., 1., 0.],
                       [0., 0., 0., 0., 1.],
                       [0., 3.5, 1., -2., 2.]]),
                array([[0.], [1.], [0.], [0.], [1.]]),
                array([[0., 3.5, 1., -1., 0.]]),
                array([[1.]]))
    assert minimal_realization(sys).a.shape == (4, 4)
    # Four-state two-input/two-output model; one state is removable.
    sys = State(array([[-2., 0., 0., 0.],
                       [0., 0., 1., 0.],
                       [0., 0., 0., 1.],
                       [0., -12., 4., 3.]]),
                array([[1., 0.], [0., 0.], [0., 0.], [0., 1.]]),
                array([[1., -9., 0., 0.], [0., -20., 0., 5.]]),
                array([[0., 0.], [0., 1.]]))
    assert minimal_realization(sys).a.shape == (3, 3)
def test_minimal_realization_Transfer():
    """Pole/zero cancellation in Transfer models honors the tolerance."""
    num = [1., -8., 28., -58., 67., -30.]
    den = poly([1, 2, 3., 2, 3., 4, 1 + (2 + 1e-6) * 1j, 1 - (2 + 1e-6) * 1j])
    G = Transfer(num, den)
    # Default tolerance cancels everything, leaving a static gain of 1.
    assert_almost_equal(minimal_realization(G).num, array([[1]]))
    # A tighter tolerance keeps the nearly-cancelling pole/zero pair.
    assert_almost_equal(minimal_realization(G, tol=1e-7).num,
                        array([[1., -7., 21., -37., 30.]]))
    # Static-gain models pass through with their MIMO/gain flags intact.
    for model in (Transfer(eye(4)), State(eye(4))):
        reduced = minimal_realization(model)
        assert reduced._isgain
        assert not reduced._isSISO
def test_simple_hessenberg_trafo():
    """Hessenberg realizations show the expected zero patterns in A, B, C."""
    def strictly_upper(mat):
        # Entries strictly above the first superdiagonal.
        return mat[triu_indices_from(mat, k=2)]

    # Made up discrete time TF
    G = Transfer([1., -8., 28., -58., 67., -30.],
                 poly([1, 2, 3., 2, 3., 4, 1 + 1j, 1 - 1j]), dt=0.1)
    # Controller form: upper-Hessenberg A, input enters only in the last row.
    H, _ = hessenberg_realization(G, compute_T=1, form='c', invert=1)
    assert_(not np.any(strictly_upper(H.a)))
    assert_(not np.any(H.b[:-1, 0]))
    # Observer form: the dual zero pattern appears in A.T and C.
    H = hessenberg_realization(G, form='o', invert=1)
    assert_(not np.any(H.c[0, :-1]))
    assert_(not np.any(strictly_upper(H.a.T)))
| [
"numpy.eye",
"numpy.poly",
"harold.minimal_realization",
"harold.matrix_slice",
"harold.hessenberg_realization",
"numpy.testing.assert_raises",
"numpy.triu_indices_from",
"harold.staircase",
"numpy.any",
"numpy.array",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.empty",
"num... | [((1435, 1647), 'numpy.array', 'array', (['[[-6.5, 0.5, 6.5, -6.5, 0.0, 1.0, 0.0], [-0.5, -5.5, -5.5, 5.5, 2.0, 1.0, \n 2.0], [-0.5, 0.5, 0.5, -6.5, 3.0, 4.0, 3.0], [-0.5, 0.5, -5.5, -0.5, \n 3.0, 2.0, 3.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[-6.5, 0.5, 6.5, -6.5, 0.0, 1.0, 0.0], [-0.5, -5.5, -5.5, 5.5, 2.0, \n 1.0, 2.0], [-0.5, 0.5, 0.5, -6.5, 3.0, 4.0, 3.0], [-0.5, 0.5, -5.5, -\n 0.5, 3.0, 2.0, 3.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (1440, 1647), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((1696, 1732), 'harold.matrix_slice', 'matrix_slice', (['M', '(1, 4)'], {'corner': '"""sw"""'}), "(M, (1, 4), corner='sw')\n", (1708, 1732), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((1750, 1791), 'harold.staircase', 'staircase', (['A', 'B', 'C'], {'form': '"""o"""', 'invert': '(True)'}), "(A, B, C, form='o', invert=True)\n", (1759, 1791), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((1796, 1853), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'staircase', 'A', 'B', 'C'], {'form': '"""zzz"""'}), "(ValueError, staircase, A, B, C, form='zzz')\n", (1809, 1853), False, 'from numpy.testing import assert_almost_equal, assert_, assert_raises\n'), ((1908, 1943), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(T.T @ A @ T)', 'a'], {}), '(T.T @ A @ T, a)\n', (1927, 1943), False, 'from numpy.testing import assert_almost_equal, assert_, assert_raises\n'), ((2213, 2267), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'cancellation_distance', 'f', 'g'], {}), '(ValueError, cancellation_distance, f, g)\n', (2226, 2267), False, 'from numpy.testing import assert_almost_equal, assert_, assert_raises\n'), ((2316, 2528), 'numpy.array', 'array', (['[[-6.5, 0.5, 6.5, -6.5, 0.0, 
1.0, 0.0], [-0.5, -5.5, -5.5, 5.5, 2.0, 1.0, \n 2.0], [-0.5, 0.5, 0.5, -6.5, 3.0, 4.0, 3.0], [-0.5, 0.5, -5.5, -0.5, \n 3.0, 2.0, 3.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[-6.5, 0.5, 6.5, -6.5, 0.0, 1.0, 0.0], [-0.5, -5.5, -5.5, 5.5, 2.0, \n 1.0, 2.0], [-0.5, 0.5, 0.5, -6.5, 3.0, 4.0, 3.0], [-0.5, 0.5, -5.5, -\n 0.5, 3.0, 2.0, 3.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (2321, 2528), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2621, 2643), 'harold.minimal_realization', 'minimal_realization', (['G'], {}), '(G)\n', (2640, 2643), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((3042, 3064), 'harold.minimal_realization', 'minimal_realization', (['G'], {}), '(G)\n', (3061, 3064), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((3439, 3461), 'harold.minimal_realization', 'minimal_realization', (['G'], {}), '(G)\n', (3458, 3461), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((3671, 3693), 'harold.minimal_realization', 'minimal_realization', (['G'], {}), '(G)\n', (3690, 3693), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((3752, 3785), 'harold.minimal_realization', 'minimal_realization', (['G'], {'tol': '(1e-07)'}), '(G, tol=1e-07)\n', (3771, 3785), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((4235, 4293), 'harold.hessenberg_realization', 'hessenberg_realization', (['G'], {'compute_T': '(1)', 'form': '"""c"""', 'invert': '(1)'}), "(G, compute_T=1, form='c', invert=1)\n", (4257, 4293), False, 'from harold import staircase, 
minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((4397, 4442), 'harold.hessenberg_realization', 'hessenberg_realization', (['G'], {'form': '"""o"""', 'invert': '(1)'}), "(G, form='o', invert=1)\n", (4419, 4442), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((1889, 1902), 'numpy.zeros', 'zeros', (['(2, 2)'], {}), '((2, 2))\n', (1894, 1902), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((1974, 1987), 'numpy.zeros_like', 'zeros_like', (['B'], {}), '(B)\n', (1984, 1987), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2042, 2055), 'numpy.zeros_like', 'zeros_like', (['B'], {}), '(B)\n', (2052, 2055), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2165, 2178), 'numpy.empty', 'empty', (['(4, 3)'], {}), '((4, 3))\n', (2170, 2178), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2194, 2200), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (2197, 2200), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2202, 2208), 'numpy.eye', 'eye', (['(3)'], {}), '(3)\n', (2205, 2208), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2695, 2846), 'numpy.array', 'array', (['[[0.0, 1.0, 0.0, 0.0, 0.0], [-0.1, -0.5, 1.0, -1.0, 0.0], [0.0, 0.0, 0.0, \n 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 3.5, 1.0, -2.0, 2.0]]'], {}), '([[0.0, 1.0, 0.0, 0.0, 0.0], [-0.1, -0.5, 1.0, -1.0, 0.0], [0.0, 0.0, \n 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 3.5, 1.0, -2.0, 2.0]])\n', (2700, 2846), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2919, 2961), 'numpy.array', 'array', (['[[0.0], [1.0], [0.0], [0.0], [1.0]]'], {}), '([[0.0], 
[1.0], [0.0], [0.0], [1.0]])\n', (2924, 2961), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2972, 3007), 'numpy.array', 'array', (['[[0.0, 3.5, 1.0, -1.0, 0.0]]'], {}), '([[0.0, 3.5, 1.0, -1.0, 0.0]])\n', (2977, 3007), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3019, 3033), 'numpy.array', 'array', (['[[1.0]]'], {}), '([[1.0]])\n', (3024, 3033), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3116, 3219), 'numpy.array', 'array', (['[[-2.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, -\n 12.0, 4.0, 3.0]]'], {}), '([[-2.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], [\n 0.0, -12.0, 4.0, 3.0]])\n', (3121, 3219), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3277, 3332), 'numpy.array', 'array', (['[[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0]])\n', (3282, 3332), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3340, 3394), 'numpy.array', 'array', (['[[1.0, -9.0, 0.0, 0.0], [0.0, -20.0, 0.0, 5.0]]'], {}), '([[1.0, -9.0, 0.0, 0.0], [0.0, -20.0, 0.0, 5.0]])\n', (3345, 3394), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3402, 3433), 'numpy.array', 'array', (['[[0.0, 0.0], [0.0, 1.0]]'], {}), '([[0.0, 0.0], [0.0, 1.0]])\n', (3407, 3433), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3603, 3679), 'numpy.poly', 'poly', (['[1, 2, 3.0, 2, 3.0, 4, 1 + (2 + 1e-06) * 1.0j, 1 - (2 + 1e-06) * 1.0j]'], {}), '([1, 2, 3.0, 2, 3.0, 4, 1 + (2 + 1e-06) * 1.0j, 1 - (2 + 1e-06) * 1.0j])\n', (3607, 3679), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3727, 3739), 'numpy.array', 'array', (['[[1]]'], {}), 
'([[1]])\n', (3732, 3739), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3819, 3858), 'numpy.array', 'array', (['[[1.0, -7.0, 21.0, -37.0, 30.0]]'], {}), '([[1.0, -7.0, 21.0, -37.0, 30.0]])\n', (3824, 3858), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((4172, 4220), 'numpy.poly', 'poly', (['[1, 2, 3.0, 2, 3.0, 4, 1 + 1.0j, 1 - 1.0j]'], {}), '([1, 2, 3.0, 2, 3.0, 4, 1 + 1.0j, 1 - 1.0j])\n', (4176, 4220), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((2575, 2611), 'harold.matrix_slice', 'matrix_slice', (['M', '(1, 4)'], {'corner': '"""sw"""'}), "(M, (1, 4), corner='sw')\n", (2587, 2611), False, 'from harold import staircase, minimal_realization, hessenberg_realization, State, Transfer, matrix_slice, cancellation_distance\n'), ((3892, 3898), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (3895, 3898), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((3981, 3987), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (3984, 3987), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((4368, 4387), 'numpy.any', 'np.any', (['H.b[:-1, 0]'], {}), '(H.b[:-1, 0])\n', (4374, 4387), True, 'import numpy as np\n'), ((4459, 4478), 'numpy.any', 'np.any', (['H.c[0, :-1]'], {}), '(H.c[0, :-1])\n', (4465, 4478), True, 'import numpy as np\n'), ((4321, 4348), 'numpy.triu_indices_from', 'triu_indices_from', (['H.a'], {'k': '(2)'}), '(H.a, k=2)\n', (4338, 4348), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n'), ((4509, 4536), 'numpy.triu_indices_from', 'triu_indices_from', (['H.a'], {'k': '(2)'}), '(H.a, k=2)\n', (4526, 4536), False, 'from numpy import array, poly, zeros, eye, empty, triu_indices_from, zeros_like\n')] |
#!/usr/bin/python
import numpy as np
class BLC:
    """Black Level Compensation.

    Adds per-channel black-level offsets to a 2-D Bayer-pattern raw image
    and applies a cross-channel correction on the green sites (``alpha``
    couples Gr to the offset R channel, ``beta`` couples Gb to the offset
    B channel), then clips the result to ``[0, clip]``.
    """

    def __init__(self, img, parameter, bayer_pattern, clip):
        """
        Args:
            img: 2-D raw Bayer image (numpy array).
            parameter: sequence ``[bl_r, bl_gr, bl_gb, bl_b, alpha, beta]``.
            bayer_pattern: one of ``'rggb'``, ``'bggr'``, ``'gbrg'``, ``'grbg'``.
            clip: upper bound for output pixel values (lower bound is 0).
        """
        self.img = img
        self.parameter = parameter
        self.bayer_pattern = bayer_pattern
        self.clip = clip

    def clipping(self):
        """Clip ``self.img`` in place to ``[0, clip]`` and return it."""
        np.clip(self.img, 0, self.clip, out=self.img)
        return self.img

    def execute(self):
        """Apply black level compensation.

        Returns:
            The compensated image (int16), clipped to ``[0, clip]``.

        Raises:
            ValueError: if ``bayer_pattern`` is not a supported pattern.
        """
        bl_r = self.parameter[0]
        bl_gr = self.parameter[1]
        bl_gb = self.parameter[2]
        bl_b = self.parameter[3]
        alpha = self.parameter[4]
        beta = self.parameter[5]
        raw_h = self.img.shape[0]
        raw_w = self.img.shape[1]
        # Float green corrections are truncated on assignment into int16.
        blc_img = np.empty((raw_h, raw_w), np.int16)
        if self.bayer_pattern == 'rggb':
            r = self.img[::2, ::2] + bl_r
            b = self.img[1::2, 1::2] + bl_b
            gr = self.img[::2, 1::2] + bl_gr + alpha * r / 256
            gb = self.img[1::2, ::2] + bl_gb + beta * b / 256
            blc_img[::2, ::2] = r
            blc_img[::2, 1::2] = gr
            blc_img[1::2, ::2] = gb
            blc_img[1::2, 1::2] = b
        elif self.bayer_pattern == 'bggr':
            b = self.img[::2, ::2] + bl_b
            r = self.img[1::2, 1::2] + bl_r
            gb = self.img[::2, 1::2] + bl_gb + beta * b / 256
            gr = self.img[1::2, ::2] + bl_gr + alpha * r / 256
            blc_img[::2, ::2] = b
            blc_img[::2, 1::2] = gb
            blc_img[1::2, ::2] = gr
            blc_img[1::2, 1::2] = r
        elif self.bayer_pattern == 'gbrg':
            b = self.img[::2, 1::2] + bl_b
            r = self.img[1::2, ::2] + bl_r
            gb = self.img[::2, ::2] + bl_gb + beta * b / 256
            gr = self.img[1::2, 1::2] + bl_gr + alpha * r / 256
            blc_img[::2, ::2] = gb
            blc_img[::2, 1::2] = b
            blc_img[1::2, ::2] = r
            blc_img[1::2, 1::2] = gr
        elif self.bayer_pattern == 'grbg':
            r = self.img[::2, 1::2] + bl_r
            b = self.img[1::2, ::2] + bl_b
            gr = self.img[::2, ::2] + bl_gr + alpha * r / 256
            gb = self.img[1::2, 1::2] + bl_gb + beta * b / 256
            blc_img[::2, ::2] = gr
            blc_img[::2, 1::2] = r
            blc_img[1::2, ::2] = b
            blc_img[1::2, 1::2] = gb
        else:
            # Previously an unknown pattern fell through every branch and
            # silently returned uninitialized np.empty memory; fail loudly.
            raise ValueError('Unsupported bayer pattern: ' + repr(self.bayer_pattern))
        self.img = blc_img
        return self.clipping()
| [
"numpy.clip",
"numpy.empty"
] | [((301, 346), 'numpy.clip', 'np.clip', (['self.img', '(0)', 'self.clip'], {'out': 'self.img'}), '(self.img, 0, self.clip, out=self.img)\n', (308, 346), True, 'import numpy as np\n'), ((682, 716), 'numpy.empty', 'np.empty', (['(raw_h, raw_w)', 'np.int16'], {}), '((raw_h, raw_w), np.int16)\n', (690, 716), True, 'import numpy as np\n')] |
import sys
import os
import torch
import pandas as pd
import datetime
from argparse import ArgumentParser
import numpy as np
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
from network.ecgresnet_auxout import ECGResNet_AuxOut
from utils.helpers import create_results_directory, create_weights_directory
from utils.focalloss_weights import FocalLoss
class ECGResNetSnapshotEnsemble_AuxOutSystem(pl.LightningModule):
    """
    This class implements a snapshot ensemble of ECGResNets with auxiliary
    output in PyTorch Lightning. It can estimate the epistemic and aleatoric
    uncertainty of its predictions.
    """

    def __init__(self, in_channels, n_grps, N,
                 num_classes, dropout, first_width, stride,
                 dilation, learning_rate, ensemble_size, max_epochs,
                 initial_lr, cyclical_learning_rate_type, n_logit_samples,
                 loss_weights=None, **kwargs):
        """
        Initializes the ECGResNetSnapshotEnsemble_AuxOutSystem

        Args:
          in_channels: number of channels of input
          n_grps: number of ResNet groups
          N: number of blocks per groups
          num_classes: number of classes of the classification problem
          dropout: probability of an argument to get zeroed in the dropout layer
          first_width: width of the first input
          stride: tuple with stride value per block per group
          dilation: spacing between the kernel points of the convolutional layers
          learning_rate: the learning rate of the model
          ensemble_size: the number of models that make up the ensemble
          max_epochs: total number of epochs to train for
          initial_lr: the initial learning rate at the start of a learning cycle
          cyclical_learning_rate_type: the type of learning rate cycling to apply
          n_logit_samples: number of logit samples of the auxiliary output
          loss_weights: array of weights for the loss term
        """
        super().__init__()
        self.save_hyperparameters()
        self.learning_rate = learning_rate
        self.num_classes = num_classes
        self.ensemble_size = ensemble_size
        self.max_epochs = max_epochs
        self.initial_lr = initial_lr
        self.cyclical_learning_rate_type = cyclical_learning_rate_type
        self.n_logit_samples = n_logit_samples

        # Accumulators for test-time results, filled incrementally in test_step.
        self.IDs = torch.empty(0).type(torch.LongTensor)
        self.predicted_labels = torch.empty(0).type(torch.LongTensor)
        self.correct_predictions = torch.empty(0).type(torch.BoolTensor)
        self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)
        self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor)
        self.total_uncertainty = torch.empty(0).type(torch.FloatTensor)

        self.models = []
        self.optimizers = []
        # Only one network is trained; ensemble members are snapshots of it
        # saved at the end of each learning-rate cycle (see on_train_epoch_end)
        # and reloaded in on_test_epoch_start.
        self.models.append(ECGResNet_AuxOut(in_channels,
                                            n_grps, N, num_classes,
                                            dropout, first_width,
                                            stride, dilation)
                           )

        if loss_weights is not None:
            weights = torch.tensor(loss_weights, dtype=torch.float)
        else:
            weights = loss_weights

        self.loss = FocalLoss(gamma=1, weights=weights)
        create_weights_directory()

    def forward(self, x, model_idx):
        """Performs a forward through a single ensemble member.

        Args:
            x (tensor): Input data.
            model_idx (int): Index of the ensemble member.

        Returns:
            output1: Output at the auxiliary point of the ensemble member
            output2: Output at the end of the ensemble member
            output2_log_var: The log variance of the ensemble_member
        """
        output1, output2_mean, output2_log_var = self.models[model_idx](x)
        return output1, output2_mean, output2_log_var

    def on_train_epoch_start(self):
        """
        Set the cyclical learning rate for the current epoch
        """
        learning_rate = self.get_learning_rate(self.current_epoch, self.ensemble_size, self.max_epochs, self.initial_lr, self.cyclical_learning_rate_type)
        self.set_learning_rate(self.optimizers[0], learning_rate)
        self.log('Learning rate', learning_rate)
        print('Epoch: {} learning rate: {}'.format(self.current_epoch, learning_rate))

    def training_step(self, batch, batch_idx):
        """Performs a training step for all ensemble members.

        Args:
            batch (dict): Output of the dataloader.
            batch_idx (int): Index no. of this batch.

        Returns:
            tensor: Total loss for this step.
        """
        data, target = batch['waveform'], batch['label']
        model_idx = 0

        # Make prediction
        output1, output2_mean, output2_log_var = self(data, model_idx)

        # Sample from logits, returning a vector x_i
        x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)

        train_loss1 = self.loss(output1, target)
        train_loss2 = self.loss(x_i, target)
        total_train_loss = (0.3 * train_loss1) + train_loss2

        # Update weights for single model using individual optimizer
        self.manual_backward(total_train_loss, self.optimizers[model_idx])
        self.optimizers[model_idx].step()
        self.optimizers[model_idx].zero_grad()
        # BUGFIX: was `'train_loss'.format(model_idx)` — a no-op format call
        # on a string with no placeholder; log under the plain metric name.
        self.log('train_loss', total_train_loss)
        return {'loss': total_train_loss}

    def on_train_epoch_end(self, outputs):
        """
        Save the model after each learning-rate cycle
        """
        if self.cyclical_learning_rate_type == 'cosine-annealing':
            epochs_per_cycle = self.max_epochs / self.ensemble_size

            # Check if we are at the end of a learning-rate cycle
            if (self.current_epoch + 1) % epochs_per_cycle == 0:
                model_idx = int((self.current_epoch + 1) / epochs_per_cycle)

                # Save current model as the next snapshot ensemble member
                print('\nSaving model: {}/{}'.format(model_idx, self.ensemble_size))
                torch.save({
                    'epoch': self.current_epoch,
                    'model_state_dict': self.models[0].state_dict(),
                    'optimizer_state_dict': self.optimizers[0].state_dict(),
                }, "weights/ssensemble_auxout_model{}.pt".format(model_idx))
                # self.trainer.save_checkpoint("weights/ssensemble_model{}.ckpt".format(model_idx))

    def validation_step(self, batch, batch_idx):
        """Validates on the single in-training model and logs loss/accuracy."""
        data, target = batch['waveform'], batch['label']
        model_idx = 0

        # Make prediction
        _, output2_mean, output2_log_var = self(data, model_idx)

        # Sample from logits, returning vector x_i
        x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)

        # Apply softmax to obtain probability vector p_i
        p_i = F.softmax(x_i, dim=1)

        val_loss = self.loss(p_i, target)
        acc = FM.accuracy(p_i, target)

        # Log metrics
        metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
        self.log('val_acc', acc.item())
        self.log('val_loss', val_loss.item())
        return metrics

    def on_test_epoch_start(self):
        """
        Initialize ensemble members from saved checkpoints
        """
        print('\nInitializing ensemble members from checkpoints')

        # Remove first model from self.models
        self.models.clear()

        for model_idx in range(self.ensemble_size):
            # Initialize ensemble members from different epochs in the training stage of the original model
            self.models.append(ECGResNet_AuxOut(self.hparams.in_channels,
                                                self.hparams.n_grps, self.hparams.N, self.hparams.num_classes,
                                                self.hparams.dropout, self.hparams.first_width,
                                                self.hparams.stride, self.hparams.dilation, self.hparams.n_logit_samples)
                               )

            model_path = 'weights/ssensemble_auxout_model{}.pt'.format(model_idx + 1)
            checkpoint = torch.load(model_path)
            self.models[model_idx].load_state_dict(checkpoint['model_state_dict'])
            self.models[model_idx].eval()
            print('Model {}/{} initialized\n'.format(model_idx + 1, self.ensemble_size))

    def test_step(self, batch, batch_idx, save_to_csv=False):
        """Ensembles the snapshot predictions and accumulates uncertainties."""
        prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
        aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
        data, target = batch['waveform'], batch['label']

        # Predict for each model
        for model_idx, model in enumerate(self.models):
            # Make prediction
            _, output2_mean, output2_log_var = self(data, model_idx)

            # Sample from logits, returning a vector x_i
            x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)

            prediction_individual[:, model_idx] = x_i.data

            # Take exponent to get the variance
            output2_var = output2_log_var.exp()
            aleatoric_var[:, model_idx] = output2_var.data

        # Calculate mean and variance over predictions from individual ensemble members
        prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
        prediction_ensemble_var = torch.var(prediction_individual, dim=1)

        # Get the average aleatoric uncertainty for each prediction
        prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1)

        # Select the predicted labels
        predicted_labels = prediction_ensemble_mean.argmax(dim=1)

        test_loss = self.loss(prediction_ensemble_mean, target)
        acc = FM.accuracy(prediction_ensemble_mean, target)

        # Get the epistemic variance of the predicted labels by selecting the variance of
        # the labels with highest average Softmax value
        predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()

        # Get the aleatoric variance of the predicted labels by selecting the variance of
        # the labels with highest average Softmax value
        predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()

        total_var = predicted_labels_var + predicted_labels_aleatoric_var

        # Log and save metrics
        self.log('test_acc', acc.item())
        self.log('test_loss', test_loss.item())

        self.IDs = torch.cat((self.IDs, batch['id']), 0)
        self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)
        self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)
        self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0)
        self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0)
        self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)

        # BUGFIX: the original dict literal repeated the 'test_loss' key.
        return {'test_loss': test_loss.item(), 'test_acc': acc.item()}

    # Initialize an optimizer for each model in the ensemble
    def configure_optimizers(self):
        """
        Initialize the optimizer, during training only a single model is used
        """
        model_idx = 0
        self.optimizers.append(optim.SGD(self.models[model_idx].parameters(), lr=self.initial_lr))
        return self.optimizers

    def get_learning_rate(self, epoch_idx, n_models, total_epochs, initial_lr, cyclical_learning_rate_type):
        """
        Returns the learning rate for the current epoch.

        Args:
            epoch_idx: index of the current epoch
            n_models: total number of ensemble members
            total_epochs: total number of epochs to train for
            initial_lr: the initial learning rate at the start of a learning cycle
            cyclical_learning_rate_type: the type of learning rate cycling to apply
        """
        if cyclical_learning_rate_type == 'cosine-annealing':
            """
            Apply a cosine-annealing cyclical learning rate as proposed by
            Loshchilov et al. in: "SGDR: Stochastic Gradient Descent with Warm Restarts"
            """
            epochs_per_cycle = total_epochs / n_models
            learning_rate = initial_lr * (np.cos(np.pi * (epoch_idx % epochs_per_cycle) / epochs_per_cycle) + 1) / 2
            return learning_rate
        else:
            # BUGFIX: previously returned the unbound local `learning_rate`,
            # raising UnboundLocalError. With cycling disabled, use a
            # constant learning rate.
            return initial_lr

    def set_learning_rate(self, optimizer, learning_rate):
        """
        Sets the learning rate for an optimizer

        Args:
            optimizer: optimizer to apply learning rate to
            learning_rate: learning rate to set
        """
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Adds ensemble-specific CLI arguments on top of `parent_parser`."""
        # BUGFIX: declared @staticmethod — the original took `parent_parser`
        # in the `self` slot, which broke instance-bound calls.
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--model_name', type=str, default='ssensemble_none')
        parser.add_argument('--ensemble_size', type=int, default=2)
        parser.add_argument('--ensembling_method', type=bool, default=True)
        parser.add_argument('--initial_lr', type=float, default=0.1)
        parser.add_argument('--cyclical_learning_rate_type', type=str, default='cosine-annealing', choices=['cosine-annealing', 'none'])
        parser.add_argument('--n_logit_samples', type=int, default=100)
        return parser

    # Combine results into single dataframe and save to disk
    def save_results(self):
        """
        Combine results into single dataframe and save to disk as .csv file
        """
        results = pd.concat([
            pd.DataFrame(self.IDs.numpy(), columns=['ID']),
            pd.DataFrame(self.predicted_labels.numpy(), columns=['predicted_label']),
            pd.DataFrame(self.correct_predictions.numpy(), columns=['correct_prediction']),
            pd.DataFrame(self.epistemic_uncertainty.numpy(), columns=['epistemic_uncertainty']),
            pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns=['aleatoric_uncertainty']),
            pd.DataFrame(self.total_uncertainty.numpy(), columns=['total_uncertainty']),
        ], axis=1)
        create_results_directory()
        results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
| [
"utils.focalloss_weights.FocalLoss",
"pytorch_lightning.metrics.functional.accuracy",
"utils.helpers.create_results_directory",
"argparse.ArgumentParser",
"torch.mean",
"torch.load",
"network.ecgresnet_auxout.ECGResNet_AuxOut",
"torch.tensor",
"utils.helpers.create_weights_directory",
"datetime.da... | [((3405, 3440), 'utils.focalloss_weights.FocalLoss', 'FocalLoss', ([], {'gamma': '(1)', 'weights': 'weights'}), '(gamma=1, weights=weights)\n', (3414, 3440), False, 'from utils.focalloss_weights import FocalLoss\n'), ((3451, 3477), 'utils.helpers.create_weights_directory', 'create_weights_directory', ([], {}), '()\n', (3475, 3477), False, 'from utils.helpers import create_results_directory, create_weights_directory\n'), ((7167, 7188), 'torch.nn.functional.softmax', 'F.softmax', (['x_i'], {'dim': '(1)'}), '(x_i, dim=1)\n', (7176, 7188), True, 'import torch.nn.functional as F\n'), ((7254, 7278), 'pytorch_lightning.metrics.functional.accuracy', 'FM.accuracy', (['p_i', 'target'], {}), '(p_i, target)\n', (7265, 7278), True, 'from pytorch_lightning.metrics import functional as FM\n'), ((8741, 8815), 'torch.empty', 'torch.empty', (["batch['label'].shape[0]", 'self.ensemble_size', 'self.num_classes'], {}), "(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n", (8752, 8815), False, 'import torch\n'), ((8840, 8914), 'torch.empty', 'torch.empty', (["batch['label'].shape[0]", 'self.ensemble_size', 'self.num_classes'], {}), "(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n", (8851, 8914), False, 'import torch\n'), ((9788, 9827), 'torch.var', 'torch.var', (['prediction_individual'], {'dim': '(1)'}), '(prediction_individual, dim=1)\n', (9797, 9827), False, 'import torch\n'), ((9932, 9964), 'torch.mean', 'torch.mean', (['aleatoric_var'], {'dim': '(1)'}), '(aleatoric_var, dim=1)\n', (9942, 9964), False, 'import torch\n'), ((10149, 10194), 'pytorch_lightning.metrics.functional.accuracy', 'FM.accuracy', (['prediction_ensemble_mean', 'target'], {}), '(prediction_ensemble_mean, target)\n', (10160, 10194), True, 'from pytorch_lightning.metrics import functional as FM\n'), ((11000, 11037), 'torch.cat', 'torch.cat', (["(self.IDs, batch['id'])", '(0)'], {}), "((self.IDs, batch['id']), 0)\n", (11009, 11037), False, 'import torch\n'), ((11070, 
11125), 'torch.cat', 'torch.cat', (['(self.predicted_labels, predicted_labels)', '(0)'], {}), '((self.predicted_labels, predicted_labels), 0)\n', (11079, 11125), False, 'import torch\n'), ((11163, 11227), 'torch.cat', 'torch.cat', (['(self.epistemic_uncertainty, predicted_labels_var)', '(0)'], {}), '((self.epistemic_uncertainty, predicted_labels_var), 0)\n', (11172, 11227), False, 'import torch\n'), ((11265, 11339), 'torch.cat', 'torch.cat', (['(self.aleatoric_uncertainty, predicted_labels_aleatoric_var)', '(0)'], {}), '((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0)\n', (11274, 11339), False, 'import torch\n'), ((11373, 11422), 'torch.cat', 'torch.cat', (['(self.total_uncertainty, total_var)', '(0)'], {}), '((self.total_uncertainty, total_var), 0)\n', (11382, 11422), False, 'import torch\n'), ((13480, 13535), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': '[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (13494, 13535), False, 'from argparse import ArgumentParser\n'), ((14841, 14867), 'utils.helpers.create_results_directory', 'create_results_directory', ([], {}), '()\n', (14865, 14867), False, 'from utils.helpers import create_results_directory, create_weights_directory\n'), ((3006, 3103), 'network.ecgresnet_auxout.ECGResNet_AuxOut', 'ECGResNet_AuxOut', (['in_channels', 'n_grps', 'N', 'num_classes', 'dropout', 'first_width', 'stride', 'dilation'], {}), '(in_channels, n_grps, N, num_classes, dropout, first_width,\n stride, dilation)\n', (3022, 3103), False, 'from network.ecgresnet_auxout import ECGResNet_AuxOut\n'), ((3287, 3332), 'torch.tensor', 'torch.tensor', (['loss_weights'], {'dtype': 'torch.float'}), '(loss_weights, dtype=torch.float)\n', (3299, 3332), False, 'import torch\n'), ((8410, 8432), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (8420, 8432), False, 'import torch\n'), ((9705, 9745), 'torch.mean', 'torch.mean', (['prediction_individual'], {'dim': '(1)'}), 
'(prediction_individual, dim=1)\n', (9715, 9745), False, 'import torch\n'), ((2519, 2533), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (2530, 2533), False, 'import torch\n'), ((2589, 2603), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (2600, 2603), False, 'import torch\n'), ((2662, 2676), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (2673, 2676), False, 'import torch\n'), ((2737, 2751), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (2748, 2751), False, 'import torch\n'), ((2813, 2827), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (2824, 2827), False, 'import torch\n'), ((2885, 2899), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (2896, 2899), False, 'import torch\n'), ((7943, 8185), 'network.ecgresnet_auxout.ECGResNet_AuxOut', 'ECGResNet_AuxOut', (['self.hparams.in_channels', 'self.hparams.n_grps', 'self.hparams.N', 'self.hparams.num_classes', 'self.hparams.dropout', 'self.hparams.first_width', 'self.hparams.stride', 'self.hparams.dilation', 'self.hparams.n_logit_samples'], {}), '(self.hparams.in_channels, self.hparams.n_grps, self.\n hparams.N, self.hparams.num_classes, self.hparams.dropout, self.hparams\n .first_width, self.hparams.stride, self.hparams.dilation, self.hparams.\n n_logit_samples)\n', (7959, 8185), False, 'from network.ecgresnet_auxout import ECGResNet_AuxOut\n'), ((12904, 12969), 'numpy.cos', 'np.cos', (['(np.pi * (epoch_idx % epochs_per_cycle) / epochs_per_cycle)'], {}), '(np.pi * (epoch_idx % epochs_per_cycle) / epochs_per_cycle)\n', (12910, 12969), True, 'import numpy as np\n'), ((14951, 14974), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14972, 14974), False, 'import datetime\n')] |
import logging
import numpy as np
import kubric as kb
from kubric.renderer.blender import Blender as KubricBlender
from kubric.simulator.pybullet import PyBullet as KubricSimulator
logging.basicConfig(level="DEBUG")  # < CRITICAL, ERROR, WARNING, INFO, DEBUG

# --- scene setup: one shared scene with a Blender renderer and a
# --- PyBullet simulator attached
scene = kb.Scene(resolution=(256, 256))
scene.frame_end = 48    # < numbers of frames to render
scene.frame_rate = 24   # < rendering framerate
scene.step_rate = 240   # < total simulation steps
renderer = KubricBlender(scene)
simulator = KubricSimulator(scene)

# --- static floor plus sun light and camera, all looking at the origin
scene += kb.Cube(
    name="floor", scale=(3, 3, 0.1), position=(0, 0, -0.1), static=True
)
scene += kb.DirectionalLight(
    name="sun", position=(-1, -0.5, 3), look_at=(0, 0, 0), intensity=1.5
)
scene.camera = kb.PerspectiveCamera(
    name="camera", position=(2, -0.5, 4), look_at=(0, 0, 0)
)

# --- drop eight randomly coloured spheres into the spawn region
spawn_region = [[-1, -1, 0], [1, 1, 1]]
rng = np.random.default_rng()
for _ in range(8):
    # NOTE: velocity is drawn before the colour, preserving the RNG stream.
    ball = kb.Sphere(
        scale=0.1,
        velocity=rng.uniform([-1, -1, 0], [1, 1, 0]),
        material=kb.PrincipledBSDFMaterial(color=kb.random_hue_color(rng=rng)),
    )
    scene += ball
    kb.move_until_no_overlap(ball, simulator, spawn_region=spawn_region)

# --- executes the simulation (and store keyframes)
simulator.run()

# --- renders the output
renderer.save_state("output/simulator.blend")
frames_dict = renderer.render()
kb.write_image_dict(frames_dict, "output")
| [
"logging.basicConfig",
"kubric.DirectionalLight",
"kubric.write_image_dict",
"kubric.Cube",
"numpy.random.default_rng",
"kubric.random_hue_color",
"kubric.Scene",
"kubric.simulator.pybullet.PyBullet",
"kubric.move_until_no_overlap",
"kubric.Sphere",
"kubric.renderer.blender.Blender",
"kubric.P... | [((182, 216), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""DEBUG"""'}), "(level='DEBUG')\n", (201, 216), False, 'import logging\n'), ((324, 355), 'kubric.Scene', 'kb.Scene', ([], {'resolution': '(256, 256)'}), '(resolution=(256, 256))\n', (332, 355), True, 'import kubric as kb\n'), ((519, 539), 'kubric.renderer.blender.Blender', 'KubricBlender', (['scene'], {}), '(scene)\n', (532, 539), True, 'from kubric.renderer.blender import Blender as KubricBlender\n'), ((552, 574), 'kubric.simulator.pybullet.PyBullet', 'KubricSimulator', (['scene'], {}), '(scene)\n', (567, 574), True, 'from kubric.simulator.pybullet import PyBullet as KubricSimulator\n'), ((640, 716), 'kubric.Cube', 'kb.Cube', ([], {'name': '"""floor"""', 'scale': '(3, 3, 0.1)', 'position': '(0, 0, -0.1)', 'static': '(True)'}), "(name='floor', scale=(3, 3, 0.1), position=(0, 0, -0.1), static=True)\n", (647, 716), True, 'import kubric as kb\n'), ((743, 836), 'kubric.DirectionalLight', 'kb.DirectionalLight', ([], {'name': '"""sun"""', 'position': '(-1, -0.5, 3)', 'look_at': '(0, 0, 0)', 'intensity': '(1.5)'}), "(name='sun', position=(-1, -0.5, 3), look_at=(0, 0, 0),\n intensity=1.5)\n", (762, 836), True, 'import kubric as kb\n'), ((877, 954), 'kubric.PerspectiveCamera', 'kb.PerspectiveCamera', ([], {'name': '"""camera"""', 'position': '(2, -0.5, 4)', 'look_at': '(0, 0, 0)'}), "(name='camera', position=(2, -0.5, 4), look_at=(0, 0, 0))\n", (897, 954), True, 'import kubric as kb\n'), ((1093, 1116), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (1114, 1116), True, 'import numpy as np\n'), ((1594, 1636), 'kubric.write_image_dict', 'kb.write_image_dict', (['frames_dict', '"""output"""'], {}), "(frames_dict, 'output')\n", (1613, 1636), True, 'import kubric as kb\n'), ((1271, 1329), 'kubric.Sphere', 'kb.Sphere', ([], {'scale': '(0.1)', 'velocity': 'velocity', 'material': 'material'}), '(scale=0.1, velocity=velocity, material=material)\n', (1280, 1329), True, 
'import kubric as kb\n'), ((1350, 1420), 'kubric.move_until_no_overlap', 'kb.move_until_no_overlap', (['sphere', 'simulator'], {'spawn_region': 'spawn_region'}), '(sphere, simulator, spawn_region=spawn_region)\n', (1374, 1420), True, 'import kubric as kb\n'), ((1230, 1258), 'kubric.random_hue_color', 'kb.random_hue_color', ([], {'rng': 'rng'}), '(rng=rng)\n', (1249, 1258), True, 'import kubric as kb\n')] |
from textwrap import dedent
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
option_context,
)
pytest.importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import (
_parse_latex_cell_styles,
_parse_latex_css_conversion,
_parse_latex_header_span,
_parse_latex_table_styles,
_parse_latex_table_wrapping,
)
@pytest.fixture
def df():
    """Two-row frame with int, float and object columns."""
    return DataFrame({"A": [0, 1], "B": [-0.61, -1.22], "C": ["ab", "cd"]})
@pytest.fixture
def df_ext():
    """Three-row extension of `df`, used for MultiIndex tests."""
    return DataFrame(
        {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
    )
@pytest.fixture
def styler(df):
    """Styler over `df` with a fixed uuid and 2-digit float precision."""
    return Styler(df, uuid_len=0, precision=2)
def test_minimal_latex_tabular(styler):
    """Default `to_latex` output is a bare tabular with no rules."""
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_tabular_hrules(styler):
    """`hrules=True` inserts booktabs top/mid/bottom rules."""
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
        \\toprule
         & A & B & C \\\\
        \\midrule
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\bottomrule
        \\end{tabular}
        """
    )
    assert styler.to_latex(hrules=True) == expected
def test_tabular_custom_hrules(styler):
    """Custom rule commands can be registered per selector; no midrule set."""
    styler.set_table_styles(
        [
            {"selector": "toprule", "props": ":hline"},
            {"selector": "bottomrule", "props": ":otherline"},
        ]
    )  # no midrule
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
        \\hline
         & A & B & C \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\otherline
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_column_format(styler):
# default setting is already tested in `test_latex_minimal_tabular`
styler.set_table_styles([{"selector": "column_format", "props": ":cccc"}])
assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_format="rrrr")
styler.set_table_styles([{"selector": "column_format", "props": ":r|r|cc"}])
assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
def test_siunitx_cols(styler):
    """`siunitx=True` uses S columns and braces every header cell."""
    expected = dedent(
        """\
        \\begin{tabular}{lSSl}
        {} & {A} & {B} & {C} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex(siunitx=True) == expected
def test_position(styler):
assert "\\begin{table}[h!]" in styler.to_latex(position="h!")
assert "\\end{table}" in styler.to_latex(position="h!")
styler.set_table_styles([{"selector": "position", "props": ":b!"}])
assert "\\begin{table}[b!]" in styler.to_latex()
assert "\\end{table}" in styler.to_latex()
@pytest.mark.parametrize("env", [None, "longtable"])
def test_label(styler, env):
    """`label` keyword and table style both emit \\label; § maps to ':'."""
    assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
    styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}])
    assert "\n\\label{more :text}" in styler.to_latex(environment=env)
def test_position_float_raises(styler):
    """Invalid `position_float` values and longtable combination raise."""
    msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="bad_string")

    msg = "`position_float` cannot be used in 'longtable' `environment`"
    with pytest.raises(ValueError, match=msg):
        styler.to_latex(position_float="centering", environment="longtable")
@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
@pytest.mark.parametrize("column_format", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
def test_kwargs_combinations(
    styler, label, position, caption, column_format, position_float
):
    """All keyword combinations co-exist; each (arg, expected) pair holds."""
    result = styler.to_latex(
        label=label[0],
        position=position[0],
        caption=caption[0],
        column_format=column_format[0],
        position_float=position_float[0],
    )
    assert label[1] in result
    assert position[1] in result
    assert caption[1] in result
    assert column_format[1] in result
    assert position_float[1] in result
def test_custom_table_styles(styler):
    """Arbitrary selectors become custom commands after \\begin{table}."""
    styler.set_table_styles(
        [
            {"selector": "mycommand", "props": ":{myoptions}"},
            {"selector": "mycommand2", "props": ":{myoptions2}"},
        ]
    )
    expected = dedent(
        """\
        \\begin{table}
        \\mycommand{myoptions}
        \\mycommand2{myoptions2}
        """
    )
    assert expected in styler.to_latex()
def test_cell_styling(styler):
    """Cell props render as commands; `--wrap` braces the whole cell."""
    styler.highlight_max(props="itshape:;Huge:--wrap;")
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
        1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
        \\end{tabular}
        """
    )
    assert expected == styler.to_latex()
def test_multiindex_columns(df):
    """MultiIndex columns render sparsified (multicolumn) or repeated."""
    cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df.columns = cidx
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & \\multicolumn{2}{r}{A} & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = df.style.format(precision=2)
    assert expected == s.to_latex()

    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & A & B \\\\
         & a & b & c \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    s = df.style.format(precision=2)
    assert expected == s.to_latex(sparse_columns=False)
def test_multiindex_row(df_ext):
    """MultiIndex rows render sparsified (multirow) or repeated."""
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df_ext.index = ridx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         & & A & B & C \\\\
        \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = df_ext.style.format(precision=2)
    result = styler.to_latex()
    assert expected == result

    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         & & A & B & C \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False)
    assert expected == result
def test_multirow_naive(df_ext):
    """`multirow_align='naive'` sparsifies without \\multirow commands."""
    ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
    df_ext.index = ridx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         & & A & B & C \\\\
        X & x & 0 & -0.61 & ab \\\\
         & y & 1 & -1.22 & cd \\\\
        Y & z & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = df_ext.style.format(precision=2)
    result = styler.to_latex(multirow_align="naive")
    assert expected == result
def test_multiindex_row_and_col(df_ext):
    """Combined MultiIndex rows and columns honour alignment keywords."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df_ext.index, df_ext.columns = ridx, cidx
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         & & \\multicolumn{2}{l}{Z} & Y \\\\
         & & a & b & c \\\\
        \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
         & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    styler = df_ext.style.format(precision=2)
    result = styler.to_latex(multirow_align="b", multicol_align="l")
    assert result == expected

    # non-sparse
    expected = dedent(
        """\
        \\begin{tabular}{llrrl}
         & & Z & Z & Y \\\\
         & & a & b & c \\\\
        A & a & 0 & -0.61 & ab \\\\
        A & b & 1 & -1.22 & cd \\\\
        B & c & 2 & -2.22 & de \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex(sparse_index=False, sparse_columns=False)
    assert result == expected
@pytest.mark.parametrize(
    "multicol_align, siunitx, header",
    [
        ("naive-l", False, " & A & &"),
        ("naive-r", False, " & & & A"),
        ("naive-l", True, "{} & {A} & {} & {}"),
        ("naive-r", True, "{} & {} & {} & {A}"),
    ],
)
def test_multicol_naive(df, multicol_align, siunitx, header):
    """`multicol_align='naive-*'` left/right-places the label, no \\multicolumn."""
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
    df.columns = ridx
    level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
    col_format = "lrrl" if not siunitx else "lSSl"
    expected = dedent(
        f"""\
        \\begin{{tabular}}{{{col_format}}}
        {header} \\\\
        {level1} \\\\
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{{tabular}}
        """
    )
    styler = df.style.format(precision=2)
    result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
    assert expected == result
def test_multi_options(df_ext):
    """Global options override the default multicol/multirow alignment."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df_ext.index, df_ext.columns = ridx, cidx
    styler = df_ext.style.format(precision=2)

    expected = dedent(
        """\
         & & \\multicolumn{2}{r}{Z} & Y \\\\
         & & a & b & c \\\\
        \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
        """
    )
    result = styler.to_latex()
    assert expected in result

    with option_context("styler.latex.multicol_align", "l"):
        assert " & & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()

    with option_context("styler.latex.multirow_align", "b"):
        assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
def test_multiindex_columns_hidden():
    """Hiding a column drops it from the tabular column spec."""
    df = DataFrame([[1, 2, 3, 4]])
    df.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
    s = df.style
    assert "{tabular}{lrrrr}" in s.to_latex()
    s.set_table_styles([])  # reset the position command
    s.hide([("A", 2)], axis="columns")
    assert "{tabular}{lrrr}" in s.to_latex()
@pytest.mark.parametrize(
    "option, value",
    [
        ("styler.sparse.index", True),
        ("styler.sparse.index", False),
        ("styler.sparse.columns", True),
        ("styler.sparse.columns", False),
    ],
)
def test_sparse_options(df_ext, option, value):
    """Sparse options only change output when set to the non-default False."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df_ext.index, df_ext.columns = ridx, cidx
    styler = df_ext.style

    latex1 = styler.to_latex()
    with option_context(option, value):
        latex2 = styler.to_latex()
    assert (latex1 == latex2) is value
def test_hidden_index(styler):
    """With the index hidden, no index column appears in the tabular output."""
    styler.hide(axis="index")
    # column spec "rrl" has no leading "l" for the (hidden) index
    expected = dedent(
        """\
        \\begin{tabular}{rrl}
        A & B & C \\\\
        0 & -0.61 & ab \\\\
        1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    result = styler.to_latex()
    assert result == expected
@pytest.mark.parametrize("environment", ["table", "figure*", None])
def test_comprehensive(df_ext, environment):
    """Render captions, label, rules, highlights and custom commands all at
    once and compare against one exact LaTeX document."""
    # test as many low level features simultaneously as possible
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df_ext.index, df_ext.columns = ridx, cidx
    stlr = df_ext.style
    stlr.set_caption("mycap")
    # exercise every template selector; '§' in the label is the CSS-safe
    # placeholder for ':' and "rowcolors" is passed through as a raw command
    stlr.set_table_styles(
        [
            {"selector": "label", "props": ":{fig§item}"},
            {"selector": "position", "props": ":h!"},
            {"selector": "position_float", "props": ":centering"},
            {"selector": "column_format", "props": ":rlrlr"},
            {"selector": "toprule", "props": ":toprule"},
            {"selector": "midrule", "props": ":midrule"},
            {"selector": "bottomrule", "props": ":bottomrule"},
            {"selector": "rowcolors", "props": ":{3}{pink}{}"},  # custom command
        ]
    )
    stlr.highlight_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
    stlr.highlight_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
    # NOTE: expectation is written for environment="table"; the other
    # parametrized environments are produced by textual replacement below
    expected = (
        """\
\\begin{table}[h!]
\\centering
\\caption{mycap}
\\label{fig:item}
\\rowcolors{3}{pink}{}
\\begin{tabular}{rlrlr}
\\toprule
 & & \\multicolumn{2}{r}{Z} & Y \\\\
 & & a & b & c \\\\
\\midrule
\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
 & b & 1 & -1.22 & cd \\\\
B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
        """\
\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
    ).replace("table", environment if environment else "table")
    result = stlr.format(precision=2).to_latex(environment=environment)
    assert result == expected
def test_environment_option(styler):
    """The environment option sets the outer environment, and the keyword
    argument to ``to_latex`` takes precedence over it."""
    with option_context("styler.latex.environment", "bar-env"):
        rendered = styler.to_latex()
        assert "\\begin{bar-env}" in rendered
        assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
def test_parse_latex_table_styles(styler):
    """Later duplicate selectors win, and '§' is converted back to ':'."""
    table_styles = [
        {"selector": "foo", "props": [("attr", "value")]},
        {"selector": "bar", "props": [("attr", "overwritten")]},
        {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
        {"selector": "label", "props": [("", "{fig§item}")]},
    ]
    styler.set_table_styles(table_styles)
    # the second "bar" entry overwrites the first; extra props are dropped
    assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
    # '§' is the CSS-safe placeholder for ':' and is restored when parsing
    assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
def test_parse_latex_cell_styles_basic(): # test nesting
cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
assert _parse_latex_cell_styles(cell_style, "text") == expected
@pytest.mark.parametrize(
"wrap_arg, expected",
[ # test wrapping
("", "\\<command><options> <display_value>"),
("--wrap", "{\\<command><options> <display_value>}"),
("--nowrap", "\\<command><options> <display_value>"),
("--lwrap", "{\\<command><options>} <display_value>"),
("--dwrap", "{\\<command><options>}{<display_value>}"),
("--rwrap", "\\<command><options>{<display_value>}"),
],
)
def test_parse_latex_cell_styles_braces(wrap_arg, expected):
cell_style = [("<command>", f"<options>{wrap_arg}")]
assert _parse_latex_cell_styles(cell_style, "<display_value>") == expected
def test_parse_latex_header_span():
cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
expected = "\\multicolumn{3}{Y}{text}"
assert _parse_latex_header_span(cell, "X", "Y") == expected
cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
expected = "\\multirow[X]{5}{*}{text}"
assert _parse_latex_header_span(cell, "X", "Y") == expected
cell = {"display_value": "text", "cellstyle": []}
assert _parse_latex_header_span(cell, "X", "Y") == "text"
cell = {"display_value": "text", "cellstyle": [("bfseries", "--rwrap")]}
assert _parse_latex_header_span(cell, "X", "Y") == "\\bfseries{text}"
def test_parse_latex_table_wrapping(styler):
    """A {table} wrapper is needed only for a caption or a non-ignored style."""
    ignorable_styles = [
        {"selector": "toprule", "props": ":value"},
        {"selector": "bottomrule", "props": ":value"},
        {"selector": "midrule", "props": ":value"},
        {"selector": "column_format", "props": ":value"},
    ]
    styler.set_table_styles(ignorable_styles)
    # only ignorable selectors and no caption: no wrapping required
    assert _parse_latex_table_wrapping(styler.table_styles, styler.caption) is False
    # a caption alone forces wrapping
    assert _parse_latex_table_wrapping(styler.table_styles, "some caption") is True
    # any unrecognised selector also forces wrapping, caption or not
    styler.set_table_styles(
        [{"selector": "not-ignored", "props": ":value"}],
        overwrite=False,
    )
    assert _parse_latex_table_wrapping(styler.table_styles, None) is True
def test_short_caption(styler):
    """A (full, short) caption tuple renders as ``\\caption[short]{full}``."""
    rendered = styler.to_latex(caption=("full cap", "short cap"))
    assert "\\caption[short cap]{full cap}" in rendered
@pytest.mark.parametrize(
"css, expected",
[
([("color", "red")], [("color", "{red}")]), # test color and input format types
(
[("color", "rgb(128, 128, 128 )")],
[("color", "[rgb]{0.502, 0.502, 0.502}")],
),
(
[("color", "rgb(128, 50%, 25% )")],
[("color", "[rgb]{0.502, 0.500, 0.250}")],
),
(
[("color", "rgba(128,128,128,1)")],
[("color", "[rgb]{0.502, 0.502, 0.502}")],
),
([("color", "#FF00FF")], [("color", "[HTML]{FF00FF}")]),
([("color", "#F0F")], [("color", "[HTML]{FF00FF}")]),
([("font-weight", "bold")], [("bfseries", "")]), # test font-weight and types
([("font-weight", "bolder")], [("bfseries", "")]),
([("font-weight", "normal")], []),
([("background-color", "red")], [("cellcolor", "{red}--lwrap")]),
(
[("background-color", "#FF00FF")], # test background-color command and wrap
[("cellcolor", "[HTML]{FF00FF}--lwrap")],
),
([("font-style", "italic")], [("itshape", "")]), # test font-style and types
([("font-style", "oblique")], [("slshape", "")]),
([("font-style", "normal")], []),
([("color", "red /*--dwrap*/")], [("color", "{red}--dwrap")]), # css comments
([("background-color", "red /* --dwrap */")], [("cellcolor", "{red}--dwrap")]),
],
)
def test_parse_latex_css_conversion(css, expected):
result = _parse_latex_css_conversion(css)
assert result == expected
@pytest.mark.parametrize(
    "env, inner_env",
    [
        (None, "tabular"),
        ("table", "tabular"),
        ("longtable", "longtable"),
    ],
)
@pytest.mark.parametrize(
    "convert, exp", [(True, "bfseries"), (False, "font-weightbold")]
)
def test_parse_latex_css_convert_minimal(styler, env, inner_env, convert, exp):
    """CSS conversion is applied (or not) in both tabular and longtable templates."""
    # parameters ensure the longtable template is exercised as well
    styler.highlight_max(props="font-weight:bold;")
    expected = dedent(
        f"""\
        0 & 0 & \\{exp} -0.61 & ab \\\\
        1 & \\{exp} 1 & -1.22 & \\{exp} cd \\\\
        \\end{{{inner_env}}}
        """
    )
    assert expected in styler.to_latex(convert_css=convert, environment=env)
def test_parse_latex_css_conversion_option():
css = [("command", "option--latex--wrap")]
expected = [("command", "option--wrap")]
result = _parse_latex_css_conversion(css)
assert result == expected
def test_styler_object_after_render(styler):
    """Rendering must not mutate the Styler object (GH 42320)."""
    snapshot = styler._copy(deepcopy=True)
    styler.to_latex(
        column_format="rllr",
        position="h",
        position_float="centering",
        hrules=True,
        label="my lab",
        caption="my cap",
    )
    # none of the keyword arguments above may leak into persistent state
    assert snapshot.table_styles == styler.table_styles
    assert snapshot.caption == styler.caption
def test_longtable_comprehensive(styler):
    """Longtable output with hrules, label and a (full, short) caption: the
    caption/label land in the first head and the repeated head uses
    ``\\caption[]`` so the caption is not duplicated in list-of-tables."""
    result = styler.to_latex(
        environment="longtable", hrules=True, label="fig:A", caption=("full", "short")
    )
    expected = dedent(
        """\
        \\begin{longtable}{lrrl}
        \\caption[short]{full} \\label{fig:A} \\\\
        \\toprule
         & A & B & C \\\\
        \\midrule
        \\endfirsthead
        \\caption[]{full} \\\\
        \\toprule
         & A & B & C \\\\
        \\midrule
        \\endhead
        \\midrule
        \\multicolumn{4}{r}{Continued on next page} \\\\
        \\midrule
        \\endfoot
        \\bottomrule
        \\endlastfoot
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{longtable}
        """
    )
    assert result == expected
def test_longtable_minimal(styler):
    """Bare longtable: no rules, caption or label, but the first-head /
    repeated-head / foot skeleton is always emitted."""
    result = styler.to_latex(environment="longtable")
    expected = dedent(
        """\
        \\begin{longtable}{lrrl}
         & A & B & C \\\\
        \\endfirsthead
         & A & B & C \\\\
        \\endhead
        \\multicolumn{4}{r}{Continued on next page} \\\\
        \\endfoot
        \\endlastfoot
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{longtable}
        """
    )
    assert result == expected
@pytest.mark.parametrize(
    "sparse, exp, siunitx",
    [
        (True, "{} & \\multicolumn{2}{r}{A} & {B}", True),
        (False, "{} & {A} & {A} & {B}", True),
        (True, " & \\multicolumn{2}{r}{A} & B", False),
        (False, " & A & A & B", False),
    ],
)
def test_longtable_multiindex_columns(df, sparse, exp, siunitx):
    """Longtable repeats MultiIndex column headers under sparse/siunitx combos."""
    df.columns = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    # second header level: siunitx braces every cell, plain mode does not
    level1 = "{} & {a} & {b} & {c} \\\\" if siunitx else " & a & b & c \\\\"
    col_spec = "SS" if siunitx else "rr"
    expected = dedent(
        f"""\
        \\begin{{longtable}}{{l{col_spec}l}}
        {exp} \\\\
        {level1}
        \\endfirsthead
        {exp} \\\\
        {level1}
        \\endhead
        """
    )
    result = df.style.to_latex(
        environment="longtable", sparse_columns=sparse, siunitx=siunitx
    )
    assert expected in result
@pytest.mark.parametrize(
    "caption, cap_exp",
    [
        ("full", ("{full}", "")),
        (("full", "short"), ("{full}", "[short]")),
    ],
)
@pytest.mark.parametrize("label, lab_exp", [(None, ""), ("tab:A", " \\label{tab:A}")])
def test_longtable_caption_label(styler, caption, cap_exp, label, lab_exp):
    """Caption (full/short) and label render in the longtable first head."""
    first_head_caption = f"\\caption{cap_exp[1]}{cap_exp[0]}"
    # the repeated head always uses an empty short caption
    repeat_head_caption = f"\\caption[]{cap_exp[0]}"
    expected = dedent(
        f"""\
        {first_head_caption}{lab_exp} \\\\
         & A & B & C \\\\
        \\endfirsthead
        {repeat_head_caption} \\\\
        """
    )
    result = styler.to_latex(environment="longtable", caption=caption, label=label)
    assert expected in result
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize(
    "columns, siunitx",
    [
        (True, True),
        (True, False),
        (False, False),
    ],
)
def test_apply_map_header_render_mi(df_ext, index, columns, siunitx):
    """Header styles set via ``applymap_index`` appear in the output exactly
    when the matching axis was styled."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df_ext.index, df_ext.columns = ridx, cidx
    styler = df_ext.style
    # style any header label containing "A", "Z" or "c"; None means no style
    func = lambda v: "bfseries: --rwrap" if "A" in v or "Z" in v or "c" in v else None
    if index:
        styler.applymap_index(func, axis="index")
    if columns:
        styler.applymap_index(func, axis="columns")
    result = styler.to_latex(siunitx=siunitx)
    # sparsified row labels: the styled "A" is wrapped inside the multirow
    expected_index = dedent(
        """\
        \\multirow[c]{2}{*}{\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\
        \\bfseries{} & b & 1 & -1.220000 & cd \\\\
        B & \\bfseries{c} & 2 & -2.220000 & de \\\\
        """
    )
    assert (expected_index in result) is index
    # siunitx braces every header cell; without it the cells are bare
    exp_cols_si = dedent(
        """\
        {} & {} & \\multicolumn{2}{r}{\\bfseries{Z}} & {Y} \\\\
        {} & {} & {a} & {b} & {\\bfseries{c}} \\\\
        """
    )
    exp_cols_no_si = """\
 & & \\multicolumn{2}{r}{\\bfseries{Z}} & Y \\\\
 & & a & b & \\bfseries{c} \\\\
"""
    assert ((exp_cols_si if siunitx else exp_cols_no_si) in result) is columns
def test_repr_option(styler):
    """The render.repr option switches which ``_repr_*_`` hook is active."""
    # default: HTML repr is populated, latex repr is disabled
    assert "<style" in styler._repr_html_()[:6]
    assert styler._repr_latex_() is None
    with option_context("styler.render.repr", "latex"):
        # under the latex repr option the roles are reversed
        assert styler._repr_html_() is None
        assert "\\begin{tabular}" in styler._repr_latex_()[:15]
@pytest.mark.parametrize("option", ["hrules"])
def test_bool_options(styler, option):
    """Boolean latex options are reactive when to_latex gets no arguments."""
    with option_context(f"styler.latex.{option}", False):
        rendered_off = styler.to_latex()
    with option_context(f"styler.latex.{option}", True):
        rendered_on = styler.to_latex()
    # flipping the option must change the rendered output
    assert rendered_off != rendered_on
def test_siunitx_basic_headers(styler):
    """siunitx=True braces every header cell; the default leaves them bare."""
    siunitx_render = styler.to_latex(siunitx=True)
    assert "{} & {A} & {B} & {C} \\\\" in siunitx_render
    default_render = styler.to_latex()  # siunitx defaults to False
    assert " & A & B & C \\\\" in default_render
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_css_convert_apply_index(styler, axis):
    """CSS applied through applymap_index is converted for every header label."""
    styler.applymap_index(lambda x: "font-weight: bold;", axis=axis)
    # rendering is deterministic, so render once and check all labels in it
    rendered = styler.to_latex(convert_css=True)
    for label in getattr(styler, axis):
        assert f"\\bfseries {label}" in rendered
def test_hide_index_latex(styler):
    """Hiding a single index label removes only that row (GH 43637)."""
    styler.hide([0], axis=0)
    # row "0" is gone; the header and row "1" survive unchanged
    expected = dedent(
        """\
        \\begin{tabular}{lrrl}
         & A & B & C \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{tabular}
        """
    )
    assert styler.to_latex() == expected
def test_latex_hiding_index_columns_multiindex_alignment():
    """Hidden index/column levels and labels must not upset multirow /
    multicolumn alignment of the remaining headers (GH 43644)."""
    # gh 43644
    midx = MultiIndex.from_product(
        [["i0", "j0"], ["i1"], ["i2", "j2"]], names=["i-0", "i-1", "i-2"]
    )
    cidx = MultiIndex.from_product(
        [["c0"], ["c1", "d1"], ["c2", "d2"]], names=["c-0", "c-1", "c-2"]
    )
    df = DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=cidx)
    styler = Styler(df, uuid_len=0)
    # hide one whole level on each axis plus one specific label on each axis
    styler.hide(level=1, axis=0).hide(level=0, axis=1)
    styler.hide([("i0", "i1", "i2")], axis=0)
    styler.hide([("c0", "c1", "c2")], axis=1)
    styler.applymap(lambda x: "color:{red};" if x == 5 else "")
    styler.applymap_index(lambda x: "color:{blue};" if "j" in x else "")
    result = styler.to_latex()
    expected = dedent(
        """\
        \\begin{tabular}{llrrr}
         & c-1 & c1 & \\multicolumn{2}{r}{d1} \\\\
         & c-2 & d2 & c2 & d2 \\\\
        i-0 & i-2 &  &  &  \\\\
        i0 & \\color{blue} j2 & \\color{red} 5 & 6 & 7 \\\\
        \\multirow[c]{2}{*}{\\color{blue} j0} & i2 & 9 & 10 & 11 \\\\
        \\color{blue}  & \\color{blue} j2 & 13 & 14 & 15 \\\\
        \\end{tabular}
        """
    )
    assert result == expected
| [
"textwrap.dedent",
"pandas.io.formats.style_render._parse_latex_css_conversion",
"pandas.io.formats.style.Styler",
"pandas.MultiIndex.from_product",
"pandas.io.formats.style_render._parse_latex_header_span",
"pandas.option_context",
"pytest.mark.parametrize",
"pytest.importorskip",
"pytest.raises",
... | [((138, 167), 'pytest.importorskip', 'pytest.importorskip', (['"""jinja2"""'], {}), "('jinja2')\n", (157, 167), False, 'import pytest\n'), ((2891, 2942), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""env"""', "[None, 'longtable']"], {}), "('env', [None, 'longtable'])\n", (2914, 2942), False, 'import pytest\n'), ((3632, 3705), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""label"""', "[(None, ''), ('text', '\\\\label{text}')]"], {}), "('label', [(None, ''), ('text', '\\\\label{text}')])\n", (3655, 3705), False, 'import pytest\n'), ((3707, 3779), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""position"""', "[(None, ''), ('h!', '{table}[h!]')]"], {}), "('position', [(None, ''), ('h!', '{table}[h!]')])\n", (3730, 3779), False, 'import pytest\n'), ((3781, 3858), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""caption"""', "[(None, ''), ('text', '\\\\caption{text}')]"], {}), "('caption', [(None, ''), ('text', '\\\\caption{text}')])\n", (3804, 3858), False, 'import pytest\n'), ((3860, 3947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""column_format"""', "[(None, ''), ('rcrl', '{tabular}{rcrl}')]"], {}), "('column_format', [(None, ''), ('rcrl',\n '{tabular}{rcrl}')])\n", (3883, 3947), False, 'import pytest\n'), ((3945, 4034), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""position_float"""', "[(None, ''), ('centering', '\\\\centering')]"], {}), "('position_float', [(None, ''), ('centering',\n '\\\\centering')])\n", (3968, 4034), False, 'import pytest\n'), ((8513, 8728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""multicol_align, siunitx, header"""', "[('naive-l', False, ' & A & &'), ('naive-r', False, ' & & & A'), ('naive-l',\n True, '{} & {A} & {} & {}'), ('naive-r', True, '{} & {} & {} & {A}')]"], {}), "('multicol_align, siunitx, header', [('naive-l', \n False, ' & A & &'), ('naive-r', False, ' & & & A'), ('naive-l', True,\n '{} & {A} & {} & {}'), ('naive-r', True, '{} 
& {} & {} & {A}')])\n", (8536, 8728), False, 'import pytest\n'), ((10581, 10763), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""option, value"""', "[('styler.sparse.index', True), ('styler.sparse.index', False), (\n 'styler.sparse.columns', True), ('styler.sparse.columns', False)]"], {}), "('option, value', [('styler.sparse.index', True), (\n 'styler.sparse.index', False), ('styler.sparse.columns', True), (\n 'styler.sparse.columns', False)])\n", (10604, 10763), False, 'import pytest\n'), ((11507, 11573), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""environment"""', "['table', 'figure*', None]"], {}), "('environment', ['table', 'figure*', None])\n", (11530, 11573), False, 'import pytest\n'), ((14433, 14823), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""wrap_arg, expected"""', "[('', '\\\\<command><options> <display_value>'), ('--wrap',\n '{\\\\<command><options> <display_value>}'), ('--nowrap',\n '\\\\<command><options> <display_value>'), ('--lwrap',\n '{\\\\<command><options>} <display_value>'), ('--dwrap',\n '{\\\\<command><options>}{<display_value>}'), ('--rwrap',\n '\\\\<command><options>{<display_value>}')]"], {}), "('wrap_arg, expected', [('',\n '\\\\<command><options> <display_value>'), ('--wrap',\n '{\\\\<command><options> <display_value>}'), ('--nowrap',\n '\\\\<command><options> <display_value>'), ('--lwrap',\n '{\\\\<command><options>} <display_value>'), ('--dwrap',\n '{\\\\<command><options>}{<display_value>}'), ('--rwrap',\n '\\\\<command><options>{<display_value>}')])\n", (14456, 14823), False, 'import pytest\n'), ((16642, 17703), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""css, expected"""', "[([('color', 'red')], [('color', '{red}')]), ([('color',\n 'rgb(128, 128, 128 )')], [('color', '[rgb]{0.502, 0.502, 0.502}')]), ([\n ('color', 'rgb(128, 50%, 25% )')], [('color',\n '[rgb]{0.502, 0.500, 0.250}')]), ([('color', 'rgba(128,128,128,1)')], [\n ('color', '[rgb]{0.502, 0.502, 0.502}')]), 
([('color', '#FF00FF')], [(\n 'color', '[HTML]{FF00FF}')]), ([('color', '#F0F')], [('color',\n '[HTML]{FF00FF}')]), ([('font-weight', 'bold')], [('bfseries', '')]), (\n [('font-weight', 'bolder')], [('bfseries', '')]), ([('font-weight',\n 'normal')], []), ([('background-color', 'red')], [('cellcolor',\n '{red}--lwrap')]), ([('background-color', '#FF00FF')], [('cellcolor',\n '[HTML]{FF00FF}--lwrap')]), ([('font-style', 'italic')], [('itshape',\n '')]), ([('font-style', 'oblique')], [('slshape', '')]), ([(\n 'font-style', 'normal')], []), ([('color', 'red /*--dwrap*/')], [(\n 'color', '{red}--dwrap')]), ([('background-color', 'red /* --dwrap */')\n ], [('cellcolor', '{red}--dwrap')])]"], {}), "('css, expected', [([('color', 'red')], [('color',\n '{red}')]), ([('color', 'rgb(128, 128, 128 )')], [('color',\n '[rgb]{0.502, 0.502, 0.502}')]), ([('color', 'rgb(128, 50%, 25% )')], [\n ('color', '[rgb]{0.502, 0.500, 0.250}')]), ([('color',\n 'rgba(128,128,128,1)')], [('color', '[rgb]{0.502, 0.502, 0.502}')]), ([\n ('color', '#FF00FF')], [('color', '[HTML]{FF00FF}')]), ([('color',\n '#F0F')], [('color', '[HTML]{FF00FF}')]), ([('font-weight', 'bold')], [\n ('bfseries', '')]), ([('font-weight', 'bolder')], [('bfseries', '')]),\n ([('font-weight', 'normal')], []), ([('background-color', 'red')], [(\n 'cellcolor', '{red}--lwrap')]), ([('background-color', '#FF00FF')], [(\n 'cellcolor', '[HTML]{FF00FF}--lwrap')]), ([('font-style', 'italic')], [\n ('itshape', '')]), ([('font-style', 'oblique')], [('slshape', '')]), ([\n ('font-style', 'normal')], []), ([('color', 'red /*--dwrap*/')], [(\n 'color', '{red}--dwrap')]), ([('background-color', 'red /* --dwrap */')\n ], [('cellcolor', '{red}--dwrap')])])\n", (16665, 17703), False, 'import pytest\n'), ((18210, 18326), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""env, inner_env"""', "[(None, 'tabular'), ('table', 'tabular'), ('longtable', 'longtable')]"], {}), "('env, inner_env', [(None, 'tabular'), ('table',\n 'tabular'), 
('longtable', 'longtable')])\n", (18233, 18326), False, 'import pytest\n'), ((18366, 18459), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""convert, exp"""', "[(True, 'bfseries'), (False, 'font-weightbold')]"], {}), "('convert, exp', [(True, 'bfseries'), (False,\n 'font-weightbold')])\n", (18389, 18459), False, 'import pytest\n'), ((20790, 21022), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sparse, exp, siunitx"""', "[(True, '{} & \\\\multicolumn{2}{r}{A} & {B}', True), (False,\n '{} & {A} & {A} & {B}', True), (True, ' & \\\\multicolumn{2}{r}{A} & B', \n False), (False, ' & A & A & B', False)]"], {}), "('sparse, exp, siunitx', [(True,\n '{} & \\\\multicolumn{2}{r}{A} & {B}', True), (False,\n '{} & {A} & {A} & {B}', True), (True, ' & \\\\multicolumn{2}{r}{A} & B', \n False), (False, ' & A & A & B', False)])\n", (20813, 21022), False, 'import pytest\n'), ((21727, 21847), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""caption, cap_exp"""', "[('full', ('{full}', '')), (('full', 'short'), ('{full}', '[short]'))]"], {}), "('caption, cap_exp', [('full', ('{full}', '')), ((\n 'full', 'short'), ('{full}', '[short]'))])\n", (21750, 21847), False, 'import pytest\n'), ((21878, 21967), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""label, lab_exp"""', "[(None, ''), ('tab:A', ' \\\\label{tab:A}')]"], {}), "('label, lab_exp', [(None, ''), ('tab:A',\n ' \\\\label{tab:A}')])\n", (21901, 21967), False, 'import pytest\n'), ((22407, 22454), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""index"""', '[True, False]'], {}), "('index', [True, False])\n", (22430, 22454), False, 'import pytest\n'), ((22456, 22551), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""columns, siunitx"""', '[(True, True), (True, False), (False, False)]'], {}), "('columns, siunitx', [(True, True), (True, False), (\n False, False)])\n", (22479, 22551), False, 'import pytest\n'), ((24052, 24097), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""option"""', "['hrules']"], {}), "('option', ['hrules'])\n", (24075, 24097), False, 'import pytest\n'), ((24611, 24664), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', "['index', 'columns']"], {}), "('axis', ['index', 'columns'])\n", (24634, 24664), False, 'import pytest\n'), ((454, 518), 'pandas.DataFrame', 'DataFrame', (["{'A': [0, 1], 'B': [-0.61, -1.22], 'C': ['ab', 'cd']}"], {}), "({'A': [0, 1], 'B': [-0.61, -1.22], 'C': ['ab', 'cd']})\n", (463, 518), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((562, 647), 'pandas.DataFrame', 'DataFrame', (["{'A': [0, 1, 2], 'B': [-0.61, -1.22, -2.22], 'C': ['ab', 'cd', 'de']}"], {}), "({'A': [0, 1, 2], 'B': [-0.61, -1.22, -2.22], 'C': ['ab', 'cd', 'de']}\n )\n", (571, 647), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((702, 737), 'pandas.io.formats.style.Styler', 'Styler', (['df'], {'uuid_len': '(0)', 'precision': '(2)'}), '(df, uuid_len=0, precision=2)\n', (708, 737), False, 'from pandas.io.formats.style import Styler\n'), ((795, 971), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{lrrl}\n & A & B & C \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lrrl}\n & A & B & C \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (801, 971), False, 'from textwrap import dedent\n'), ((1069, 1302), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{lrrl}\n \\\\toprule\n & A & B & C \\\\\\\\\n \\\\midrule\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\bottomrule\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lrrl}\n \\\\toprule\n & A & B & C \\\\\\\\\n \\\\midrule\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\bottomrule\n \\\\end{tabular}\n """\n )\n', (1075, 1302), False, 'from textwrap import dedent\n'), ((1606, 1818), 'textwrap.dedent', 
'dedent', (['""" \\\\begin{tabular}{lrrl}\n \\\\hline\n & A & B & C \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\otherline\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lrrl}\n \\\\hline\n & A & B & C \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\otherline\n \\\\end{tabular}\n """\n )\n', (1612, 1818), False, 'from textwrap import dedent\n'), ((2317, 2501), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{lSSl}\n {} & {A} & {B} & {C} \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lSSl}\n {} & {A} & {B} & {C} \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (2323, 2501), False, 'from textwrap import dedent\n'), ((4744, 4863), 'textwrap.dedent', 'dedent', (['""" \\\\begin{table}\n \\\\mycommand{myoptions}\n \\\\mycommand2{myoptions2}\n """'], {}), '(\n """ \\\\begin{table}\n \\\\mycommand{myoptions}\n \\\\mycommand2{myoptions2}\n """\n )\n', (4750, 4863), False, 'from textwrap import dedent\n'), ((5015, 5248), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{lrrl}\n & A & B & C \\\\\\\\\n 0 & 0 & \\\\itshape {\\\\Huge -0.61} & ab \\\\\\\\\n 1 & \\\\itshape {\\\\Huge 1} & -1.22 & \\\\itshape {\\\\Huge cd} \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lrrl}\n & A & B & C \\\\\\\\\n 0 & 0 & \\\\itshape {\\\\Huge -0.61} & ab \\\\\\\\\n 1 & \\\\itshape {\\\\Huge 1} & -1.22 & \\\\itshape {\\\\Huge cd} \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (5021, 5248), False, 'from textwrap import dedent\n'), ((5342, 5402), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (5364, 5402), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((5440, 5659), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{lrrl}\n & 
\\\\multicolumn{2}{r}{A} & B \\\\\\\\\n & a & b & c \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lrrl}\n & \\\\multicolumn{2}{r}{A} & B \\\\\\\\\n & a & b & c \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (5446, 5659), False, 'from textwrap import dedent\n'), ((5772, 5974), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{lrrl}\n & A & A & B \\\\\\\\\n & a & b & c \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lrrl}\n & A & A & B \\\\\\\\\n & a & b & c \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (5778, 5974), False, 'from textwrap import dedent\n'), ((6120, 6180), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (6142, 6180), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((6220, 6464), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{llrrl}\n & & A & B & C \\\\\\\\\n \\\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\\\\\\n & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{llrrl}\n & & A & B & C \\\\\\\\\n \\\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\\\\\\n & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (6226, 6464), False, 'from textwrap import dedent\n'), ((6611, 6835), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{llrrl}\n & & A & B & C \\\\\\\\\n A & a & 0 & -0.61 & ab \\\\\\\\\n A & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{llrrl}\n & & A & B & C \\\\\\\\\n A & a & 0 & -0.61 & ab \\\\\\\\\n A & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 
& -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (6617, 6835), False, 'from textwrap import dedent\n'), ((6967, 7027), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('X', 'x'), ('X', 'y'), ('Y', 'z')]"], {}), "([('X', 'x'), ('X', 'y'), ('Y', 'z')])\n", (6989, 7027), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((7067, 7290), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{llrrl}\n & & A & B & C \\\\\\\\\n X & x & 0 & -0.61 & ab \\\\\\\\\n & y & 1 & -1.22 & cd \\\\\\\\\n Y & z & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{llrrl}\n & & A & B & C \\\\\\\\\n X & x & 0 & -0.61 & ab \\\\\\\\\n & y & 1 & -1.22 & cd \\\\\\\\\n Y & z & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (7073, 7290), False, 'from textwrap import dedent\n'), ((7480, 7540), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('Z', 'a'), ('Z', 'b'), ('Y', 'c')]"], {}), "([('Z', 'a'), ('Z', 'b'), ('Y', 'c')])\n", (7502, 7540), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((7552, 7612), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (7574, 7612), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((7674, 7964), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{llrrl}\n & & \\\\multicolumn{2}{l}{Z} & Y \\\\\\\\\n & & a & b & c \\\\\\\\\n \\\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\\\\\\n & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{llrrl}\n & & \\\\multicolumn{2}{l}{Z} & Y \\\\\\\\\n & & a & b & c \\\\\\\\\n \\\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\\\\\\n & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (7680, 7964), False, 'from textwrap import dedent\n'), ((8149, 8402), 'textwrap.dedent', 
'dedent', (['""" \\\\begin{tabular}{llrrl}\n & & Z & Z & Y \\\\\\\\\n & & a & b & c \\\\\\\\\n A & a & 0 & -0.61 & ab \\\\\\\\\n A & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{llrrl}\n & & Z & Z & Y \\\\\\\\\n & & a & b & c \\\\\\\\\n A & a & 0 & -0.61 & ab \\\\\\\\\n A & b & 1 & -1.22 & cd \\\\\\\\\n B & c & 2 & -2.22 & de \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (8155, 8402), False, 'from textwrap import dedent\n'), ((8843, 8903), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('A', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('A', 'c')])\n", (8865, 8903), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((9063, 9272), 'textwrap.dedent', 'dedent', (['f""" \\\\begin{{tabular}}{{{col_format}}}\n {header} \\\\\\\\\n {level1} \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{{tabular}}\n """'], {}), '(\n f""" \\\\begin{{tabular}}{{{col_format}}}\n {header} \\\\\\\\\n {level1} \\\\\\\\\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{{tabular}}\n """\n )\n', (9069, 9272), False, 'from textwrap import dedent\n'), ((9473, 9533), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('Z', 'a'), ('Z', 'b'), ('Y', 'c')]"], {}), "([('Z', 'a'), ('Z', 'b'), ('Y', 'c')])\n", (9495, 9533), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((9545, 9605), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (9567, 9605), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((9714, 9862), 'textwrap.dedent', 'dedent', (['""" & & \\\\multicolumn{2}{r}{Z} & Y \\\\\\\\\n & & a & b & c \\\\\\\\\n \\\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\\\\\\n """'], {}), '(\n """ & & \\\\multicolumn{2}{r}{Z} & Y \\\\\\\\\n & & a & b & c \\\\\\\\\n 
\\\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\\\\\\n """\n )\n', (9720, 9862), False, 'from textwrap import dedent\n'), ((10266, 10291), 'pandas.DataFrame', 'DataFrame', (['[[1, 2, 3, 4]]'], {}), '([[1, 2, 3, 4]])\n', (10275, 10291), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((10309, 10373), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 1), ('A', 2), ('A', 3), ('B', 1)]"], {}), "([('A', 1), ('A', 2), ('A', 3), ('B', 1)])\n", (10331, 10373), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((10863, 10923), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('Z', 'a'), ('Z', 'b'), ('Y', 'c')]"], {}), "([('Z', 'a'), ('Z', 'b'), ('Y', 'c')])\n", (10885, 10923), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((10935, 10995), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (10957, 10995), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((11292, 11456), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{rrl}\n A & B & C \\\\\\\\\n 0 & -0.61 & ab \\\\\\\\\n 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{rrl}\n A & B & C \\\\\\\\\n 0 & -0.61 & ab \\\\\\\\\n 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (11298, 11456), False, 'from textwrap import dedent\n'), ((11695, 11755), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('Z', 'a'), ('Z', 'b'), ('Y', 'c')]"], {}), "([('Z', 'a'), ('Z', 'b'), ('Y', 'c')])\n", (11717, 11755), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((11767, 11827), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (11789, 11827), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((18144, 18176), 
'pandas.io.formats.style_render._parse_latex_css_conversion', '_parse_latex_css_conversion', (['css'], {}), '(css)\n', (18171, 18176), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((18734, 18880), 'textwrap.dedent', 'dedent', (['f""" 0 & 0 & \\\\{exp} -0.61 & ab \\\\\\\\\n 1 & \\\\{exp} 1 & -1.22 & \\\\{exp} cd \\\\\\\\\n \\\\end{{{inner_env}}}\n """'], {}), '(\n f""" 0 & 0 & \\\\{exp} -0.61 & ab \\\\\\\\\n 1 & \\\\{exp} 1 & -1.22 & \\\\{exp} cd \\\\\\\\\n \\\\end{{{inner_env}}}\n """\n )\n', (18740, 18880), False, 'from textwrap import dedent\n'), ((19070, 19102), 'pandas.io.formats.style_render._parse_latex_css_conversion', '_parse_latex_css_conversion', (['css'], {}), '(css)\n', (19097, 19102), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((19715, 20266), 'textwrap.dedent', 'dedent', (['""" \\\\begin{longtable}{lrrl}\n \\\\caption[short]{full} \\\\label{fig:A} \\\\\\\\\n \\\\toprule\n & A & B & C \\\\\\\\\n \\\\midrule\n \\\\endfirsthead\n \\\\caption[]{full} \\\\\\\\\n \\\\toprule\n & A & B & C \\\\\\\\\n \\\\midrule\n \\\\endhead\n \\\\midrule\n \\\\multicolumn{4}{r}{Continued on next page} \\\\\\\\\n \\\\midrule\n \\\\endfoot\n \\\\bottomrule\n \\\\endlastfoot\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{longtable}\n """'], {}), '(\n """ \\\\begin{longtable}{lrrl}\n \\\\caption[short]{full} \\\\label{fig:A} \\\\\\\\\n \\\\toprule\n & A & B & C \\\\\\\\\n \\\\midrule\n \\\\endfirsthead\n \\\\caption[]{full} \\\\\\\\\n \\\\toprule\n & A & B & C \\\\\\\\\n \\\\midrule\n \\\\endhead\n \\\\midrule\n \\\\multicolumn{4}{r}{Continued on next page} \\\\\\\\\n \\\\midrule\n \\\\endfoot\n \\\\bottomrule\n \\\\endlastfoot\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 
1 & -1.22 & cd \\\\\\\\\n \\\\end{longtable}\n """\n )\n', (19721, 20266), False, 'from textwrap import dedent\n'), ((20410, 20750), 'textwrap.dedent', 'dedent', (['""" \\\\begin{longtable}{lrrl}\n & A & B & C \\\\\\\\\n \\\\endfirsthead\n & A & B & C \\\\\\\\\n \\\\endhead\n \\\\multicolumn{4}{r}{Continued on next page} \\\\\\\\\n \\\\endfoot\n \\\\endlastfoot\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{longtable}\n """'], {}), '(\n """ \\\\begin{longtable}{lrrl}\n & A & B & C \\\\\\\\\n \\\\endfirsthead\n & A & B & C \\\\\\\\\n \\\\endhead\n \\\\multicolumn{4}{r}{Continued on next page} \\\\\\\\\n \\\\endfoot\n \\\\endlastfoot\n 0 & 0 & -0.61 & ab \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{longtable}\n """\n )\n', (20416, 20750), False, 'from textwrap import dedent\n'), ((21136, 21196), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (21158, 21196), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((21313, 21577), 'textwrap.dedent', 'dedent', (['f""" \\\\begin{{longtable}}{{l{\'SS\' if siunitx else \'rr\'}l}}\n {exp} \\\\\\\\\n {with_si if siunitx else without_si}\n \\\\endfirsthead\n {exp} \\\\\\\\\n {with_si if siunitx else without_si}\n \\\\endhead\n """'], {}), '(\n f""" \\\\begin{{longtable}}{{l{\'SS\' if siunitx else \'rr\'}l}}\n {exp} \\\\\\\\\n {with_si if siunitx else without_si}\n \\\\endfirsthead\n {exp} \\\\\\\\\n {with_si if siunitx else without_si}\n \\\\endhead\n """\n )\n', (21319, 21577), False, 'from textwrap import dedent\n'), ((22150, 22289), 'textwrap.dedent', 'dedent', (['f""" {cap_exp1}{lab_exp} \\\\\\\\\n & A & B & C \\\\\\\\\n \\\\endfirsthead\n {cap_exp2} \\\\\\\\\n """'], {}), '(\n f""" {cap_exp1}{lab_exp} \\\\\\\\\n & A & B & C \\\\\\\\\n \\\\endfirsthead\n {cap_exp2} \\\\\\\\\n """\n )\n', (22156, 22289), False, 'from textwrap import dedent\n'), ((22670, 22730), 
'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('Z', 'a'), ('Z', 'b'), ('Y', 'c')]"], {}), "([('Z', 'a'), ('Z', 'b'), ('Y', 'c')])\n", (22692, 22730), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((22742, 22802), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('A', 'a'), ('A', 'b'), ('B', 'c')]"], {}), "([('A', 'a'), ('A', 'b'), ('B', 'c')])\n", (22764, 22802), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((23165, 23357), 'textwrap.dedent', 'dedent', (['""" \\\\multirow[c]{2}{*}{\\\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\\\\\\n \\\\bfseries{} & b & 1 & -1.220000 & cd \\\\\\\\\n B & \\\\bfseries{c} & 2 & -2.220000 & de \\\\\\\\\n """'], {}), '(\n """ \\\\multirow[c]{2}{*}{\\\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\\\\\\n \\\\bfseries{} & b & 1 & -1.220000 & cd \\\\\\\\\n B & \\\\bfseries{c} & 2 & -2.220000 & de \\\\\\\\\n """\n )\n', (23171, 23357), False, 'from textwrap import dedent\n'), ((23430, 23565), 'textwrap.dedent', 'dedent', (['""" {} & {} & \\\\multicolumn{2}{r}{\\\\bfseries{Z}} & {Y} \\\\\\\\\n {} & {} & {a} & {b} & {\\\\bfseries{c}} \\\\\\\\\n """'], {}), '(\n """ {} & {} & \\\\multicolumn{2}{r}{\\\\bfseries{Z}} & {Y} \\\\\\\\\n {} & {} & {a} & {b} & {\\\\bfseries{c}} \\\\\\\\\n """\n )\n', (23436, 23565), False, 'from textwrap import dedent\n'), ((25023, 25147), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{lrrl}\n & A & B & C \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{lrrl}\n & A & B & C \\\\\\\\\n 1 & 1 & -1.22 & cd \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (25029, 25147), False, 'from textwrap import dedent\n'), ((25272, 25366), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (["[['i0', 'j0'], ['i1'], ['i2', 'j2']]"], {'names': "['i-0', 'i-1', 'i-2']"}), "([['i0', 'j0'], ['i1'], ['i2', 'j2']], names=['i-0',\n 'i-1', 'i-2'])\n", (25295, 25366), False, 'from pandas import DataFrame, 
MultiIndex, option_context\n'), ((25388, 25482), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (["[['c0'], ['c1', 'd1'], ['c2', 'd2']]"], {'names': "['c-0', 'c-1', 'c-2']"}), "([['c0'], ['c1', 'd1'], ['c2', 'd2']], names=['c-0',\n 'c-1', 'c-2'])\n", (25411, 25482), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((25580, 25602), 'pandas.io.formats.style.Styler', 'Styler', (['df'], {'uuid_len': '(0)'}), '(df, uuid_len=0)\n', (25586, 25602), False, 'from pandas.io.formats.style import Styler\n'), ((25933, 26330), 'textwrap.dedent', 'dedent', (['""" \\\\begin{tabular}{llrrr}\n & c-1 & c1 & \\\\multicolumn{2}{r}{d1} \\\\\\\\\n & c-2 & d2 & c2 & d2 \\\\\\\\\n i-0 & i-2 & & & \\\\\\\\\n i0 & \\\\color{blue} j2 & \\\\color{red} 5 & 6 & 7 \\\\\\\\\n \\\\multirow[c]{2}{*}{\\\\color{blue} j0} & i2 & 9 & 10 & 11 \\\\\\\\\n \\\\color{blue} & \\\\color{blue} j2 & 13 & 14 & 15 \\\\\\\\\n \\\\end{tabular}\n """'], {}), '(\n """ \\\\begin{tabular}{llrrr}\n & c-1 & c1 & \\\\multicolumn{2}{r}{d1} \\\\\\\\\n & c-2 & d2 & c2 & d2 \\\\\\\\\n i-0 & i-2 & & & \\\\\\\\\n i0 & \\\\color{blue} j2 & \\\\color{red} 5 & 6 & 7 \\\\\\\\\n \\\\multirow[c]{2}{*}{\\\\color{blue} j0} & i2 & 9 & 10 & 11 \\\\\\\\\n \\\\color{blue} & \\\\color{blue} j2 & 13 & 14 & 15 \\\\\\\\\n \\\\end{tabular}\n """\n )\n', (25939, 26330), False, 'from textwrap import dedent\n'), ((3340, 3376), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (3353, 3376), False, 'import pytest\n'), ((3514, 3550), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (3527, 3550), False, 'import pytest\n'), ((9940, 9990), 'pandas.option_context', 'option_context', (['"""styler.latex.multicol_align"""', '"""l"""'], {}), "('styler.latex.multicol_align', 'l')\n", (9954, 9990), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((10078, 10128), 'pandas.option_context', 'option_context', 
(['"""styler.latex.multirow_align"""', '"""b"""'], {}), "('styler.latex.multirow_align', 'b')\n", (10092, 10128), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((11109, 11138), 'pandas.option_context', 'option_context', (['option', 'value'], {}), '(option, value)\n', (11123, 11138), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((13384, 13437), 'pandas.option_context', 'option_context', (['"""styler.latex.environment"""', '"""bar-env"""'], {}), "('styler.latex.environment', 'bar-env')\n", (13398, 13437), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((13962, 14015), 'pandas.io.formats.style_render._parse_latex_table_styles', '_parse_latex_table_styles', (['styler.table_styles', '"""bar"""'], {}), "(styler.table_styles, 'bar')\n", (13987, 14015), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((14092, 14147), 'pandas.io.formats.style_render._parse_latex_table_styles', '_parse_latex_table_styles', (['styler.table_styles', '"""label"""'], {}), "(styler.table_styles, 'label')\n", (14117, 14147), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((14373, 14417), 'pandas.io.formats.style_render._parse_latex_cell_styles', '_parse_latex_cell_styles', (['cell_style', '"""text"""'], {}), "(cell_style, 'text')\n", (14397, 14417), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((15012, 15067), 'pandas.io.formats.style_render._parse_latex_cell_styles', '_parse_latex_cell_styles', (['cell_style', '"""<display_value>"""'], {}), "(cell_style, '<display_value>')\n", (15036, 15067), 
False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((15255, 15295), 'pandas.io.formats.style_render._parse_latex_header_span', '_parse_latex_header_span', (['cell', '"""X"""', '"""Y"""'], {}), "(cell, 'X', 'Y')\n", (15279, 15295), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((15446, 15486), 'pandas.io.formats.style_render._parse_latex_header_span', '_parse_latex_header_span', (['cell', '"""X"""', '"""Y"""'], {}), "(cell, 'X', 'Y')\n", (15470, 15486), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((15565, 15605), 'pandas.io.formats.style_render._parse_latex_header_span', '_parse_latex_header_span', (['cell', '"""X"""', '"""Y"""'], {}), "(cell, 'X', 'Y')\n", (15589, 15605), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((15705, 15745), 'pandas.io.formats.style_render._parse_latex_header_span', '_parse_latex_header_span', (['cell', '"""X"""', '"""Y"""'], {}), "(cell, 'X', 'Y')\n", (15729, 15745), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((16114, 16178), 'pandas.io.formats.style_render._parse_latex_table_wrapping', '_parse_latex_table_wrapping', (['styler.table_styles', 'styler.caption'], {}), '(styler.table_styles, styler.caption)\n', (16141, 16178), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, 
_parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((16199, 16263), 'pandas.io.formats.style_render._parse_latex_table_wrapping', '_parse_latex_table_wrapping', (['styler.table_styles', '"""some caption"""'], {}), "(styler.table_styles, 'some caption')\n", (16226, 16263), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((16424, 16478), 'pandas.io.formats.style_render._parse_latex_table_wrapping', '_parse_latex_table_wrapping', (['styler.table_styles', 'None'], {}), '(styler.table_styles, None)\n', (16451, 16478), False, 'from pandas.io.formats.style_render import _parse_latex_cell_styles, _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping\n'), ((23894, 23939), 'pandas.option_context', 'option_context', (['"""styler.render.repr"""', '"""latex"""'], {}), "('styler.render.repr', 'latex')\n", (23908, 23939), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((24146, 24193), 'pandas.option_context', 'option_context', (['f"""styler.latex.{option}"""', '(False)'], {}), "(f'styler.latex.{option}', False)\n", (24160, 24193), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((24244, 24290), 'pandas.option_context', 'option_context', (['f"""styler.latex.{option}"""', '(True)'], {}), "(f'styler.latex.{option}', True)\n", (24258, 24290), False, 'from pandas import DataFrame, MultiIndex, option_context\n'), ((25512, 25525), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (25521, 25525), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
# if you get the error: "TypeError: 'figure' is an unknown keyword argument"
# uncomment the line below:
# matplotlib.use('Qt4Agg')
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
except ImportError as e:
print(e)
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
exit()
def plot_with_labels(low_dim_embs, labels, filename='tsne_embeddings.png'):
  """Scatter-plot 2-D embeddings with a text label next to each point.

  Args:
    low_dim_embs: array of shape (num_points, 2); one row per label.
    labels: sequence of label strings; must not be longer than the number
      of embedding rows.
    filename: path the resulting figure is written to.

  Raises:
    ValueError: if there are more labels than embedding rows.
  """
  # Bug fix: the original used `assert` for input validation, which is
  # silently stripped when Python runs with -O. Raise explicitly instead.
  if low_dim_embs.shape[0] < len(labels):
    raise ValueError('More labels than embeddings')
  plt.figure(figsize=(18, 18))  # figure size in inches
  for i, label in enumerate(labels):
    x, y = low_dim_embs[i, :]
    plt.scatter(x, y)
    # Offset the label slightly from the point so it does not overlap it.
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
  plt.savefig(filename)
  print("plots saved in {0}".format(filename))
if __name__ == "__main__":
  # Step 6: Visualize the embeddings.
  idx_to_word = np.load("Idx2Word.npy").item()
  embeddings = np.load("CBOW_Embeddings.npy")
  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
  num_points = 500
  # Project only the first `num_points` embeddings down to two dimensions.
  low_dim_embs = tsne.fit_transform(embeddings[:num_points, :])
  labels = [idx_to_word[idx] for idx in range(num_points)]
  plot_with_labels(low_dim_embs, labels)
  plt.show()
| [
"matplotlib.pyplot.savefig",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"numpy.load",
"matplotlib.pyplot.show"
] | [((581, 609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 18)'}), '(figsize=(18, 18))\n', (591, 609), True, 'import matplotlib.pyplot as plt\n'), ((939, 960), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (950, 960), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1181), 'numpy.load', 'np.load', (['"""CBOW_Embeddings.npy"""'], {}), "('CBOW_Embeddings.npy')\n", (1158, 1181), True, 'import numpy as np\n'), ((1193, 1269), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(30)', 'n_components': '(2)', 'init': '"""pca"""', 'n_iter': '(5000)', 'method': '"""exact"""'}), "(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')\n", (1197, 1269), False, 'from sklearn.manifold import TSNE\n'), ((1465, 1475), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1473, 1475), True, 'import matplotlib.pyplot as plt\n'), ((704, 721), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (715, 721), True, 'import matplotlib.pyplot as plt\n'), ((730, 832), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label'], {'xy': '(x, y)', 'xytext': '(5, 2)', 'textcoords': '"""offset points"""', 'ha': '"""right"""', 'va': '"""bottom"""'}), "(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',\n ha='right', va='bottom')\n", (742, 832), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1126), 'numpy.load', 'np.load', (['"""Idx2Word.npy"""'], {}), "('Idx2Word.npy')\n", (1110, 1126), True, 'import numpy as np\n')] |
import mock
import numpy as np
from emukit.core import ParameterSpace
from emukit.core.acquisition import Acquisition
from emukit.core.constraints import IConstraint
from emukit.core.optimization.anchor_points_generator import ObjectiveAnchorPointsGenerator
def test_objective_anchor_point_generator():
    n_points = 5
    acquisition = mock.create_autospec(Acquisition)
    acquisition.evaluate.return_value = np.arange(n_points)[:, None]

    space = mock.create_autospec(ParameterSpace)
    space.sample_uniform.return_value = np.arange(n_points)[:, None]
    space.constraints = []

    generator = ObjectiveAnchorPointsGenerator(space, acquisition, num_samples=n_points)
    anchors = generator.get(1)

    # The chosen anchor must be the sample with the largest acquisition value
    assert np.array_equal(anchors, np.array([[n_points - 1]]))
def test_constrained_objective_anchor_point_generator():
    n_points = 5
    acquisition = mock.create_autospec(Acquisition)
    acquisition.evaluate = lambda x: x

    space = mock.create_autospec(ParameterSpace)
    space.sample_uniform.return_value = np.arange(n_points)[:, None]

    constraint = mock.create_autospec(IConstraint)
    constraint.evaluate.return_value = np.array([1, 1, 1, 0, 0])
    space.constraints = [constraint]

    generator = ObjectiveAnchorPointsGenerator(space, acquisition, num_samples=n_points)
    anchors = generator.get(1)

    # The chosen anchor is the highest acquisition value among the samples
    # that satisfy the constraint (indices 0..2 here).
    assert np.array_equal(anchors, np.array([[2]]))
| [
"mock.create_autospec",
"numpy.array",
"emukit.core.optimization.anchor_points_generator.ObjectiveAnchorPointsGenerator",
"numpy.arange"
] | [((349, 382), 'mock.create_autospec', 'mock.create_autospec', (['Acquisition'], {}), '(Acquisition)\n', (369, 382), False, 'import mock\n'), ((473, 509), 'mock.create_autospec', 'mock.create_autospec', (['ParameterSpace'], {}), '(ParameterSpace)\n', (493, 509), False, 'import mock\n'), ((626, 711), 'emukit.core.optimization.anchor_points_generator.ObjectiveAnchorPointsGenerator', 'ObjectiveAnchorPointsGenerator', (['space', 'mock_acquisition'], {'num_samples': 'num_samples'}), '(space, mock_acquisition, num_samples=num_samples\n )\n', (656, 711), False, 'from emukit.core.optimization.anchor_points_generator import ObjectiveAnchorPointsGenerator\n'), ((1000, 1033), 'mock.create_autospec', 'mock.create_autospec', (['Acquisition'], {}), '(Acquisition)\n', (1020, 1033), False, 'import mock\n'), ((1091, 1127), 'mock.create_autospec', 'mock.create_autospec', (['ParameterSpace'], {}), '(ParameterSpace)\n', (1111, 1127), False, 'import mock\n'), ((1218, 1251), 'mock.create_autospec', 'mock.create_autospec', (['IConstraint'], {}), '(IConstraint)\n', (1238, 1251), False, 'import mock\n'), ((1291, 1316), 'numpy.array', 'np.array', (['[1, 1, 1, 0, 0]'], {}), '([1, 1, 1, 0, 0])\n', (1299, 1316), True, 'import numpy as np\n'), ((1372, 1457), 'emukit.core.optimization.anchor_points_generator.ObjectiveAnchorPointsGenerator', 'ObjectiveAnchorPointsGenerator', (['space', 'mock_acquisition'], {'num_samples': 'num_samples'}), '(space, mock_acquisition, num_samples=num_samples\n )\n', (1402, 1457), False, 'from emukit.core.optimization.anchor_points_generator import ObjectiveAnchorPointsGenerator\n'), ((428, 450), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (437, 450), True, 'import numpy as np\n'), ((550, 572), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (559, 572), True, 'import numpy as np\n'), ((869, 898), 'numpy.array', 'np.array', (['[[num_samples - 1]]'], {}), '([[num_samples - 1]])\n', (877, 898), True, 'import numpy as 
np\n'), ((1168, 1190), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (1177, 1190), True, 'import numpy as np\n'), ((1615, 1630), 'numpy.array', 'np.array', (['[[2]]'], {}), '([[2]])\n', (1623, 1630), True, 'import numpy as np\n')] |
import datetime, dateutil.relativedelta
import pandas as pd
import numpy as np
from .settings import WORLD_CPI, WORLD_CY, WORLD_ER
def _get_value(date, df, type_, fpath=None):
"""
_get_value looks up the value of a cell for a given date (date) in a table provided by the Federal Statistical Office.
:param date: the date for which to get a cpi for.
:param df: the dataframe in which to look for the value.
:param type_: the type of information in the file (cpi? exchange value?)
:param fpath: the filepath of the dataframe. Used for recursive miss prevention.
:return: the value of a cell in a dataframe for a given date (date).
"""
#df_cpi = pd.read_csv(DATA_CPI, sep=";")
try:
target_cpi = df[str(date.year)].values[0]
if np.isnan(target_cpi):
try:
alt_code = get_exchange_rate_region(date, df['Country Code'].values[0])
alt_df = get_dataframe(alt_code, fpath)
val = _get_value(date=date, df=alt_df, type_ = type_, fpath = fpath)
return val
except Exception:
raise ("Not a number.")
return float(target_cpi)
except Exception as e:
f_occ = _get_occurence(df)
l_occ = _get_occurence(df, True)
raise ValueError("Couldn't find a(n) {} for the given date. Supported dates for the given currency range from {} to {}".format(type_, f_occ, l_occ))
def _get_occurence(df, do_reverse = False):
"""
_get_occurence finds and returns the first/ last date for which a value was recorded in a dataframe.
:param df: the dataframe to look through.
:param do_reverse: specifies, whether to look for the first/ last occurence.
:return: the year of the first/ last occurence.
"""
import math
if do_reverse:
df = df[df.columns[::-1]]
for col in df:
val = df[col].values[0]
try:
float(val)
if not math.isnan(float(val)):
return df[col].name
except ValueError:
continue
def get_cpi(date, df_cpi):
    """Return the consumer price index for ``date`` from the row ``df_cpi``
    of the world CPI table. Raises ValueError if no value is recorded."""
    return _get_value(date, df_cpi, "cpi", WORLD_CPI)
def get_exchange_rate(date, df_er):
    """Return the exchange rate for ``date`` from the row ``df_er`` of the
    world exchange-rate table. Raises ValueError if no value is recorded."""
    return _get_value(date, df_er, "exchange rate", WORLD_ER)
def get_valid_date(date):
    """
    get_valid_date converts the given date into a valid datetime.date object
    (YYYY-mm-dd).

    :param date: the date to validate. May be a "YYYY-mm-dd" string or a
        datetime.date/datetime.datetime object.
    :return: the given date as a datetime.date object.
    :raises ValueError: if a date string does not match the YYYY-mm-dd format.
    """
    # Bug fix: the docstring promised datetime objects were accepted, but
    # strptime() raises TypeError on non-string input. Pass them through.
    # (Check datetime.datetime first: it is a subclass of datetime.date.)
    if isinstance(date, datetime.datetime):
        return date.date()
    if isinstance(date, datetime.date):
        return date
    try:
        return datetime.datetime.strptime(date, '%Y-%m-%d').date()
    except ValueError:
        raise ValueError("Incorrect data format, should be YYYY-mm-dd")
def validate_dates(dates):
    """
    validate_dates turns a two-element list of dates into valid
    datetime.date objects using the :func:`get_valid_date` method.

    Note: the list is modified in place and also returned.

    :param dates: two-element list ``[from_date, to_date]``; entries may be
        "YYYY-mm-dd" strings. If ``to_date`` is None, it defaults to today's
        date one year ago.
    :return: the same list with both entries as datetime.date objects.
    :raises ValueError: if ``from_date`` is later than ``to_date`` or a date
        string is malformed.
    """
    dates[0] = get_valid_date(dates[0])
    if dates[1] == None:
        # Default window end: one *year* before today (the old docstring
        # incorrectly said "a month ago").
        dates[1] = datetime.date.today() - dateutil.relativedelta.relativedelta(years=1)
    else:
        dates[1] = get_valid_date(dates[1])
    if dates[0] > dates[1]:
        raise ValueError("Invalid order of dates. \'from_date\' has to be earlier than \'to_date\'.")
    return dates
def get_dataframe(country_code, filename_):
    """
    Extract the row(s) matching ``country_code`` from a World Bank style CSV
    file (the first four header lines of the file are skipped).

    :param country_code: the country code to look up in the 'Country Code'
        column.
    :param filename_: path of the CSV file to read.
    :return: a dataframe containing only the matching row(s).
    :raises ValueError: if no row matches the given country code.
    """
    table = pd.read_csv(filename_, skiprows=4)
    hits = table.index[table['Country Code'] == country_code].tolist()
    if not hits:
        raise ValueError("No matching dataset found for the given country code ({})".format(country_code))
    return table.loc[hits]
def get_exchange_rate_region(date, country_code):
    """
    get_exchange_rate_region looks up the country code of a newly adopted
    (successor) currency in the world currency-change table.

    :param date: the date at which to check for the alternative currency.
    :param country_code: the country code for which to find its alternative.
    :return: the newly adopted currency's country code, or implicitly None
        when the new currency had not yet been adopted by ``date.year``.
    """
    df = pd.read_csv(WORLD_CY, skiprows=4)
    # NOTE(review): vals[0] raises IndexError when the country code has no
    # row in the currency-change table — presumably callers only reach this
    # for countries known to have switched currencies; verify.
    vals = df.loc[df['Country Code'] == country_code][['New Country Code', 'Yielded']].values
    if vals[0][1] <= date.year:
        return vals[0][0]
| [
"datetime.datetime.strptime",
"datetime.date.today",
"numpy.isnan",
"pandas.read_csv"
] | [((4071, 4105), 'pandas.read_csv', 'pd.read_csv', (['filename_'], {'skiprows': '(4)'}), '(filename_, skiprows=4)\n', (4082, 4105), True, 'import pandas as pd\n'), ((4743, 4776), 'pandas.read_csv', 'pd.read_csv', (['WORLD_CY'], {'skiprows': '(4)'}), '(WORLD_CY, skiprows=4)\n', (4754, 4776), True, 'import pandas as pd\n'), ((783, 803), 'numpy.isnan', 'np.isnan', (['target_cpi'], {}), '(target_cpi)\n', (791, 803), True, 'import numpy as np\n'), ((3313, 3334), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3332, 3334), False, 'import datetime, dateutil.relativedelta\n'), ((2590, 2634), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (2616, 2634), False, 'import datetime, dateutil.relativedelta\n')] |
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag,
iscomplexobj, tril, triu, argsort, empty_like)
from .decomp import _asarray_validated
from .lapack import get_lapack_funcs, _compute_lwork
__all__ = ['ldl']
def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True):
    """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/
    hermitian matrix.
    This function returns a block diagonal matrix D consisting blocks of size
    at most 2x2 and also a possibly permuted unit lower triangular matrix
    ``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T``
    holds. If ``lower`` is False then (again possibly permuted) upper
    triangular matrices are returned as outer factors.
    The permutation array can be used to triangularize the outer factors
    simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower
    triangular matrix. This is also equivalent to multiplication with a
    permutation matrix ``P.dot(lu)``, where ``P`` is a column-permuted
    identity matrix ``I[:, perm]``.
    Depending on the value of the boolean ``lower``, only upper or lower
    triangular part of the input array is referenced. Hence, a triangular
    matrix on entry would give the same result as if the full matrix is
    supplied.
    Parameters
    ----------
    a : array_like
        Square input array
    lower : bool, optional
        This switches between the lower and upper triangular outer factors of
        the factorization. Lower triangular (``lower=True``) is the default.
    hermitian : bool, optional
        For complex-valued arrays, this defines whether ``a = a.conj().T`` or
        ``a = a.T`` is assumed. For real-valued arrays, this switch has no
        effect.
    overwrite_a : bool, optional
        Allow overwriting data in ``a`` (may enhance performance). The default
        is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    lu : ndarray
        The (possibly) permuted upper/lower triangular outer factor of the
        factorization.
    d : ndarray
        The block diagonal multiplier of the factorization.
    perm : ndarray
        The row-permutation index array that brings lu into triangular form.
    Raises
    ------
    ValueError
        If input array is not square.
    ComplexWarning
        If a complex-valued array with nonzero imaginary parts on the
        diagonal is given and hermitian is set to True.
    Examples
    --------
    Given an upper triangular array `a` that represents the full symmetric
    array with its entries, obtain `l`, 'd' and the permutation vector `perm`:
    >>> import numpy as np
    >>> from scipy.linalg import ldl
    >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]])
    >>> lu, d, perm = ldl(a, lower=0) # Use the upper part
    >>> lu
    array([[ 0. ,  0. ,  1. ],
           [ 0. ,  1. , -0.5],
           [ 1. ,  1. ,  1.5]])
    >>> d
    array([[-5. ,  0. ,  0. ],
           [ 0. ,  1.5,  0. ],
           [ 0. ,  0. ,  2. ]])
    >>> perm
    array([2, 1, 0])
    >>> lu[perm, :]
    array([[ 1. ,  1. ,  1.5],
           [ 0. ,  1. , -0.5],
           [ 0. ,  0. ,  1. ]])
    >>> lu.dot(d).dot(lu.T)
    array([[ 2., -1.,  3.],
           [-1.,  2.,  0.],
           [ 3.,  0.,  1.]])
    Notes
    -----
    This function uses ``?SYTRF`` routines for symmetric matrices and
    ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for
    the algorithm details.
    Depending on the ``lower`` keyword value, only lower or upper triangular
    part of the input array is referenced. Moreover, this keyword also defines
    the structure of the outer factors of the factorization.
    .. versionadded:: 1.1.0
    See also
    --------
    cholesky, lu
    References
    ----------
    .. [1] <NAME>, <NAME>, Some stable methods for calculating
       inertia and solving symmetric linear systems, Math. Comput. Vol.31,
       1977. DOI: 10.2307/2005787
    """
    a = atleast_2d(_asarray_validated(A, check_finite=check_finite))
    if a.shape[0] != a.shape[1]:
        raise ValueError('The input array "a" should be square.')
    # Return empty arrays for empty square input
    if a.size == 0:
        return empty_like(a), empty_like(a), np.array([], dtype=int)
    n = a.shape[0]
    r_or_c = complex if iscomplexobj(a) else float
    # Get the LAPACK routine: ?HETRF for complex hermitian input,
    # ?SYTRF for real input or complex symmetric input.
    if r_or_c is complex and hermitian:
        s, sl = 'hetrf', 'hetrf_lwork'
        if np.any(imag(diag(a))):
            warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal'
                 'are ignored. Use "hermitian=False" for factorization of'
                 'complex symmetric arrays.', ComplexWarning, stacklevel=2)
    else:
        s, sl = 'sytrf', 'sytrf_lwork'
    solver, solver_lwork = get_lapack_funcs((s, sl), (a,))
    # Query the optimal workspace size before the factorization call.
    lwork = _compute_lwork(solver_lwork, n, lower=lower)
    ldu, piv, info = solver(a, lwork=lwork, lower=lower,
                            overwrite_a=overwrite_a)
    # Negative info means an illegal argument value; positive info (exact
    # singularity) is deliberately not treated as an error here.
    if info < 0:
        raise ValueError('{} exited with the internal error "illegal value '
                         'in argument number {}". See LAPACK documentation '
                         'for the error codes.'.format(s.upper(), -info))
    # Decode LAPACK's ipiv into a row-swap array and a 1x1/2x2 pivot-size
    # array, then split the packed output into D and the triangular factor.
    swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower)
    d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian)
    lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower)
    return lu, d, perm
def _ldl_sanitize_ipiv(a, lower=True):
"""
This helper function takes the rather strangely encoded permutation array
returned by the LAPACK routines ?(HE/SY)TRF and converts it into
regularized permutation and diagonal pivot size format.
Since FORTRAN uses 1-indexing and LAPACK uses different start points for
upper and lower formats there are certain offsets in the indices used
below.
Let's assume a result where the matrix is 6x6 and there are two 2x2
and two 1x1 blocks reported by the routine. To ease the coding efforts,
we still populate a 6-sized array and fill zeros as the following ::
pivots = [2, 0, 2, 0, 1, 1]
This denotes a diagonal matrix of the form ::
[x x ]
[x x ]
[ x x ]
[ x x ]
[ x ]
[ x]
In other words, we write 2 when the 2x2 block is first encountered and
automatically write 0 to the next entry and skip the next spin of the
loop. Thus, a separate counter or array appends to keep track of block
sizes are avoided. If needed, zeros can be filtered out later without
losing the block structure.
Parameters
----------
a : ndarray
The permutation array ipiv returned by LAPACK
lower : bool, optional
The switch to select whether upper or lower triangle is chosen in
the LAPACK call.
Returns
-------
swap_ : ndarray
The array that defines the row/column swap operations. For example,
if row two is swapped with row four, the result is [0, 3, 2, 3].
pivots : ndarray
The array that defines the block diagonal structure as given above.
"""
n = a.size
swap_ = arange(n)
pivots = zeros_like(swap_, dtype=int)
skip_2x2 = False
# Some upper/lower dependent offset values
# range (s)tart, r(e)nd, r(i)ncrement
x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1)
for ind in range(rs, re, ri):
# If previous spin belonged already to a 2x2 block
if skip_2x2:
skip_2x2 = False
continue
cur_val = a[ind]
# do we have a 1x1 block or not?
if cur_val > 0:
if cur_val != ind+1:
# Index value != array value --> permutation required
swap_[ind] = swap_[cur_val-1]
pivots[ind] = 1
# Not.
elif cur_val < 0 and cur_val == a[ind+x]:
# first neg entry of 2x2 block identifier
if -cur_val != ind+2:
# Index value != array value --> permutation required
swap_[ind+x] = swap_[-cur_val-1]
pivots[ind+y] = 2
skip_2x2 = True
else: # Doesn't make sense, give up
raise ValueError('While parsing the permutation array '
'in "scipy.linalg.ldl", invalid entries '
'found. The array syntax is invalid.')
return swap_, pivots
def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True):
"""
Helper function to extract the diagonal and triangular matrices for
LDL.T factorization.
Parameters
----------
ldu : ndarray
The compact output returned by the LAPACK routing
pivs : ndarray
The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For
every 2 there is a succeeding 0.
lower : bool, optional
If set to False, upper triangular part is considered.
hermitian : bool, optional
If set to False a symmetric complex array is assumed.
Returns
-------
d : ndarray
The block diagonal matrix.
lu : ndarray
The upper/lower triangular matrix
"""
is_c = iscomplexobj(ldu)
d = diag(diag(ldu))
n = d.shape[0]
blk_i = 0 # block index
# row/column offsets for selecting sub-, super-diagonal
x, y = (1, 0) if lower else (0, 1)
lu = tril(ldu, -1) if lower else triu(ldu, 1)
diag_inds = arange(n)
lu[diag_inds, diag_inds] = 1
for blk in pivs[pivs != 0]:
# increment the block index and check for 2s
# if 2 then copy the off diagonals depending on uplo
inc = blk_i + blk
if blk == 2:
d[blk_i+x, blk_i+y] = ldu[blk_i+x, blk_i+y]
# If Hermitian matrix is factorized, the cross-offdiagonal element
# should be conjugated.
if is_c and hermitian:
d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y].conj()
else:
d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y]
lu[blk_i+x, blk_i+y] = 0.
blk_i = inc
return d, lu
def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True):
"""
Helper function to construct explicit outer factors of LDL factorization.
If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k).
Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See
LAPACK documentation for more details.
Parameters
----------
lu : ndarray
The triangular array that is extracted from LAPACK routine call with
ones on the diagonals.
swap_vec : ndarray
The array that defines the row swapping indices. If the kth entry is m
then rows k,m are swapped. Notice that the mth entry is not necessarily
k to avoid undoing the swapping.
pivs : ndarray
The array that defines the block diagonal structure returned by
_ldl_sanitize_ipiv().
lower : bool, optional
The boolean to switch between lower and upper triangular structure.
Returns
-------
lu : ndarray
The square outer factor which satisfies the L * D * L.T = A
perm : ndarray
The permutation vector that brings the lu to the triangular form
Notes
-----
Note that the original argument "lu" is overwritten.
"""
n = lu.shape[0]
perm = arange(n)
# Setup the reading order of the permutation matrix for upper/lower
rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1)
for ind in range(rs, re, ri):
s_ind = swap_vec[ind]
if s_ind != ind:
# Column start and end positions
col_s = ind if lower else 0
col_e = n if lower else ind+1
# If we stumble upon a 2x2 block include both cols in the perm.
if pivs[ind] == (0 if lower else 2):
col_s += -1 if lower else 0
col_e += 0 if lower else 1
lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e]
perm[[s_ind, ind]] = perm[[ind, s_ind]]
return lu, argsort(perm)
| [
"numpy.diag",
"numpy.argsort",
"numpy.iscomplexobj",
"numpy.array",
"numpy.empty_like",
"numpy.tril",
"warnings.warn",
"numpy.triu",
"numpy.zeros_like",
"numpy.arange"
] | [((7625, 7634), 'numpy.arange', 'arange', (['n'], {}), '(n)\n', (7631, 7634), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((7648, 7676), 'numpy.zeros_like', 'zeros_like', (['swap_'], {'dtype': 'int'}), '(swap_, dtype=int)\n', (7658, 7676), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((9653, 9670), 'numpy.iscomplexobj', 'iscomplexobj', (['ldu'], {}), '(ldu)\n', (9665, 9670), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((9910, 9919), 'numpy.arange', 'arange', (['n'], {}), '(n)\n', (9916, 9919), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((11852, 11861), 'numpy.arange', 'arange', (['n'], {}), '(n)\n', (11858, 11861), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((4714, 4729), 'numpy.iscomplexobj', 'iscomplexobj', (['a'], {}), '(a)\n', (4726, 4729), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((9684, 9693), 'numpy.diag', 'diag', (['ldu'], {}), '(ldu)\n', (9688, 9693), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((9853, 9866), 'numpy.tril', 'tril', (['ldu', '(-1)'], {}), '(ldu, -1)\n', (9857, 9866), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((9881, 9893), 'numpy.triu', 'triu', (['ldu', '(1)'], {}), '(ldu, 1)\n', (9885, 9893), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, 
triu, argsort, empty_like\n'), ((12561, 12574), 'numpy.argsort', 'argsort', (['perm'], {}), '(perm)\n', (12568, 12574), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((4616, 4629), 'numpy.empty_like', 'empty_like', (['a'], {}), '(a)\n', (4626, 4629), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((4631, 4644), 'numpy.empty_like', 'empty_like', (['a'], {}), '(a)\n', (4641, 4644), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n'), ((4646, 4669), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4654, 4669), True, 'import numpy as np\n'), ((4896, 5083), 'warnings.warn', 'warn', (['"""scipy.linalg.ldl():\nThe imaginary parts of the diagonalare ignored. Use "hermitian=False" for factorization ofcomplex symmetric arrays."""', 'ComplexWarning'], {'stacklevel': '(2)'}), '(\n """scipy.linalg.ldl():\nThe imaginary parts of the diagonalare ignored. Use "hermitian=False" for factorization ofcomplex symmetric arrays."""\n , ComplexWarning, stacklevel=2)\n', (4900, 5083), False, 'from warnings import warn\n'), ((4873, 4880), 'numpy.diag', 'diag', (['a'], {}), '(a)\n', (4877, 4880), False, 'from numpy import atleast_2d, ComplexWarning, arange, zeros_like, imag, diag, iscomplexobj, tril, triu, argsort, empty_like\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.