code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import warnings
warnings.filterwarnings('ignore') # noqa
from typing import Union, Iterable, Optional
import logging
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
matplotlib.use('Agg') # noqa
import albumentations as A
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import models
from rastervision.pipeline.config import ConfigError
from rastervision.pytorch_learner.learner import Learner
from rastervision.pytorch_learner.utils import (
compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor,
Parallel, AddTensors)
from rastervision.pipeline.file_system import make_dir
log = logging.getLogger(__name__)
class SemanticSegmentationLearner(Learner):
    """Learner for semantic segmentation using a torchvision DeepLabV3 model."""

    def build_model(self) -> nn.Module:
        """Build a DeepLabV3 model, adapting the first conv layer of the
        backbone when the number of input channels differs from what the
        (possibly pretrained) backbone expects.

        Returns:
            the model as an nn.Module
        """
        # TODO support FCN option
        pretrained = self.cfg.model.pretrained
        out_classes = len(self.cfg.data.class_names)
        if self.cfg.solver.ignore_last_class:
            out_classes -= 1
        # NOTE(review): _segm_model is a private torchvision API and may
        # change between torchvision versions.
        model = models.segmentation.segmentation._segm_model(
            'deeplabv3',
            self.cfg.model.get_backbone_str(),
            out_classes,
            False,
            pretrained_backbone=pretrained)
        input_channels = self.cfg.data.img_channels
        old_conv = model.backbone.conv1
        if input_channels == old_conv.in_channels:
            return model
        # these parameters will be the same for the new conv layer
        old_conv_args = {
            'out_channels': old_conv.out_channels,
            'kernel_size': old_conv.kernel_size,
            'stride': old_conv.stride,
            'padding': old_conv.padding,
            'dilation': old_conv.dilation,
            'groups': old_conv.groups,
            # fix: nn.Conv2d's `bias` argument is a bool; the original passed
            # the Parameter itself, relying on tensor truthiness (which raises
            # for multi-element tensors)
            'bias': old_conv.bias is not None
        }
        if not pretrained:
            # simply replace the first conv layer with one with the
            # correct number of input channels
            new_conv = nn.Conv2d(in_channels=input_channels, **old_conv_args)
            model.backbone.conv1 = new_conv
            return model
        if input_channels > old_conv.in_channels:
            # insert a new conv layer parallel to the existing one
            # and sum their outputs
            new_conv_channels = input_channels - old_conv.in_channels
            new_conv = nn.Conv2d(
                in_channels=new_conv_channels, **old_conv_args)
            model.backbone.conv1 = nn.Sequential(
                # split input along channel dim
                SplitTensor((old_conv.in_channels, new_conv_channels), dim=1),
                # each split goes to its respective conv layer
                Parallel(old_conv, new_conv),
                # sum the parallel outputs
                AddTensors())
        elif input_channels < old_conv.in_channels:
            # fewer channels than the backbone expects: build a smaller conv
            # initialized from the first input_channels pretrained filters
            model.backbone.conv1 = nn.Conv2d(
                in_channels=input_channels, **old_conv_args)
            model.backbone.conv1.weight.data[:, :input_channels] = \
                old_conv.weight.data[:, :input_channels]
        else:
            # unreachable: the equality case returned early above
            raise ConfigError('Something went wrong')
        return model

    def build_loss(self):
        """Build a cross-entropy loss, optionally class-weighted and
        optionally ignoring the last (null) class."""
        args = {}
        loss_weights = self.cfg.solver.class_loss_weights
        if loss_weights is not None:
            loss_weights = torch.tensor(loss_weights, device=self.device)
            args.update({'weight': loss_weights})
        if self.cfg.solver.ignore_last_class:
            num_classes = len(self.cfg.data.class_names)
            # the last class index is reserved for the null class
            args.update({'ignore_index': num_classes - 1})
        loss = nn.CrossEntropyLoss(**args)
        return loss

    def train_step(self, batch, batch_ind):
        """Compute the training loss for one batch."""
        x, y = batch
        out = self.post_forward(self.model(x))
        return {'train_loss': self.loss(out, y)}

    def validate_step(self, batch, batch_ind):
        """Compute the validation loss and confusion matrix for one batch."""
        x, y = batch
        out = self.post_forward(self.model(x))
        val_loss = self.loss(out, y)
        num_labels = len(self.cfg.data.class_names)
        y = y.view(-1)
        out = self.prob_to_pred(out).view(-1)
        conf_mat = compute_conf_mat(out, y, num_labels)
        return {'val_loss': val_loss, 'conf_mat': conf_mat}

    def validate_end(self, outputs, num_samples):
        """Aggregate per-batch validation outputs into epoch metrics."""
        conf_mat = sum([o['conf_mat'] for o in outputs])
        val_loss = torch.stack([o['val_loss']
                                for o in outputs]).sum() / num_samples
        conf_mat_metrics = compute_conf_mat_metrics(conf_mat,
                                                    self.cfg.data.class_names)
        metrics = {'val_loss': val_loss.item()}
        metrics.update(conf_mat_metrics)
        return metrics

    def post_forward(self, x):
        """Extract the main output from a torchvision segmentation model,
        which returns a dict with an 'out' key."""
        if isinstance(x, dict):
            return x['out']
        return x

    def predict(self, x: torch.Tensor, raw_out: bool = False) -> torch.Tensor:
        """Predict on a batch of images.

        Args:
            x: input image tensor
            raw_out: if True, return class probabilities instead of the
                argmax class predictions

        Returns:
            predictions on the CPU
        """
        x = self.to_batch(x).float()
        x = self.to_device(x, self.device)
        with torch.no_grad():
            out = self.model(x)
            out = self.post_forward(out)
            out = out.softmax(dim=1)
            if not raw_out:
                out = self.prob_to_pred(out)
        out = self.to_device(out, 'cpu')
        return out

    def numpy_predict(self, x: np.ndarray,
                      raw_out: bool = False) -> np.ndarray:
        """Predict on a numpy batch in (N, H, W, C) layout, resizing the
        output back to the input's spatial dimensions."""
        _, h, w, _ = x.shape
        transform, _ = self.get_data_transforms()
        x = self.normalize_input(x)
        x = self.to_batch(x)
        x = np.stack([transform(image=img)['image'] for img in x])
        x = torch.from_numpy(x)
        # (N, H, W, C) --> (N, C, H, W)
        x = x.permute((0, 3, 1, 2))
        out = self.predict(x, raw_out=True)
        # resize probabilities back to the original input size
        out = F.interpolate(
            out, size=(h, w), mode='bilinear', align_corners=False)
        out = self.prob_to_pred(out)
        return self.output_to_numpy(out)

    def prob_to_pred(self, x):
        """Convert per-class probabilities to class predictions via argmax."""
        return x.argmax(1)

    def plot_batch(self,
                   x: torch.Tensor,
                   y: Union[torch.Tensor, np.ndarray],
                   output_path: str,
                   z: Optional[torch.Tensor] = None,
                   batch_limit: Optional[int] = None) -> None:
        """Plot a whole batch in a grid using plot_xyz.
        Args:
            x: batch of images
            y: ground truth labels
            output_path: local path where to save plot image
            z: optional predicted labels
            batch_limit: optional limit on (rendered) batch size
        """
        batch_sz, c, h, w = x.shape
        batch_sz = min(batch_sz,
                       batch_limit) if batch_limit is not None else batch_sz
        if batch_sz == 0:
            return
        channel_groups = self.cfg.data.channel_display_groups
        nrows = batch_sz
        # one col for each group + 1 for labels + 1 for predictions
        ncols = len(channel_groups) + 1
        if z is not None:
            ncols += 1
        fig, axes = plt.subplots(
            nrows=nrows,
            ncols=ncols,
            squeeze=False,
            constrained_layout=True,
            figsize=(3 * ncols, 3 * nrows))
        assert axes.shape == (nrows, ncols)
        # (N, c, h, w) --> (N, h, w, c)
        x = x.permute(0, 2, 3, 1)
        # apply transform, if given
        if self.cfg.data.plot_options.transform is not None:
            tf = A.from_dict(self.cfg.data.plot_options.transform)
            imgs = [tf(image=img)['image'] for img in x.numpy()]
            x = torch.from_numpy(np.stack(imgs))
        for i in range(batch_sz):
            ax = (fig, axes[i])
            if z is None:
                self.plot_xyz(ax, x[i], y[i])
            else:
                self.plot_xyz(ax, x[i], y[i], z=z[i])
        make_dir(output_path, use_dirname=True)
        plt.savefig(output_path, bbox_inches='tight')
        plt.close()

    def plot_xyz(self,
                 ax: Iterable,
                 x: torch.Tensor,
                 y: Union[torch.Tensor, np.ndarray],
                 z: Optional[torch.Tensor] = None) -> None:
        """Plot one (image, label, prediction) row onto the given axes."""
        channel_groups = self.cfg.data.channel_display_groups
        # make subplot titles
        if not isinstance(channel_groups, dict):
            channel_groups = {
                f'Channels: {[*chs]}': chs
                for chs in channel_groups
            }
        fig, ax = ax
        img_axes = ax[:len(channel_groups)]
        label_ax = ax[len(channel_groups)]
        # plot input image(s)
        for (title, chs), ch_ax in zip(channel_groups.items(), img_axes):
            im = x[..., chs]
            if len(chs) == 1:
                # repeat single channel 3 times
                im = im.expand(-1, -1, 3)
            elif len(chs) == 2:
                # add a 3rd channel with all pixels set to 0.5
                h, w, _ = x.shape
                third_channel = torch.full((h, w, 1), fill_value=.5)
                im = torch.cat((im, third_channel), dim=-1)
            elif len(chs) > 3:
                # only use the first 3 channels
                # fix: Logger.warn is deprecated in favor of Logger.warning
                log.warning(f'Only plotting first 3 channels of channel-group '
                            f'{title}: {chs}.')
                im = x[..., chs[:3]]
            ch_ax.imshow(im)
            ch_ax.set_title(title)
            ch_ax.set_xticks([])
            ch_ax.set_yticks([])
        class_colors = self.cfg.data.class_colors
        colors = [color_to_triple(c) for c in class_colors]
        colors = np.array(colors) / 255.
        cmap = matplotlib.colors.ListedColormap(colors)
        # plot labels
        label_ax.imshow(
            y, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none')
        label_ax.set_title('Ground truth labels')
        label_ax.set_xticks([])
        label_ax.set_yticks([])
        # plot predictions
        if z is not None:
            pred_ax = ax[-1]
            pred_ax.imshow(
                z, vmin=0, vmax=len(colors), cmap=cmap, interpolation='none')
            pred_ax.set_title('Predicted labels')
            pred_ax.set_xticks([])
            pred_ax.set_yticks([])
        # add a legend to the rightmost subplot
        class_names = self.cfg.data.class_names
        legend_items = [
            mpatches.Patch(facecolor=col, edgecolor='black', label=name)
            for col, name in zip(colors, class_names)
        ]
        ax[-1].legend(
            handles=legend_items,
            loc='center right',
            bbox_to_anchor=(1.8, 0.5))
| [
"torch.cat",
"logging.getLogger",
"torch.full",
"rastervision.pytorch_learner.utils.compute_conf_mat_metrics",
"matplotlib.patches.Patch",
"torch.no_grad",
"matplotlib.colors.ListedColormap",
"albumentations.from_dict",
"rastervision.pytorch_learner.utils.Parallel",
"rastervision.pytorch_learner.u... | [((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((233, 254), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (247, 254), False, 'import matplotlib\n'), ((719, 746), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (736, 746), False, 'import logging\n'), ((3625, 3652), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '(**args)\n', (3644, 3652), False, 'from torch import nn\n'), ((4130, 4166), 'rastervision.pytorch_learner.utils.compute_conf_mat', 'compute_conf_mat', (['out', 'y', 'num_labels'], {}), '(out, y, num_labels)\n', (4146, 4166), False, 'from rastervision.pytorch_learner.utils import compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor, Parallel, AddTensors\n'), ((4480, 4541), 'rastervision.pytorch_learner.utils.compute_conf_mat_metrics', 'compute_conf_mat_metrics', (['conf_mat', 'self.cfg.data.class_names'], {}), '(conf_mat, self.cfg.data.class_names)\n', (4504, 4541), False, 'from rastervision.pytorch_learner.utils import compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor, Parallel, AddTensors\n'), ((5577, 5596), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (5593, 5596), False, 'import torch\n'), ((5691, 5760), 'torch.nn.functional.interpolate', 'F.interpolate', (['out'], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(out, size=(h, w), mode='bilinear', align_corners=False)\n", (5704, 5760), True, 'from torch.nn import functional as F\n'), ((6955, 7070), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'squeeze': '(False)', 'constrained_layout': '(True)', 'figsize': '(3 * ncols, 3 * nrows)'}), '(nrows=nrows, ncols=ncols, squeeze=False, constrained_layout=\n True, figsize=(3 * ncols, 3 * nrows))\n', (6967, 7070), True, 'from matplotlib 
import pyplot as plt\n'), ((7746, 7785), 'rastervision.pipeline.file_system.make_dir', 'make_dir', (['output_path'], {'use_dirname': '(True)'}), '(output_path, use_dirname=True)\n', (7754, 7785), False, 'from rastervision.pipeline.file_system import make_dir\n'), ((7794, 7839), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'bbox_inches': '"""tight"""'}), "(output_path, bbox_inches='tight')\n", (7805, 7839), True, 'from matplotlib import pyplot as plt\n'), ((7848, 7859), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7857, 7859), True, 'from matplotlib import pyplot as plt\n'), ((9491, 9531), 'matplotlib.colors.ListedColormap', 'matplotlib.colors.ListedColormap', (['colors'], {}), '(colors)\n', (9523, 9531), False, 'import matplotlib\n'), ((2000, 2054), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'input_channels'}), '(in_channels=input_channels, **old_conv_args)\n', (2009, 2054), False, 'from torch import nn\n'), ((2371, 2428), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'new_conv_channels'}), '(in_channels=new_conv_channels, **old_conv_args)\n', (2380, 2428), False, 'from torch import nn\n'), ((3349, 3395), 'torch.tensor', 'torch.tensor', (['loss_weights'], {'device': 'self.device'}), '(loss_weights, device=self.device)\n', (3361, 3395), False, 'import torch\n'), ((4990, 5005), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5003, 5005), False, 'import torch\n'), ((7362, 7411), 'albumentations.from_dict', 'A.from_dict', (['self.cfg.data.plot_options.transform'], {}), '(self.cfg.data.plot_options.transform)\n', (7373, 7411), True, 'import albumentations as A\n'), ((9393, 9411), 'rastervision.pytorch_learner.utils.color_to_triple', 'color_to_triple', (['c'], {}), '(c)\n', (9408, 9411), False, 'from rastervision.pytorch_learner.utils import compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor, Parallel, AddTensors\n'), ((9452, 9468), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', 
(9460, 9468), True, 'import numpy as np\n'), ((10213, 10273), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'facecolor': 'col', 'edgecolor': '"""black"""', 'label': 'name'}), "(facecolor=col, edgecolor='black', label=name)\n", (10227, 10273), True, 'import matplotlib.patches as mpatches\n'), ((2560, 2621), 'rastervision.pytorch_learner.utils.SplitTensor', 'SplitTensor', (['(old_conv.in_channels, new_conv_channels)'], {'dim': '(1)'}), '((old_conv.in_channels, new_conv_channels), dim=1)\n', (2571, 2621), False, 'from rastervision.pytorch_learner.utils import compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor, Parallel, AddTensors\n'), ((2702, 2730), 'rastervision.pytorch_learner.utils.Parallel', 'Parallel', (['old_conv', 'new_conv'], {}), '(old_conv, new_conv)\n', (2710, 2730), False, 'from rastervision.pytorch_learner.utils import compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor, Parallel, AddTensors\n'), ((2791, 2803), 'rastervision.pytorch_learner.utils.AddTensors', 'AddTensors', ([], {}), '()\n', (2801, 2803), False, 'from rastervision.pytorch_learner.utils import compute_conf_mat_metrics, compute_conf_mat, color_to_triple, SplitTensor, Parallel, AddTensors\n'), ((2892, 2946), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'input_channels'}), '(in_channels=input_channels, **old_conv_args)\n', (2901, 2946), False, 'from torch import nn\n'), ((3122, 3158), 'rastervision.pipeline.config.ConfigError', 'ConfigError', (['f"""Something went wrong"""'], {}), "(f'Something went wrong')\n", (3133, 3158), False, 'from rastervision.pipeline.config import ConfigError\n'), ((7510, 7524), 'numpy.stack', 'np.stack', (['imgs'], {}), '(imgs)\n', (7518, 7524), True, 'import numpy as np\n'), ((4355, 4400), 'torch.stack', 'torch.stack', (["[o['val_loss'] for o in outputs]"], {}), "([o['val_loss'] for o in outputs])\n", (4366, 4400), False, 'import torch\n'), ((8859, 8896), 'torch.full', 'torch.full', (['(h, w, 1)'], 
{'fill_value': '(0.5)'}), '((h, w, 1), fill_value=0.5)\n', (8869, 8896), False, 'import torch\n'), ((8917, 8955), 'torch.cat', 'torch.cat', (['(im, third_channel)'], {'dim': '(-1)'}), '((im, third_channel), dim=-1)\n', (8926, 8955), False, 'import torch\n')] |
"""
mfmodel module. Contains the MFModel class
"""
import os, sys, inspect, warnings
import numpy as np
from .mfbase import PackageContainer, ExtFileAction, PackageContainerType, \
MFDataException, ReadAsArraysException, FlopyException, \
VerbosityLevel
from .mfpackage import MFPackage
from .coordinates import modeldimensions
from ..utils import datautil
from ..discretization.structuredgrid import StructuredGrid
from ..discretization.vertexgrid import VertexGrid
from ..discretization.unstructuredgrid import UnstructuredGrid
from ..discretization.grid import Grid
from flopy.discretization.modeltime import ModelTime
from ..mbase import ModelInterface
from .utils.mfenums import DiscretizationType
from .data import mfstructure
from ..utils.check import mf6check
class MFModel(PackageContainer, ModelInterface):
"""
MODFLOW Model Class. Represents a single model in a simulation.
Parameters
----------
simulation_data : MFSimulationData
simulation data object
structure : MFModelStructure
structure of this type of model
modelname : string
name of the model
model_nam_file : string
relative path to the model name file from model working folder
version : string
version of modflow
exe_name : string
model executable name
model_ws : string
model working folder path
disfile : string
relative path to dis file from model working folder
grid_type : string
type of grid the model will use (structured, unstructured, vertices)
verbose : bool
verbose setting for model operations (default False)
Attributes
----------
model_name : string
name of the model
exe_name : string
model executable name
packages : OrderedDict(MFPackage)
dictionary of model packages
_name_file_io : MFNameFile
name file
Methods
-------
load : (simulation : MFSimulationData, model_name : string,
namfile : string, type : string, version : string, exe_name : string,
model_ws : string, strict : boolean) : MFSimulation
a class method that loads a model from files
write
writes the simulation to files
remove_package : (package_name : string)
removes package from the model. package_name can be the
package's name, type, or package object to be removed from
the model
set_model_relative_path : (path : string)
sets the file path to the model folder and updates all model file paths
is_valid : () : boolean
checks the validity of the model and all of its packages
rename_all_packages : (name : string)
renames all packages in the model
See Also
--------
Notes
-----
Examples
--------
"""
    def __init__(self, simulation, model_type='gwf6', modelname='model',
                 model_nam_file=None, version='mf6',
                 exe_name='mf6.exe', add_to_simulation=True,
                 structure=None, model_rel_path='.', verbose=False, **kwargs):
        """Initialize the model; see the class docstring for parameters."""
        super(MFModel, self).__init__(simulation.simulation_data, modelname)
        self.simulation = simulation
        self.simulation_data = simulation.simulation_data
        self.name = modelname
        self.name_file = None
        self._version = version
        self.model_type = model_type
        self.type = 'Model'
        if model_nam_file is None:
            model_nam_file = '{}.nam'.format(modelname)
        if add_to_simulation:
            # registering with the simulation returns the model structure
            # for this model type
            self.structure = simulation.register_model(self, model_type,
                                                       modelname,
                                                       model_nam_file)
        else:
            self.structure = structure
        self.set_model_relative_path(model_rel_path)
        self.exe_name = exe_name
        self.dimensions = modeldimensions.ModelDimensions(self.name,
                                                          self.simulation_data)
        self.simulation_data.model_dimensions[modelname] = self.dimensions
        self._ftype_num_dict = {}
        self._package_paths = {}
        self._verbose = verbose
        # NOTE(review): model_nam_file was defaulted above when None, so this
        # branch always takes the else path
        if model_nam_file is None:
            self.model_nam_file = '{}.nam'.format(modelname)
        else:
            self.model_nam_file = model_nam_file
        # check for spatial reference info in kwargs
        xll = kwargs.pop("xll", None)
        yll = kwargs.pop("yll", None)
        self._xul = kwargs.pop("xul", None)
        if self._xul is not None:
            warnings.warn('xul/yul have been deprecated. Use xll/yll instead.',
                          DeprecationWarning)
        self._yul = kwargs.pop("yul", None)
        if self._yul is not None:
            warnings.warn('xul/yul have been deprecated. Use xll/yll instead.',
                          DeprecationWarning)
        rotation = kwargs.pop("rotation", 0.)
        proj4 = kwargs.pop("proj4_str", None)
        # build model grid object
        self._modelgrid = Grid(proj4=proj4, xoff=xll, yoff=yll,
                               angrot=rotation)
        self.start_datetime = None
        # check for extraneous kwargs
        if len(kwargs) > 0:
            kwargs_str = ', '.join(kwargs.keys())
            excpt_str = 'Extraneous kwargs "{}" provided to ' \
                        'MFModel.'.format(kwargs_str)
            raise FlopyException(excpt_str)
        # build model name file
        # create name file based on model type - support different model types
        package_obj = self.package_factory('nam', model_type[0:3])
        if not package_obj:
            excpt_str = 'Name file could not be found for model' \
                        '{}.'.format(model_type[0:3])
            raise FlopyException(excpt_str)
        self.name_file = package_obj(self, filename=self.model_nam_file,
                                     pname=self.name)
def __getattr__(self, item):
"""
__getattr__ - used to allow for getting packages as if they are
attributes
Parameters
----------
item : str
3 character package name (case insensitive)
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if item == 'name_file' or not hasattr(self, 'name_file'):
raise AttributeError(item)
package = self.get_package(item)
if package is not None:
return package
raise AttributeError(item)
def __repr__(self):
return self._get_data_str(True)
def __str__(self):
return self._get_data_str(False)
def _get_data_str(self, formal):
file_mgr = self.simulation_data.mfpath
data_str = 'name = {}\nmodel_type = {}\nversion = {}\nmodel_' \
'relative_path = {}' \
'\n\n'.format(self.name, self.model_type, self.version,
file_mgr.model_relative_path[self.name])
for package in self.packagelist:
pk_str = package._get_data_str(formal, False)
if formal:
if len(pk_str.strip()) > 0:
data_str = '{}###################\nPackage {}\n' \
'###################\n\n' \
'{}\n'.format(data_str, package._get_pname(),
pk_str)
else:
pk_str = package._get_data_str(formal, False)
if len(pk_str.strip()) > 0:
data_str = '{}###################\nPackage {}\n' \
'###################\n\n' \
'{}\n'.format(data_str, package._get_pname(),
pk_str)
return data_str
    @property
    def nper(self):
        # number of stress periods from the simulation's tdis package, or
        # None if tdis is not available
        try:
            return self.simulation.tdis.nper.array
        except AttributeError:
            return None
@property
def modeltime(self):
tdis = self.simulation.get_package('tdis')
period_data = tdis.perioddata.get_data()
# build steady state data
sto = self.get_package('sto')
if sto is None:
steady = np.full((len(period_data['perlen'])), True, dtype=bool)
else:
steady = np.full((len(period_data['perlen'])), False, dtype=bool)
ss_periods = sto.steady_state.get_active_key_dict()
tr_periods = sto.transient.get_active_key_dict()
if ss_periods:
last_ss_value = False
# loop through steady state array
for index, value in enumerate(steady):
# resolve if current index is steady state or transient
if index in ss_periods:
last_ss_value = True
elif index in tr_periods:
last_ss_value = False
if last_ss_value == True:
steady[index] = True
# build model time
itmuni = tdis.time_units.get_data()
start_date_time = tdis.start_date_time.get_data()
if itmuni is None:
itmuni = 0
if start_date_time is None:
start_date_time = '01-01-1970'
data_frame = {'perlen': period_data['perlen'],
'nstp': period_data['nstp'],
'tsmult': period_data['tsmult']}
self._model_time = ModelTime(data_frame, itmuni, start_date_time,
steady)
return self._model_time
@property
def modeldiscrit(self):
if self.get_grid_type() == DiscretizationType.DIS:
dis = self.get_package('dis')
return StructuredGrid(nlay=dis.nlay.get_data(),
nrow=dis.nrow.get_data(),
ncol=dis.ncol.get_data())
elif self.get_grid_type() == DiscretizationType.DISV:
dis = self.get_package('disv')
return VertexGrid(ncpl=dis.ncpl.get_data(),
nlay=dis.nlay.get_data())
elif self.get_grid_type() == DiscretizationType.DISU:
dis = self.get_package('disu')
return UnstructuredGrid(nodes=dis.nodes.get_data())
    @property
    def modelgrid(self):
        """Model grid (StructuredGrid, VertexGrid, or UnstructuredGrid)
        rebuilt from the discretization package whenever it is out of sync."""
        if not self._mg_resync:
            # cached grid is still in sync with the package data
            return self._modelgrid
        if self.get_grid_type() == DiscretizationType.DIS:
            dis = self.get_package('dis')
            if not hasattr(dis, '_init_complete'):
                if not hasattr(dis, 'delr'):
                    # dis package has not yet been initialized
                    return self._modelgrid
                else:
                    # dis package has been partially initialized
                    self._modelgrid = StructuredGrid(
                        delc=dis.delc.array, delr=dis.delr.array,
                        top=None, botm=None, idomain=None, lenuni=None,
                        proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg,
                        xoff=self._modelgrid.xoffset,
                        yoff=self._modelgrid.yoffset,
                        angrot=self._modelgrid.angrot)
            else:
                self._modelgrid = StructuredGrid(
                    delc=dis.delc.array, delr=dis.delr.array,
                    top=dis.top.array, botm=dis.botm.array,
                    idomain=dis.idomain.array, lenuni=dis.length_units.array,
                    proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg,
                    xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset,
                    angrot=self._modelgrid.angrot)
        elif self.get_grid_type() == DiscretizationType.DISV:
            dis = self.get_package('disv')
            if not hasattr(dis, '_init_complete'):
                if not hasattr(dis, 'cell2d'):
                    # disv package has not yet been initialized
                    return self._modelgrid
                else:
                    # disv package has been partially initialized
                    self._modelgrid = VertexGrid(vertices=dis.vertices.array,
                                                 cell2d=dis.cell2d.array,
                                                 top=None,
                                                 botm=None,
                                                 idomain=None,
                                                 lenuni=None,
                                                 proj4=self._modelgrid.proj4,
                                                 epsg=self._modelgrid.epsg,
                                                 xoff=self._modelgrid.xoffset,
                                                 yoff=self._modelgrid.yoffset,
                                                 angrot=self._modelgrid.angrot)
            else:
                self._modelgrid = VertexGrid(
                    vertices=dis.vertices.array, cell2d=dis.cell2d.array,
                    top=dis.top.array, botm=dis.botm.array,
                    idomain=dis.idomain.array, lenuni=dis.length_units.array,
                    proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg,
                    xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset,
                    angrot=self._modelgrid.angrot)
        elif self.get_grid_type() == DiscretizationType.DISU:
            dis = self.get_package('disu')
            if not hasattr(dis, '_init_complete'):
                # disu package has not yet been fully initialized
                return self._modelgrid
            cell2d = dis.cell2d.array
            # all nodes active by default
            idomain = np.ones(dis.nodes.array, np.int32)
            if cell2d is None:
                if self.simulation.simulation_data.verbosity_level.value >= \
                        VerbosityLevel.normal.value:
                    print('WARNING: cell2d information missing. Functionality of '
                          'the UnstructuredGrid will be limited.')
                iverts = None
                xcenters = None
                ycenters = None
            else:
                # cell2d columns beyond the 4th hold the cell's vertex numbers
                iverts = [list(i)[4:] for i in cell2d]
                xcenters = dis.cell2d.array['xc']
                ycenters = dis.cell2d.array['yc']
            vertices = dis.vertices.array
            if vertices is None:
                if self.simulation.simulation_data.verbosity_level.value >= \
                        VerbosityLevel.normal.value:
                    print('WARNING: vertices information missing. Functionality '
                          'of the UnstructuredGrid will be limited.')
                vertices = None
            else:
                vertices = np.array(vertices)
            self._modelgrid = UnstructuredGrid(
                vertices=vertices, iverts=iverts,
                xcenters=xcenters,
                ycenters=ycenters, top=dis.top.array,
                botm=dis.bot.array, idomain=idomain,
                lenuni=dis.length_units.array, proj4=self._modelgrid.proj4,
                epsg=self._modelgrid.epsg, xoff=self._modelgrid.xoffset,
                yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot,
                nodes=dis.nodes.get_data())
        elif self.get_grid_type() == DiscretizationType.DISL:
            dis = self.get_package('disl')
            if not hasattr(dis, '_init_complete'):
                if not hasattr(dis, 'cell1d'):
                    # disl package has not yet been initialized
                    return self._modelgrid
                else:
                    # disl package has been partially initialized
                    self._modelgrid = VertexGrid(vertices=dis.vertices.array,
                                                 cell1d=dis.cell1d.array,
                                                 top=None,
                                                 botm=None,
                                                 idomain=None,
                                                 lenuni=None,
                                                 proj4=self._modelgrid.proj4,
                                                 epsg=self._modelgrid.epsg,
                                                 xoff=self._modelgrid.xoffset,
                                                 yoff=self._modelgrid.yoffset,
                                                 angrot=self._modelgrid.angrot)
            else:
                self._modelgrid = VertexGrid(
                    vertices=dis.vertices.array, cell1d=dis.cell1d.array,
                    top=dis.top.array, botm=dis.botm.array,
                    idomain=dis.idomain.array, lenuni=dis.length_units.array,
                    proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg,
                    xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset,
                    angrot=self._modelgrid.angrot)
        else:
            # undefined grid type: return whatever grid we already have
            return self._modelgrid
        if self.get_grid_type() != DiscretizationType.DISV:
            # get coordinate data from dis file
            xorig = dis.xorigin.get_data()
            yorig = dis.yorigin.get_data()
            angrot = dis.angrot.get_data()
        else:
            xorig = self._modelgrid.xoffset
            yorig = self._modelgrid.yoffset
            angrot = self._modelgrid.angrot
        # resolve offsets
        if xorig is None:
            xorig = self._modelgrid.xoffset
        if xorig is None:
            # fall back to the deprecated upper-left origin if it was given
            if self._xul is not None:
                xorig = self._modelgrid._xul_to_xll(self._xul)
            else:
                xorig = 0.0
        if yorig is None:
            yorig = self._modelgrid.yoffset
        if yorig is None:
            if self._yul is not None:
                yorig = self._modelgrid._yul_to_yll(self._yul)
            else:
                yorig = 0.0
        if angrot is None:
            angrot = self._modelgrid.angrot
        self._modelgrid.set_coord_info(xorig, yorig, angrot,
                                       self._modelgrid.epsg,
                                       self._modelgrid.proj4)
        # keep resyncing until the grid has complete information
        self._mg_resync = not self._modelgrid.is_complete
        return self._modelgrid
    @property
    def packagelist(self):
        # list of packages attached to this model
        return self._packagelist
    @property
    def namefile(self):
        # model name file path, relative to the model working folder
        return self.model_nam_file
    @property
    def model_ws(self):
        # model working folder path resolved by the simulation's file manager
        file_mgr = self.simulation_data.mfpath
        return file_mgr.get_model_path(self.name)
    @property
    def exename(self):
        # model executable name
        return self.exe_name
    @property
    def version(self):
        # version of modflow
        return self._version
@property
def solver_tols(self):
ims = self.get_ims_package()
if ims is not None:
rclose = ims.rcloserecord.get_data()
if rclose is not None:
rclose = rclose[0][0]
return ims.inner_hclose.get_data(), rclose
return None
    @property
    def laytyp(self):
        # cell type array from the npf package, or None if npf is missing
        try:
            return self.npf.icelltype.array
        except AttributeError:
            return None
    @property
    def hdry(self):
        # head value assigned to dry cells
        return -1e30
    @property
    def hnoflo(self):
        # head value assigned to inactive (no-flow) cells
        return 1e30
    @property
    def laycbd(self):
        # MODFLOW 6 has no quasi-3D confining beds
        return None
    def export(self, f, **kwargs):
        """Export the model to file *f* using flopy's export utilities."""
        from ..export import utils
        return utils.model_export(f, self, **kwargs)
    @property
    def verbose(self):
        # verbose setting for model operations
        return self._verbose
    @verbose.setter
    def verbose(self, verbose):
        # set the verbose setting for model operations
        self._verbose = verbose
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = mf6check(self, f=f, verbose=verbose, level=level)
return self._check(chk, level)
    @classmethod
    def load_base(cls, simulation, structure, modelname='NewModel',
                  model_nam_file='modflowtest.nam', mtype='gwf', version='mf6',
                  exe_name='mf6.exe', strict=True, model_rel_path='.',
                  load_only=None):
        """
        Load an existing model.
        Parameters
        ----------
        simulation : MFSimulation
            simulation object that this model is a part of
        simulation_data : MFSimulationData
            simulation data object
        structure : MFModelStructure
            structure of this type of model
        model_name : string
            name of the model
        model_nam_file : string
            relative path to the model name file from model working folder
        version : string
            version of modflow
        exe_name : string
            model executable name
        model_ws : string
            model working folder relative to simulation working folder
        strict : boolean
            strict mode when loading files
        model_rel_path : string
            relative path of model folder to simulation folder
        load_only : list
            list of package abbreviations or package names corresponding to
            packages that flopy will load. default is None, which loads all
            packages. the discretization packages will load regardless of this
            setting. subpackages, like time series and observations, will also
            load regardless of this setting.
            example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1']
        Returns
        -------
        model : MFModel
        Examples
        --------
        """
        instance = cls(simulation, mtype, modelname,
                       model_nam_file=model_nam_file,
                       version=version, exe_name=exe_name,
                       add_to_simulation=False, structure=structure,
                       model_rel_path=model_rel_path)
        # build case consistent load_only dictionary for quick lookups
        load_only = instance._load_only_dict(load_only)
        # load name file
        instance.name_file.load(strict)
        # order packages
        vnum = mfstructure.MFStructure().get_version_string()
        # FIX: Transport - Priority packages maybe should not be hard coded
        priority_packages = {'dis{}'.format(vnum): 1,'disv{}'.format(vnum): 1,
                             'disu{}'.format(vnum): 1}
        packages_ordered = []
        package_recarray = instance.simulation_data.mfdata[(modelname, 'nam',
                                                            'packages',
                                                            'packages')]
        # discretization packages must load before everything else
        for item in package_recarray.get_data():
            if item[0] in priority_packages:
                packages_ordered.insert(0, (item[0], item[1], item[2]))
            else:
                packages_ordered.append((item[0], item[1], item[2]))
        # load packages
        sim_struct = mfstructure.MFStructure().sim_struct
        instance._ftype_num_dict = {}
        for ftype, fname, pname in packages_ordered:
            ftype_orig = ftype
            # strip trailing version digit and normalize case
            ftype = ftype[0:-1].lower()
            if ftype in structure.package_struct_objs or ftype in \
                    sim_struct.utl_struct_objs:
                # priority packages always load; others must be in load_only
                # when a load_only filter was supplied
                if load_only is not None and not \
                        instance._in_pkg_list(priority_packages, ftype_orig,
                                              pname) \
                        and not instance._in_pkg_list(load_only, ftype_orig,
                                                      pname):
                    if simulation.simulation_data.verbosity_level.value >= \
                            VerbosityLevel.normal.value:
                        print(' skipping package {}...'.format(ftype))
                    continue
                if model_rel_path and model_rel_path != '.':
                    # strip off model relative path from the file path
                    filemgr = simulation.simulation_data.mfpath
                    fname = filemgr.strip_model_relative_path(modelname,
                                                              fname)
                if simulation.simulation_data.verbosity_level.value >= \
                        VerbosityLevel.normal.value:
                    print(' loading package {}...'.format(ftype))
                # load package
                instance.load_package(ftype, fname, pname, strict, None)
        # load referenced packages
        if modelname in instance.simulation_data.referenced_files:
            for ref_file in \
                    instance.simulation_data.referenced_files[modelname].values():
                if (ref_file.file_type in structure.package_struct_objs or
                        ref_file.file_type in sim_struct.utl_struct_objs) and \
                        not ref_file.loaded:
                    instance.load_package(ref_file.file_type,
                                          ref_file.file_name, None, strict,
                                          ref_file.reference_path)
                    ref_file.loaded = True
        # TODO: fix jagged lists where appropriate
        return instance
def write(self, ext_file_action=ExtFileAction.copy_relative_paths):
"""
write model to model files
Parameters
----------
ext_file_action : ExtFileAction
defines what to do with external files when the simulation path has
changed. defaults to copy_relative_paths which copies only files
with relative paths, leaving files defined by absolute paths fixed.
Returns
-------
Examples
--------
"""
# write name file
if self.simulation_data.verbosity_level.value >= \
VerbosityLevel.normal.value:
print(' writing model name file...')
self.name_file.write(ext_file_action=ext_file_action)
# write packages
for pp in self.packagelist:
if self.simulation_data.verbosity_level.value >= \
VerbosityLevel.normal.value:
print(' writing package {}...'.format(pp._get_pname()))
pp.write(ext_file_action=ext_file_action)
def get_grid_type(self):
"""
Return the type of grid used by model 'model_name' in simulation
containing simulation data 'simulation_data'.
Returns
-------
grid type : DiscretizationType
"""
package_recarray = self.name_file.packages
structure = mfstructure.MFStructure()
if package_recarray.search_data(
'dis{}'.format(structure.get_version_string()),
0) is not None:
return DiscretizationType.DIS
elif package_recarray.search_data(
'disv{}'.format(structure.get_version_string()),
0) is not None:
return DiscretizationType.DISV
elif package_recarray.search_data(
'disu{}'.format(structure.get_version_string()),
0) is not None:
return DiscretizationType.DISU
elif package_recarray.search_data(
'disl{}'.format(structure.get_version_string()),
0) is not None:
return DiscretizationType.DISL
return DiscretizationType.UNDEFINED
def get_ims_package(self):
solution_group = self.simulation.name_file.solutiongroup.get_data()
for record in solution_group:
for model_name in record[2:]:
if model_name == self.name:
return self.simulation.get_ims_package(record[1])
return None
    def get_steadystate_list(self):
        """Return one boolean per stress period: True for steady-state
        periods, False for transient ones (based on the 'sto' package)."""
        ss_list = []
        tdis = self.simulation.get_package('tdis')
        period_data = tdis.perioddata.get_data()
        index = 0
        pd_len = len(period_data)
        # start out with every stress period marked steady-state
        while index < pd_len:
            ss_list.append(True)
            index += 1
        storage = self.get_package('sto')
        if storage is not None:
            # periods explicitly flagged transient / steady-state
            tr_keys = storage.transient.get_keys(True)
            ss_keys = storage.steady_state.get_keys(True)
            for key in tr_keys:
                ss_list[key] = False
                for ss_list_key in range(key + 1, len(ss_list)):
                    for ss_key in ss_keys:
                        if ss_key == ss_list_key:
                            break
                    # NOTE(review): this re-assigns ss_list[key], which is
                    # already False, so the two loops above have no visible
                    # effect.  The intended target looks like
                    # ss_list[ss_list_key] (mark periods after a transient
                    # key transient until a steady-state key appears) --
                    # confirm against upstream before changing.
                    ss_list[key] = False
        return ss_list
def is_valid(self):
"""
checks the validity of the model and all of its packages
Parameters
----------
Returns
-------
valid : boolean
Examples
--------
"""
# valid name file
if not self.name_file.is_valid():
return False
# valid packages
for pp in self.packagelist:
if not pp.is_valid():
return False
# required packages exist
for package_struct in self.structure.package_struct_objs.values():
if not package_struct.optional and not package_struct.file_type \
in self.package_type_dict:
return False
return True
    def set_model_relative_path(self, model_ws):
        """
        sets the file path to the model folder relative to the simulation
        folder and updates all model file paths, placing them in the model
        folder
        Parameters
        ----------
        model_ws : string
            model working folder relative to simulation working folder
        Returns
        -------
        Examples
        --------
        """
        # update path in the file manager
        file_mgr = self.simulation_data.mfpath
        file_mgr.set_last_accessed_model_path()
        path = file_mgr.string_to_file_path(model_ws)
        file_mgr.model_relative_path[self.name] = path
        if model_ws and model_ws != '.' and self.simulation.name_file is not \
                None:
            # update model name file location in simulation name file
            models = self.simulation.name_file.models
            models_data = models.get_data()
            for index, entry in enumerate(models_data):
                # entry[1] is the model file path; match either by file
                # base name or by the model name stored in entry[2]
                old_model_file_name = os.path.split(entry[1])[1]
                old_model_base_name = os.path.splitext(old_model_file_name)[0]
                if old_model_base_name.lower() == self.name.lower() or \
                        self.name == entry[2]:
                    models_data[index][1] = os.path.join(path,
                                                         old_model_file_name)
                    break
            models.set_data(models_data)
            if self.name_file is not None:
                # update listing file location in model name file
                list_file = self.name_file.list.get_data()
                if list_file:
                    # NOTE(review): this rebinds 'path' to the listing
                    # file's directory; the package-path updates below then
                    # use the rebound value, so their result depends on
                    # whether a listing file exists -- confirm this
                    # shadowing is intentional.
                    path, list_file_name = os.path.split(list_file)
                    try:
                        self.name_file.list.set_data(os.path.join(
                            path, list_file_name))
                    except MFDataException as mfde:
                        message = 'Error occurred while setting relative ' \
                                  'path "{}" in model '\
                                  '"{}".'.format(os.path.join(path,
                                                              list_file_name),
                                                 self.name)
                        raise MFDataException(mfdata_except=mfde,
                                              model=self.model_name,
                                              package=self.name_file.
                                              _get_pname(),
                                              message=message)
                # update package file locations in model name file
                packages = self.name_file.packages
                packages_data = packages.get_data()
                for index, entry in enumerate(packages_data):
                    old_package_name = os.path.split(entry[1])[1]
                    packages_data[index][1] = os.path.join(path,
                                                             old_package_name)
                packages.set_data(packages_data)
                # update files referenced from within packages
                for package in self.packagelist:
                    package.set_model_relative_path(model_ws)
def _remove_package_from_dictionaries(self, package):
# remove package from local dictionaries and lists
if package.path in self._package_paths:
del self._package_paths[package.path]
self._remove_package(package)
def remove_package(self, package_name):
"""
removes a package and all child packages from the model
Parameters
----------
package_name : str
package name, package type, or package object to be removed from
the model
Returns
-------
Examples
--------
"""
if isinstance(package_name, MFPackage):
packages = [package_name]
else:
packages = self.get_package(package_name)
if not isinstance(packages, list):
packages = [packages]
for package in packages:
if package.model_or_sim.name != self.name:
except_text = 'Package can not be removed from model {} ' \
'since it is ' \
'not part of '
raise mfstructure.FlopyException(except_text)
self._remove_package_from_dictionaries(package)
try:
# remove package from name file
package_data = self.name_file.packages.get_data()
except MFDataException as mfde:
message = 'Error occurred while reading package names ' \
'from name file in model ' \
'"{}".'.format(self.name)
raise MFDataException(mfdata_except=mfde,
model=self.model_name,
package=self.name_file._get_pname(),
message=message)
try:
new_rec_array = None
for item in package_data:
if item[1] != package._filename:
if new_rec_array is None:
new_rec_array = np.rec.array([item.tolist()],
package_data.dtype)
else:
new_rec_array = np.hstack((item, new_rec_array))
except:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'building package recarray',
self.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, None,
self._simulation_data.debug)
try:
self.name_file.packages.set_data(new_rec_array)
except MFDataException as mfde:
message = 'Error occurred while setting package names ' \
'from name file in model "{}". Package name ' \
'data:\n{}'.format(self.name, new_rec_array)
raise MFDataException(mfdata_except=mfde,
model=self.model_name,
package=self.name_file._get_pname(),
message=message)
# build list of child packages
child_package_list = []
for pkg in self.packagelist:
if pkg.parent_file is not None and pkg.parent_file.path == \
package.path:
child_package_list.append(pkg)
# remove child packages
for child_package in child_package_list:
self._remove_package_from_dictionaries(child_package)
def rename_all_packages(self, name):
package_type_count = {}
self.name_file.filename = '{}.nam'.format(name)
for package in self.packagelist:
if package.package_type not in package_type_count:
package.filename = '{}.{}'.format(name, package.package_type)
package_type_count[package.package_type] = 1
else:
package_type_count[package.package_type] += 1
package.filename = '{}_{}.{}'.format(
name, package_type_count[package.package_type],
package.package_type)
    def register_package(self, package, add_to_package_list=True,
                         set_package_name=True, set_package_filename=True):
        """
        registers a package with the model
        Parameters
        ----------
        package : MFPackage
            package to register
        add_to_package_list : bool
            add package to lookup list
        set_package_name : bool
            produce a package name for this package
        set_package_filename : bool
            produce a filename for this package
        Returns
        -------
        (path : tuple, package structure : MFPackageStructure)
            (None, None) is returned for unsupported file types
        Examples
        --------
        """
        package.container_type = [PackageContainerType.model]
        # a child package lives under its parent's path; a top-level
        # package lives directly under the model name
        if package.parent_file is not None:
            path = package.parent_file.path + (package.package_type,)
        else:
            path = (self.name, package.package_type)
        package_struct = \
            self.structure.get_package_struct(package.package_type)
        if add_to_package_list and path in self._package_paths:
            if not package_struct.multi_package_support:
                # package of this type already exists, replace it
                self.remove_package(package.package_type)
                if self.simulation_data.verbosity_level.value >= \
                        VerbosityLevel.normal.value:
                    print('WARNING: Package with type {} already exists. '
                          'Replacing existing package'
                          '.'.format(package.package_type))
            elif not set_package_name and package.package_name in \
                    self.package_name_dict:
                # package of this type with this name already
                # exists, replace it
                self.remove_package(
                    self.package_name_dict[package.package_name])
                if self.simulation_data.verbosity_level.value >= \
                        VerbosityLevel.normal.value:
                    print(
                        'WARNING: Package with name {} already exists. '
                        'Replacing existing package'
                        '.'.format(package.package_name))
        # make sure path is unique
        if path in self._package_paths:
            path_iter = datautil.PathIter(path)
            for new_path in path_iter:
                if new_path not in self._package_paths:
                    path = new_path
                    break
        self._package_paths[path] = 1
        # the name file itself is registered but never recorded in the
        # name file's own package list
        if package.package_type.lower() == 'nam':
            return path, self.structure.name_file_struct_obj
        if set_package_name:
            # produce a default package name
            if package_struct is not None and \
                    package_struct.multi_package_support:
                # check for other registered packages of this type
                name_iter = datautil.NameIter(package.package_type, False)
                for package_name in name_iter:
                    if package_name not in self.package_name_dict:
                        package.package_name = package_name
                        break
            else:
                package.package_name = package.package_type
        if set_package_filename:
            package._filename = '{}.{}'.format(self.name, package.package_type)
        if add_to_package_list:
            self._add_package(package, path)
            # add obs file to name file if it does not have a parent
            if package.package_type in self.structure.package_struct_objs or \
                    (package.package_type == 'obs' and package.parent_file is None):
                # update model name file
                pkg_type = package.package_type.upper()
                # strip a trailing 'A' -- presumably the ReadAsArrays
                # variant marker (cf. load_package) -- TODO confirm
                if len(pkg_type) > 3 and pkg_type[-1] == 'A':
                    pkg_type = pkg_type[0:-1]
                # Model Assumption - assuming all name files have a package
                # recarray
                self.name_file.packages.\
                    update_record(['{}6'.format(pkg_type), package._filename,
                                   package.package_name], 0)
        if package_struct is not None:
            return (path, package_struct)
        else:
            if self.simulation_data.verbosity_level.value >= \
                    VerbosityLevel.normal.value:
                print('WARNING: Unable to register unsupported file type {} '
                      'for model {}.'.format(package.package_type, self.name))
            return None, None
    def load_package(self, ftype, fname, pname, strict, ref_path,
                     dict_package_name=None, parent_package=None):
        """
        loads a package from a file
        Parameters
        ----------
        ftype : string
            the file type
        fname : string
            the name of the file containing the package input
        pname : string
            the user-defined name for the package
        strict : bool
            strict mode when loading the file
        ref_path : string
            path to the file. uses local path if set to None
        dict_package_name : string
            package name for dictionary lookup
        parent_package : MFPackage
            parent package
        Returns
        -------
        package : MFPackage
            the loaded package object
        Examples
        --------
        """
        if ref_path is not None:
            fname = os.path.join(ref_path, fname)
        sim_struct = mfstructure.MFStructure().sim_struct
        # multi-package types need a unique dictionary name; single-package
        # types just use the file type itself
        if (ftype in self.structure.package_struct_objs and
                self.structure.package_struct_objs[ftype].multi_package_support) or \
                (ftype in sim_struct.utl_struct_objs and
                 sim_struct.utl_struct_objs[ftype].multi_package_support):
            # resolve dictionary name for package
            if dict_package_name is not None:
                if parent_package is not None:
                    dict_package_name = '{}_{}'.format(parent_package.path[-1],
                                                       ftype)
                else:
                    # use dict_package_name as the base name
                    # NOTE(review): the membership test is keyed on 'ftype'
                    # but the counter is updated under 'dict_package_name' --
                    # if they ever differ this raises KeyError; confirm the
                    # intended key before changing.
                    if ftype in self._ftype_num_dict:
                        self._ftype_num_dict[dict_package_name] += 1
                    else:
                        self._ftype_num_dict[dict_package_name] = 0
                    dict_package_name = '{}_{}'.format(dict_package_name,
                                                       self._ftype_num_dict[
                                                           dict_package_name])
            else:
                # use ftype as the base name
                if ftype in self._ftype_num_dict:
                    self._ftype_num_dict[ftype] += 1
                else:
                    self._ftype_num_dict[ftype] = 0
                if pname is not None:
                    dict_package_name = pname
                else:
                    dict_package_name = '{}_{}'.format(ftype,
                                                       self._ftype_num_dict[
                                                           ftype])
        else:
            dict_package_name = ftype
        # clean up model type text
        model_type = self.structure.model_type
        while datautil.DatumUtil.is_int(model_type[-1]):
            model_type = model_type[0:-1]
        # create package
        package_obj = self.package_factory(ftype, model_type)
        package = package_obj(self, filename=fname, pname=dict_package_name,
                              loading_package=True,
                              parent_file=parent_package)
        try:
            package.load(strict)
        except ReadAsArraysException:
            # create ReadAsArrays package and load it instead
            package_obj = self.package_factory('{}a'.format(ftype), model_type)
            package = package_obj(self, filename=fname, pname=dict_package_name,
                                  loading_package=True,
                                  parent_file=parent_package)
            package.load(strict)
        # register child package with the model
        self._add_package(package, package.path)
        if parent_package is not None:
            # register child package with the parent package
            parent_package._add_package(package, package.path)
        return package
    def plot(self, SelPackList=None, **kwargs):
        """
        Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
        model input data from a model instance
        Args:
            SelPackList: (list) list of package names to plot, if none
                all packages will be plotted
            **kwargs : dict
                filename_base : str
                    Base file name that will be used to automatically generate file
                    names for output image files. Plots will be exported as image
                    files if file_name_base is not None. (default is None)
                file_extension : str
                    Valid matplotlib.pyplot file extension for savefig(). Only used
                    if filename_base is not None. (default is 'png')
                mflay : int
                    MODFLOW zero-based layer number to return. If None, then all
                    all layers will be included. (default is None)
                kper : int
                    MODFLOW zero-based stress period number to return.
                    (default is zero)
                key : str
                    MfList dictionary key. (default is None)
        Returns:
            axes : list
                Empty list is returned if filename_base is not None. Otherwise
                a list of matplotlib.pyplot.axis are returned.
        """
        # deferred import so plotting dependencies are only required here
        from flopy.plot.plotutil import PlotUtilities

        # all of the actual plotting work is delegated to the shared helper
        axes = PlotUtilities._plot_model_helper(self,
                                                SelPackList=SelPackList,
                                                **kwargs)
return axes | [
"flopy.plot.plotutil.PlotUtilities._plot_model_helper",
"inspect.stack",
"numpy.ones",
"numpy.hstack",
"numpy.array",
"os.path.splitext",
"sys.exc_info",
"warnings.warn",
"os.path.split",
"os.path.join",
"flopy.discretization.modeltime.ModelTime"
] | [((9850, 9904), 'flopy.discretization.modeltime.ModelTime', 'ModelTime', (['data_frame', 'itmuni', 'start_date_time', 'steady'], {}), '(data_frame, itmuni, start_date_time, steady)\n', (9859, 9904), False, 'from flopy.discretization.modeltime import ModelTime\n'), ((48929, 49002), 'flopy.plot.plotutil.PlotUtilities._plot_model_helper', 'PlotUtilities._plot_model_helper', (['self'], {'SelPackList': 'SelPackList'}), '(self, SelPackList=SelPackList, **kwargs)\n', (48961, 49002), False, 'from flopy.plot.plotutil import PlotUtilities\n'), ((4742, 4833), 'warnings.warn', 'warnings.warn', (['"""xul/yul have been deprecated. Use xll/yll instead."""', 'DeprecationWarning'], {}), "('xul/yul have been deprecated. Use xll/yll instead.',\n DeprecationWarning)\n", (4755, 4833), False, 'import os, sys, inspect, warnings\n'), ((4950, 5041), 'warnings.warn', 'warnings.warn', (['"""xul/yul have been deprecated. Use xll/yll instead."""', 'DeprecationWarning'], {}), "('xul/yul have been deprecated. Use xll/yll instead.',\n DeprecationWarning)\n", (4963, 5041), False, 'import os, sys, inspect, warnings\n'), ((44360, 44389), 'os.path.join', 'os.path.join', (['ref_path', 'fname'], {}), '(ref_path, fname)\n', (44372, 44389), False, 'import os, sys, inspect, warnings\n'), ((14160, 14194), 'numpy.ones', 'np.ones', (['dis.nodes.array', 'np.int32'], {}), '(dis.nodes.array, np.int32)\n', (14167, 14194), True, 'import numpy as np\n'), ((31937, 31960), 'os.path.split', 'os.path.split', (['entry[1]'], {}), '(entry[1])\n', (31950, 31960), False, 'import os, sys, inspect, warnings\n'), ((32003, 32040), 'os.path.splitext', 'os.path.splitext', (['old_model_file_name'], {}), '(old_model_file_name)\n', (32019, 32040), False, 'import os, sys, inspect, warnings\n'), ((32211, 32250), 'os.path.join', 'os.path.join', (['path', 'old_model_file_name'], {}), '(path, old_model_file_name)\n', (32223, 32250), False, 'import os, sys, inspect, warnings\n'), ((32626, 32650), 'os.path.split', 'os.path.split', 
(['list_file'], {}), '(list_file)\n', (32639, 32650), False, 'import os, sys, inspect, warnings\n'), ((33879, 33915), 'os.path.join', 'os.path.join', (['path', 'old_package_name'], {}), '(path, old_package_name)\n', (33891, 33915), False, 'import os, sys, inspect, warnings\n'), ((36630, 36644), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (36642, 36644), False, 'import os, sys, inspect, warnings\n'), ((15230, 15248), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (15238, 15248), True, 'import numpy as np\n'), ((33805, 33828), 'os.path.split', 'os.path.split', (['entry[1]'], {}), '(entry[1])\n', (33818, 33828), False, 'import os, sys, inspect, warnings\n'), ((32731, 32765), 'os.path.join', 'os.path.join', (['path', 'list_file_name'], {}), '(path, list_file_name)\n', (32743, 32765), False, 'import os, sys, inspect, warnings\n'), ((36531, 36563), 'numpy.hstack', 'np.hstack', (['(item, new_rec_array)'], {}), '((item, new_rec_array))\n', (36540, 36563), True, 'import numpy as np\n'), ((33036, 33070), 'os.path.join', 'os.path.join', (['path', 'list_file_name'], {}), '(path, list_file_name)\n', (33048, 33070), False, 'import os, sys, inspect, warnings\n'), ((36999, 37014), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (37012, 37014), False, 'import os, sys, inspect, warnings\n')] |
import numpy as np
from loguru import logger
from pymilvus import (
connections,
utility,
FieldSchema,
CollectionSchema,
DataType,
Collection,
)
class MilvusClient(object):
    """Thin wrapper around a Milvus collection used for vector search.

    Creates/attaches a collection named "VSearch" with an inner-product
    FLAT index over ``vector`` and exposes insert/delete/search helpers.
    """

    def __init__(self, milvus_host="localhost", milvus_port=19530, vector_dim=1000):
        # remember the configured dimensionality so helper methods stay
        # consistent with the collection schema (see delete())
        self.vector_dim = vector_dim
        self.connections = connections.connect("default", host=milvus_host, port=milvus_port)
        logger.info("Successfully create Milvus connection")
        fields = [
            FieldSchema(name="product_id", dtype=DataType.INT64),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),
            FieldSchema(name="uuid", dtype=DataType.INT64),
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
        ]
        schema = CollectionSchema(fields, "VSearch Database")
        self.collection = Collection("VSearch", schema)
        index = {
            "index_type": "FLAT",
            "metric_type": "IP",
            "params": {"nlist": 128}
        }
        self.collection.create_index("vector", index)
        self.collection.load()

    def insert(self, id, vector, _uuid):
        """Insert one (product_id, vector, uuid) row.

        Returns the result of Collection.insert, which carries the
        auto-generated primary keys.
        """
        data = [
            [id],
            [vector],
            [_uuid]
        ]
        ids = self.collection.insert(data)
        return ids

    def delete(self, id):
        """Delete up to 10 entries whose product_id equals *id*.

        Milvus deletes by primary key, so a filtered search with a random
        probe vector is used first to recover the primary keys of matching
        rows, which are then deleted by expression.
        """
        search_param = {
            # BUG FIX: the probe vector must match the collection's
            # dimensionality; it was previously hard-coded to 1000 and
            # broke for any other vector_dim
            "data": [np.random.rand(self.vector_dim)],
            "anns_field": "vector",
            "param": {
                "metric_type": "IP",
                "params": {"nprobe": 16}
            },
            "limit": 10
        }
        removed_ids = list()
        item = self.collection.search(**search_param, expr=f"product_id == {id}", output_fields=["product_id"])
        for i in range(len(item[0])):
            removed_ids.append(item[0][i].id)
        if not removed_ids:
            # nothing matched -- avoid issuing a delete with an empty
            # "id in []" expression
            return
        expr = "id in " + "[" + ", ".join([str(x) for x in removed_ids]) + "]"
        self.collection.delete(expr)

    def update(self, id, vector):
        # not implemented yet
        pass

    def search(self, vector):
        """Return the top-10 matches for *vector*, best first, as
        [product_id, distance, uuid] triples."""
        _TOP_K = 10
        search_param = {
            "data": [vector],
            "anns_field": "vector",
            "param": {
                "metric_type": "IP",
                "params": {"nprobe": 16}
            },
            "limit": _TOP_K,
        }
        response = list()
        results = self.collection.search(**search_param, output_fields=["product_id", "uuid"])
        for i in range(min(_TOP_K, len(results[0]))):
            response.append([results[0][i].entity.product_id, results[0][i].distance, results[0][i].entity.uuid])
        return response
| [
"loguru.logger.info",
"pymilvus.Collection",
"pymilvus.CollectionSchema",
"pymilvus.FieldSchema",
"numpy.random.rand",
"pymilvus.connections.connect"
] | [((311, 377), 'pymilvus.connections.connect', 'connections.connect', (['"""default"""'], {'host': 'milvus_host', 'port': 'milvus_port'}), "('default', host=milvus_host, port=milvus_port)\n", (330, 377), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((386, 438), 'loguru.logger.info', 'logger.info', (['"""Successfully create Milvus connection"""'], {}), "('Successfully create Milvus connection')\n", (397, 438), False, 'from loguru import logger\n'), ((787, 831), 'pymilvus.CollectionSchema', 'CollectionSchema', (['fields', '"""VSearch Database"""'], {}), "(fields, 'VSearch Database')\n", (803, 831), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((858, 887), 'pymilvus.Collection', 'Collection', (['"""VSearch"""', 'schema'], {}), "('VSearch', schema)\n", (868, 887), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((471, 523), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""product_id"""', 'dtype': 'DataType.INT64'}), "(name='product_id', dtype=DataType.INT64)\n", (482, 523), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((537, 608), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""vector"""', 'dtype': 'DataType.FLOAT_VECTOR', 'dim': 'vector_dim'}), "(name='vector', dtype=DataType.FLOAT_VECTOR, dim=vector_dim)\n", (548, 608), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((622, 668), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""uuid"""', 'dtype': 'DataType.INT64'}), "(name='uuid', dtype=DataType.INT64)\n", (633, 668), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((682, 757), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""id"""', 'dtype': 'DataType.INT64', 
'is_primary': '(True)', 'auto_id': '(True)'}), "(name='id', dtype=DataType.INT64, is_primary=True, auto_id=True)\n", (693, 757), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((1371, 1391), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1385, 1391), True, 'import numpy as np\n')] |
# TRANSFORMER CODEMASTER
# CODE BY <NAME>
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from nltk.corpus import gutenberg
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from players.codemaster import codemaster
import random
import scipy
import re
import torch
import torch.nn as nn
import numpy as np
from sklearn.neighbors import NearestNeighbors
#AutoModelWithLMHead
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, GPT2Tokenizer, TFGPT2Model
class ai_codemaster(codemaster):
    """Codenames codemaster that clusters its team's words using GPT-2
    static (layer-0) embeddings and clues with the vocabulary token
    closest to the cluster centroid."""

    def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
        # brown_ic / glove_vecs / word_vectors are accepted for interface
        # compatibility with the framework but are not used here
        #write any initializing code here
        # DEFINE THRESHOLD VALUE
        self.dist_threshold = 0.3
        # 1. GET EMBEDDING FOR RED WORDS USING GPT2
        torch.set_grad_enabled(False)
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        self.model = AutoModelForCausalLM.from_pretrained("gpt2")
        #get stop words and what-not
        nltk.download('popular',quiet=True)
        nltk.download('words',quiet=True)
        self.corp_words = set(nltk.corpus.words.words())
        return

    def receive_game_state(self, words, maps):
        # board words and their team/identity labels, updated every turn
        self.words = words
        self.maps = maps

    def give_clue(self):
        """Return [clue_word, count]: a one-word clue plus the number of
        red words it is intended to cover."""
        # 1. GET THE RED WORDS
        count = 0
        red_words = []
        bad_words = []
        # Creates Red-Labeled Word arrays, and everything else arrays
        for i in range(25):
            if self.words[i][0] == '*':
                continue
            elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
                bad_words.append(self.words[i].lower())
            else:
                red_words.append(self.words[i].lower())
        #print("RED:\t", red_words)
        ''' WRITE NEW CODE HERE '''
        # 1. Add \u0120 in front of every word to get a better embedding
        # (\u0120 is the GPT-2 BPE marker for a leading space)
        spec_red_words = list(map(lambda w: "\u0120" + w, red_words))
        #print(spec_red_words)
        # 2. CREATE WORD EMBEDDINGS FOR THE RED WORDS
        red_emb = self.word_embedding(spec_red_words) #retrieves embedding for red_words from gpt2 layer 0 (static embedding)
        # 3. USE THE K NEAREST NEIGHBOR -LIKE ALGORITHM (FIND K NEIGHBORS BASED ON THRESHOLD)
        '''
            DISTANCE MATRIX FOR EACH VECTOR
               a.     b      c
            a  | -    | -0.2 | 0.3  |
            b  | 0.3  | -    | -0.4 |
            c  | 0.1  | 0.6  | -    |
            Choose the words that has the most neighbors within
            the distance threshold
        '''
        # create distance matrix for words (pairwise cosine similarity)
        num_words = red_emb.shape[0]
        dist = np.zeros((num_words,num_words))
        for i in range(num_words):
            for j in range((red_emb.shape[0])):
                dist[i][j] = self.cos_sim(red_emb[i],red_emb[j])
        ## find the word with more neighbors within threshold
        # count number of neighbors below threshold for each word
        how_many = []
        for i in range(num_words):
            how_many.append((dist[i] >= self.dist_threshold).sum())
        #max number of words
        clue_num = max(how_many)
        # find which is the word with max number of neighbors
        # (clue_num is a NumPy scalar, so '==' against the list broadcasts
        # elementwise -- presumably intentional; confirm)
        donde = np.where(how_many == clue_num)[0][0]
        # find list of vectors in the subset
        subset = []
        # NOTE(review): the next line's result is discarded -- dead
        # statement; the same expression is recomputed in the loop below
        np.where(dist[donde] >= self.dist_threshold)[0]
        for i in range(np.where(dist[donde] >= self.dist_threshold)[0].shape[0]):
            subset.append(np.where(dist[donde] >= self.dist_threshold)[0][i])
        #DEBUG
        #print(subset)
        #print(red_emb[subset])
        #print(dist)
        #print(how_many)
        # 4. FIND THE CENTROID OF THE SUBSET
        center = torch.mean(red_emb[subset], dim=0)
        # 5. USE KNN TO FIND THE CLOSEST MATCH IN THE GPT2 MATRIX FOR THE CENTROID VECTOR
        emb_matrix = self.model.transformer.wte.weight
        self.vectors = emb_matrix.detach()
        # 6. RETURN THE WORD FROM THE GPT2 MATRIX + THE NUMBER OF THE NEIGHBORS FROM THE CLUSTER
        clue = self.getBestCleanWord(center, self.words)
        # 6. RETURN THE WORD FROM THE GPT2 MATRIX + THE NUMBER OF THE NEIGHBORS FROM THE CLUSTER
        #clue = tokenizer.convert_ids_to_tokens(int(knn.kneighbors(center.reshape(1,-1))[1][0][0]))
        return [clue,clue_num]

    #create word vectors for each word
    def word_embedding(self, red_words):
        """Look up the static (input-embedding) vectors for the given
        tokens from GPT-2's token-embedding matrix."""
        text_index = self.tokenizer.encode(red_words,add_prefix_space=False)
        word_emb = self.model.transformer.wte.weight[text_index,:]
        return word_emb

    # cosine similarity
    def cos_sim(self, input1, input2):
        """Cosine similarity between two 1-D tensors."""
        cos = nn.CosineSimilarity(dim=0,eps=1e-6)
        return cos(input1, input2)

    #clean up the set of words
    def cleanWords(self, embed):
        """Filter a list of candidate clue strings down to lowercase,
        ASCII-only dictionary words that are not stopwords, prepositions,
        single characters, or digits."""
        recomm = [i.lower() for i in embed]
        recomm2 = ' '.join(recomm)
        # keep only real English words (or non-alphabetic tokens, dropped later)
        recomm3 = [w for w in nltk.wordpunct_tokenize(recomm2) \
                   if w.lower() in self.corp_words or not w.isalpha()]
        # NOTE(review): file handle is never closed -- consider a 'with' block
        prepositions = open('ai4games/prepositions_etc.txt').read().splitlines() #create list with prepositions
        stop_words = nltk.corpus.stopwords.words('english') #change set format
        stop_words.extend(prepositions) #add prepositions and similar to stopwords
        word_tokens = word_tokenize(' '.join(recomm3))
        recomm4 = [w for w in word_tokens if not w in stop_words]
        excl_ascii = lambda s: re.match('^[\x00-\x7F]+$', s) != None #checks for ascii only
        is_uni_char = lambda s: (len(s) == 1) == True #check if a unicode (single) character
        recomm5 = [w for w in recomm4 if excl_ascii(w) and not is_uni_char(w) and not w.isdigit()]
        return recomm5

    def getBestCleanWord(self, center, board):
        """Return the first clean vocabulary word near *center* that is
        not already on the board, scanning neighbors in batches of 100."""
        tries = 1
        amt = 100
        maxTry = 5
        knn = NearestNeighbors(n_neighbors=(maxTry*amt))
        knn.fit(self.vectors)
        vecinos = knn.kneighbors(center.reshape(1,-1))
        low_board = list(map(lambda w: w.lower(), board))
        # NOTE(review): with tries starting at 1 and 'tries < 5', only the
        # first 4*amt = 400 of the maxTry*amt = 500 fetched neighbors are
        # ever examined -- confirm whether '<= maxTry' was intended
        while (tries < 5):
            # 6. WORD CLEANUP AND PARSING
            recomm = []
            #numrec = (tries-1)*1000
            for i in range((tries-1)*amt,(tries)*amt):
                recomm.append(self.tokenizer.decode((int(vecinos[1][0][i])), skip_special_tokens = True, clean_up_tokenization_spaces = True))
            clean_words = self.cleanWords(recomm)
            #print(clean_words)
            #7. Get the first word not in the board
            for w in clean_words:
                if w not in low_board:
                    return w
            #otherwise try again
            tries+=1
        return "??" #fallback: no usable word among the scanned neighbors
| [
"torch.mean",
"transformers.AutoModelForCausalLM.from_pretrained",
"torch.set_grad_enabled",
"nltk.wordpunct_tokenize",
"numpy.zeros",
"re.match",
"torch.nn.CosineSimilarity",
"numpy.where",
"sklearn.neighbors.NearestNeighbors",
"nltk.corpus.stopwords.words",
"transformers.GPT2Tokenizer.from_pre... | [((814, 843), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (836, 843), False, 'import torch\n'), ((863, 900), 'transformers.GPT2Tokenizer.from_pretrained', 'GPT2Tokenizer.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (892, 900), False, 'from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, GPT2Tokenizer, TFGPT2Model\n'), ((916, 960), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (952, 960), False, 'from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, GPT2Tokenizer, TFGPT2Model\n'), ((995, 1031), 'nltk.download', 'nltk.download', (['"""popular"""'], {'quiet': '(True)'}), "('popular', quiet=True)\n", (1008, 1031), False, 'import nltk\n'), ((1033, 1067), 'nltk.download', 'nltk.download', (['"""words"""'], {'quiet': '(True)'}), "('words', quiet=True)\n", (1046, 1067), False, 'import nltk\n'), ((2432, 2464), 'numpy.zeros', 'np.zeros', (['(num_words, num_words)'], {}), '((num_words, num_words))\n', (2440, 2464), True, 'import numpy as np\n'), ((3357, 3391), 'torch.mean', 'torch.mean', (['red_emb[subset]'], {'dim': '(0)'}), '(red_emb[subset], dim=0)\n', (3367, 3391), False, 'import torch\n'), ((4217, 4254), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (4236, 4254), True, 'import torch.nn as nn\n'), ((4644, 4682), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (4671, 4682), False, 'import nltk\n'), ((5267, 5309), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(maxTry * amt)'}), '(n_neighbors=maxTry * amt)\n', (5283, 5309), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1091, 1116), 'nltk.corpus.words.words', 'nltk.corpus.words.words', ([], {}), '()\n', (1114, 1116), False, 'import nltk\n'), 
((3021, 3065), 'numpy.where', 'np.where', (['(dist[donde] >= self.dist_threshold)'], {}), '(dist[donde] >= self.dist_threshold)\n', (3029, 3065), True, 'import numpy as np\n'), ((2928, 2958), 'numpy.where', 'np.where', (['(how_many == clue_num)'], {}), '(how_many == clue_num)\n', (2936, 2958), True, 'import numpy as np\n'), ((4433, 4465), 'nltk.wordpunct_tokenize', 'nltk.wordpunct_tokenize', (['recomm2'], {}), '(recomm2)\n', (4456, 4465), False, 'import nltk\n'), ((4920, 4949), 're.match', 're.match', (["'^[\\x00-\\x7f]+$'", 's'], {}), "('^[\\x00-\\x7f]+$', s)\n", (4928, 4949), False, 'import re\n'), ((3086, 3130), 'numpy.where', 'np.where', (['(dist[donde] >= self.dist_threshold)'], {}), '(dist[donde] >= self.dist_threshold)\n', (3094, 3130), True, 'import numpy as np\n'), ((3165, 3209), 'numpy.where', 'np.where', (['(dist[donde] >= self.dist_threshold)'], {}), '(dist[donde] >= self.dist_threshold)\n', (3173, 3209), True, 'import numpy as np\n')] |
import numpy as np
import torch
from mmcv.utils import print_log
from terminaltables import AsciiTable
def average_precision(recalls, precisions, mode='area'):
    """Calculate average precision (for single or multiple scales).

    Args:
        recalls (np.ndarray): Recalls with shape of (num_scales, num_dets)
            or (num_dets, ).
        precisions (np.ndarray): Precisions with shape of
            (num_scales, num_dets) or (num_dets, ).
        mode (str): 'area' integrates the precision-recall curve;
            '11points' averages the precision at recalls [0, 0.1, ..., 1].

    Returns:
        float or np.ndarray: Calculated average precision.
    """
    # promote 1-D inputs to a single-scale 2-D layout
    if recalls.ndim == 1:
        recalls = recalls[np.newaxis, :]
        precisions = precisions[np.newaxis, :]
    assert recalls.shape == precisions.shape and recalls.ndim == 2
    num_scales = recalls.shape[0]
    ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'area':
        # pad with sentinels: recall 0->1, precision 0 at both ends
        pad_zero = np.zeros((num_scales, 1), dtype=recalls.dtype)
        pad_one = np.ones((num_scales, 1), dtype=recalls.dtype)
        mrec = np.concatenate((pad_zero, recalls, pad_one), axis=1)
        mpre = np.concatenate((pad_zero, precisions, pad_zero), axis=1)
        # make precision monotonically non-increasing (right-to-left max)
        for col in range(mpre.shape[1] - 1, 0, -1):
            np.maximum(mpre[:, col - 1], mpre[:, col], out=mpre[:, col - 1])
        for row in range(num_scales):
            # integrate over the points where recall actually changes
            changed = np.flatnonzero(mrec[row, 1:] != mrec[row, :-1])
            ap[row] = ((mrec[row, changed + 1] - mrec[row, changed]) *
                       mpre[row, changed + 1]).sum()
    elif mode == '11points':
        for row in range(num_scales):
            for thr in np.arange(0, 1 + 1e-3, 0.1):
                candidates = precisions[row, recalls[row, :] >= thr]
                ap[row] += candidates.max() if candidates.size > 0 else 0
            ap[row] /= 11
    else:
        raise ValueError(
            'Unrecognized mode, only "area" and "11points" are supported')
    return ap
def eval_det_cls(pred, gt, iou_thr=None):
"""Generic functions to compute precision/recall for object detection for a
single class.
Args:
pred (dict): Predictions mapping from image id to bounding boxes \
and scores.
gt (dict): Ground truths mapping from image id to bounding boxes.
iou_thr (list[float]): A list of iou thresholds.
Return:
tuple (np.ndarray, np.ndarray, float): Recalls, precisions and \
average precision.
"""
# {img_id: {'bbox': box structure, 'det': matched list}}
class_recs = {}
npos = 0
img_id_npos = {}
for img_id in gt.keys():
cur_gt_num = len(gt[img_id])
if cur_gt_num != 0:
gt_cur = torch.zeros([cur_gt_num, 7], dtype=torch.float32)
for i in range(cur_gt_num):
gt_cur[i] = gt[img_id][i].tensor
bbox = gt[img_id][0].new_box(gt_cur)
else:
bbox = gt[img_id]
det = [[False] * len(bbox) for i in iou_thr]
npos += len(bbox)
img_id_npos[img_id] = img_id_npos.get(img_id, 0) + len(bbox)
class_recs[img_id] = {'bbox': bbox, 'det': det}
# construct dets
image_ids = []
confidence = []
ious = []
for img_id in pred.keys():
cur_num = len(pred[img_id])
if cur_num == 0:
continue
pred_cur = torch.zeros((cur_num, 7), dtype=torch.float32)
box_idx = 0
for box, score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
pred_cur[box_idx] = box.tensor
box_idx += 1
pred_cur = box.new_box(pred_cur)
gt_cur = class_recs[img_id]['bbox']
if len(gt_cur) > 0:
# calculate iou in each image
iou_cur = pred_cur.overlaps(pred_cur, gt_cur)
for i in range(cur_num):
ious.append(iou_cur[i])
else:
for i in range(cur_num):
ious.append(np.zeros(1))
confidence = np.array(confidence)
# sort by confidence
sorted_ind = np.argsort(-confidence)
image_ids = [image_ids[x] for x in sorted_ind]
ious = [ious[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp_thr = [np.zeros(nd) for i in iou_thr]
fp_thr = [np.zeros(nd) for i in iou_thr]
for d in range(nd):
R = class_recs[image_ids[d]]
iou_max = -np.inf
BBGT = R['bbox']
cur_iou = ious[d]
if len(BBGT) > 0:
# compute overlaps
for j in range(len(BBGT)):
# iou = get_iou_main(get_iou_func, (bb, BBGT[j,...]))
iou = cur_iou[j]
if iou > iou_max:
iou_max = iou
jmax = j
for iou_idx, thresh in enumerate(iou_thr):
if iou_max > thresh:
if not R['det'][iou_idx][jmax]:
tp_thr[iou_idx][d] = 1.
R['det'][iou_idx][jmax] = 1
else:
fp_thr[iou_idx][d] = 1.
else:
fp_thr[iou_idx][d] = 1.
ret = []
# Return additional information for custom metrics.
new_ret = {}
new_ret["image_ids"] = image_ids
new_ret["iou_thr"] = iou_thr
new_ret["ious"] = [max(x.tolist()) for x in ious]
new_ret["fp_thr"] = [x.tolist() for x in fp_thr]
new_ret["tp_thr"] = [x.tolist() for x in tp_thr]
new_ret["img_id_npos"] = img_id_npos
for iou_idx, thresh in enumerate(iou_thr):
# compute precision recall
fp = np.cumsum(fp_thr[iou_idx])
tp = np.cumsum(tp_thr[iou_idx])
recall = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = average_precision(recall, precision)
ret.append((recall, precision, ap))
return ret, new_ret
def eval_map_recall(pred, gt, ovthresh=None):
"""Evaluate mAP and recall.
Generic functions to compute precision/recall for object detection
for multiple classes.
Args:
pred (dict): Information of detection results,
which maps class_id and predictions.
gt (dict): Information of ground truths, which maps class_id and \
ground truths.
ovthresh (list[float]): iou threshold.
Default: None.
Return:
tuple[dict]: dict results of recall, AP, and precision for all classes.
"""
ret_values = {}
new_ret_values = {}
for classname in gt.keys():
if classname in pred:
ret_values[classname], new_ret_values[classname] = eval_det_cls(pred[classname],
gt[classname], ovthresh)
recall = [{} for i in ovthresh]
precision = [{} for i in ovthresh]
ap = [{} for i in ovthresh]
for label in gt.keys():
for iou_idx, thresh in enumerate(ovthresh):
if label in pred:
recall[iou_idx][label], precision[iou_idx][label], ap[iou_idx][
label] = ret_values[label][iou_idx]
else:
recall[iou_idx][label] = np.zeros(1)
precision[iou_idx][label] = np.zeros(1)
ap[iou_idx][label] = np.zeros(1)
return recall, precision, ap, new_ret_values
def indoor_eval(pts_paths,
gt_annos,
dt_annos,
metric,
label2cat,
logger=None,
box_type_3d=None,
box_mode_3d=None):
"""Indoor Evaluation.
Evaluate the result of the detection.
Args:
gt_annos (list[dict]): Ground truth annotations.
dt_annos (list[dict]): Detection annotations. the dict
includes the following keys
- labels_3d (torch.Tensor): Labels of boxes.
- boxes_3d (:obj:`BaseInstance3DBoxes`): \
3D bounding boxes in Depth coordinate.
- scores_3d (torch.Tensor): Scores of boxes.
metric (list[float]): IoU thresholds for computing average precisions.
label2cat (dict): Map from label to category.
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmdet.utils.print_log()` for details. Default: None.
Return:
dict[str, float]: Dict of results.
"""
assert len(dt_annos) == len(gt_annos)
pred = {} # map {class_id: pred}
gt = {} # map {class_id: gt}
for img_id in range(len(dt_annos)):
# parse detected annotations
det_anno = dt_annos[img_id]
for i in range(len(det_anno['labels_3d'])):
label = det_anno['labels_3d'].numpy()[i]
bbox = det_anno['boxes_3d'].convert_to(box_mode_3d)[i]
score = det_anno['scores_3d'].numpy()[i]
if label not in pred:
pred[int(label)] = {}
if img_id not in pred[label]:
pred[int(label)][img_id] = []
if label not in gt:
gt[int(label)] = {}
if img_id not in gt[label]:
gt[int(label)][img_id] = []
pred[int(label)][img_id].append((bbox, score))
# parse gt annotations
gt_anno = gt_annos[img_id]
if gt_anno['gt_num'] != 0:
gt_boxes = box_type_3d(
gt_anno['gt_boxes_upright_depth'],
box_dim=gt_anno['gt_boxes_upright_depth'].shape[-1],
origin=(0.5, 0.5, 0.5)).convert_to(box_mode_3d)
labels_3d = gt_anno['class']
else:
gt_boxes = box_type_3d(np.array([], dtype=np.float32))
labels_3d = np.array([], dtype=np.int64)
for i in range(len(labels_3d)):
label = labels_3d[i]
bbox = gt_boxes[i]
if label not in gt:
gt[label] = {}
if img_id not in gt[label]:
gt[label][img_id] = []
gt[label][img_id].append(bbox)
rec, prec, ap, new_ret_dict = eval_map_recall(pred, gt, metric)
ret_dict = dict()
# Export additional information for custom metrics calculation.
ret_dict["pts_paths"] = pts_paths
ret_dict["new_ret_dict"] = {label2cat[label] : metrics for label, metrics in new_ret_dict.items()}
header = ['classes']
table_columns = [[label2cat[label]
for label in ap[0].keys()] + ['Overall']]
for i, iou_thresh in enumerate(metric):
header.append(f'AP_{iou_thresh:.2f}')
header.append(f'AR_{iou_thresh:.2f}')
rec_list = []
for label in ap[i].keys():
ret_dict[f'{label2cat[label]}_AP_{iou_thresh:.2f}'] = float(
ap[i][label][0])
ret_dict[f'mAP_{iou_thresh:.2f}'] = float(
np.mean(list(ap[i].values())))
table_columns.append(list(map(float, list(ap[i].values()))))
table_columns[-1] += [ret_dict[f'mAP_{iou_thresh:.2f}']]
table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]]
for label in rec[i].keys():
ret_dict[f'{label2cat[label]}_rec_{iou_thresh:.2f}'] = float(
rec[i][label][-1])
rec_list.append(rec[i][label][-1])
ret_dict[f'mAR_{iou_thresh:.2f}'] = float(np.mean(rec_list))
table_columns.append(list(map(float, rec_list)))
table_columns[-1] += [ret_dict[f'mAR_{iou_thresh:.2f}']]
table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]]
table_data = [header]
table_rows = list(zip(*table_columns))
table_data += table_rows
table = AsciiTable(table_data)
table.inner_footing_row_border = True
print_log('\n' + table.table, logger=logger)
return ret_dict
| [
"numpy.maximum",
"numpy.sum",
"terminaltables.AsciiTable",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"numpy.cumsum",
"numpy.finfo",
"numpy.mean",
"numpy.array",
"numpy.where",
"numpy.arange",
"torch.zeros",
"mmcv.utils.print_log"
] | [((985, 1023), 'numpy.zeros', 'np.zeros', (['num_scales'], {'dtype': 'np.float32'}), '(num_scales, dtype=np.float32)\n', (993, 1023), True, 'import numpy as np\n'), ((4028, 4048), 'numpy.array', 'np.array', (['confidence'], {}), '(confidence)\n', (4036, 4048), True, 'import numpy as np\n'), ((4092, 4115), 'numpy.argsort', 'np.argsort', (['(-confidence)'], {}), '(-confidence)\n', (4102, 4115), True, 'import numpy as np\n'), ((11714, 11736), 'terminaltables.AsciiTable', 'AsciiTable', (['table_data'], {}), '(table_data)\n', (11724, 11736), False, 'from terminaltables import AsciiTable\n'), ((11783, 11827), 'mmcv.utils.print_log', 'print_log', (["('\\n' + table.table)"], {'logger': 'logger'}), "('\\n' + table.table, logger=logger)\n", (11792, 11827), False, 'from mmcv.utils import print_log\n'), ((1063, 1109), 'numpy.zeros', 'np.zeros', (['(num_scales, 1)'], {'dtype': 'recalls.dtype'}), '((num_scales, 1), dtype=recalls.dtype)\n', (1071, 1109), True, 'import numpy as np\n'), ((1125, 1170), 'numpy.ones', 'np.ones', (['(num_scales, 1)'], {'dtype': 'recalls.dtype'}), '((num_scales, 1), dtype=recalls.dtype)\n', (1132, 1170), True, 'import numpy as np\n'), ((1186, 1219), 'numpy.hstack', 'np.hstack', (['(zeros, recalls, ones)'], {}), '((zeros, recalls, ones))\n', (1195, 1219), True, 'import numpy as np\n'), ((1235, 1272), 'numpy.hstack', 'np.hstack', (['(zeros, precisions, zeros)'], {}), '((zeros, precisions, zeros))\n', (1244, 1272), True, 'import numpy as np\n'), ((3379, 3425), 'torch.zeros', 'torch.zeros', (['(cur_num, 7)'], {'dtype': 'torch.float32'}), '((cur_num, 7), dtype=torch.float32)\n', (3390, 3425), False, 'import torch\n'), ((4287, 4299), 'numpy.zeros', 'np.zeros', (['nd'], {}), '(nd)\n', (4295, 4299), True, 'import numpy as np\n'), ((4332, 4344), 'numpy.zeros', 'np.zeros', (['nd'], {}), '(nd)\n', (4340, 4344), True, 'import numpy as np\n'), ((5622, 5648), 'numpy.cumsum', 'np.cumsum', (['fp_thr[iou_idx]'], {}), '(fp_thr[iou_idx])\n', (5631, 5648), True, 
'import numpy as np\n'), ((5662, 5688), 'numpy.cumsum', 'np.cumsum', (['tp_thr[iou_idx]'], {}), '(tp_thr[iou_idx])\n', (5671, 5688), True, 'import numpy as np\n'), ((1352, 1390), 'numpy.maximum', 'np.maximum', (['mpre[:, i - 1]', 'mpre[:, i]'], {}), '(mpre[:, i - 1], mpre[:, i])\n', (1362, 1390), True, 'import numpy as np\n'), ((1506, 1566), 'numpy.sum', 'np.sum', (['((mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])'], {}), '((mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])\n', (1512, 1566), True, 'import numpy as np\n'), ((2736, 2785), 'torch.zeros', 'torch.zeros', (['[cur_gt_num, 7]'], {'dtype': 'torch.float32'}), '([cur_gt_num, 7], dtype=torch.float32)\n', (2747, 2785), False, 'import torch\n'), ((9776, 9804), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (9784, 9804), True, 'import numpy as np\n'), ((11385, 11402), 'numpy.mean', 'np.mean', (['rec_list'], {}), '(rec_list)\n', (11392, 11402), True, 'import numpy as np\n'), ((1445, 1482), 'numpy.where', 'np.where', (['(mrec[i, 1:] != mrec[i, :-1])'], {}), '(mrec[i, 1:] != mrec[i, :-1])\n', (1453, 1482), True, 'import numpy as np\n'), ((1672, 1700), 'numpy.arange', 'np.arange', (['(0)', '(1 + 0.001)', '(0.1)'], {}), '(0, 1 + 0.001, 0.1)\n', (1681, 1700), True, 'import numpy as np\n'), ((7276, 7287), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (7284, 7287), True, 'import numpy as np\n'), ((7332, 7343), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (7340, 7343), True, 'import numpy as np\n'), ((7381, 7392), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (7389, 7392), True, 'import numpy as np\n'), ((9720, 9750), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (9728, 9750), True, 'import numpy as np\n'), ((3997, 4008), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (4005, 4008), True, 'import numpy as np\n'), ((5870, 5890), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (5878, 5890), 
True, 'import numpy as np\n')] |
import uf
import numpy as np
def get_best_f1(probs, labels, label_index=1):
assert len(probs) == len(labels)
probs = np.array(probs)
labels = np.array(labels)
num = np.sum(labels == label_index)
tp = num
fp = len(labels) - num
fn = 0
tn = 0
accuracy = (tp + tn) / (tp + tn + fp + fn + 1e-9)
precision = tp / (tp + fp + 1e-9)
recall = tp / (tp + fn + 1e-9)
f1 = 2 * precision * recall / (precision + recall + 1e-9)
threshold = 0
ids = sorted(
list(range(len(probs))), key=lambda i: probs[i])
for i in ids:
prob = probs[i]
label = labels[i]
if label == label_index:
tp -= 1
fn += 1
elif label != label_index:
fp -= 1
tn += 1
_accuracy = (tp + tn) / (tp + tn + fp + fn + 1e-9)
_precision = tp / (tp + fp + 1e-9)
_recall = tp / (tp + fn + 1e-9)
_f1 = 2 * _precision * _recall / (_precision + _recall + 1e-9)
if _f1 > f1:
accuracy = _accuracy
precision = _precision
recall = _recall
f1 = _f1
threshold = prob
return (accuracy, precision, recall, f1, threshold, num)
def main():
uf.set_log('./log')
# load data
with open('./data/SST-2/train.tsv', encoding='utf-8') as f:
X, y = [], []
for line in f.readlines()[1:]: # ignore title
line = line.strip().split('\t')
X.append(line[0])
y.append(int(line[1]))
with open('./data/SST-2/dev.tsv', encoding='utf-8') as f:
X_dev, y_dev = [], []
for line in f.readlines()[1:]: # ignore title
line = line.strip().split('\t')
X_dev.append(line[0])
y_dev.append(int(line[1]))
# modeling
model = uf.BERTClassifier(
config_file='./bert-base-zh/bert_config.json',
vocab_file='./bert-base-zh/vocab.txt',
max_seq_length=128,
init_checkpoint='./bert-base-zh',
output_dir='outputs',
gpu_ids='0')
# training
for epoch in range(3):
model.fit(
X, y,
batch_size=64,
target_steps=-(epoch + 1),
total_steps=-3,
print_per_secs=5,
save_per_steps=1000000)
model.cache('epoch_%d' % epoch)
probs = model.predict(X_dev)['probs']
for i in range(2):
acc, pre, rec, f1, thresh, num = get_best_f1(
probs[:, i], y_dev, label_index=i)
print('[dev] label %d (%d): accuracy %.6f, precision %.6f, '
'recall %.6f, f1 %.6f, thresh %s'
% (i, num, acc, pre, rec, f1, thresh))
print('Application finished.')
if __name__ == '__main__':
main()
| [
"uf.set_log",
"numpy.array",
"numpy.sum",
"uf.BERTClassifier"
] | [((133, 148), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (141, 148), True, 'import numpy as np\n'), ((163, 179), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (171, 179), True, 'import numpy as np\n'), ((191, 220), 'numpy.sum', 'np.sum', (['(labels == label_index)'], {}), '(labels == label_index)\n', (197, 220), True, 'import numpy as np\n'), ((1288, 1307), 'uf.set_log', 'uf.set_log', (['"""./log"""'], {}), "('./log')\n", (1298, 1307), False, 'import uf\n'), ((1888, 2090), 'uf.BERTClassifier', 'uf.BERTClassifier', ([], {'config_file': '"""./bert-base-zh/bert_config.json"""', 'vocab_file': '"""./bert-base-zh/vocab.txt"""', 'max_seq_length': '(128)', 'init_checkpoint': '"""./bert-base-zh"""', 'output_dir': '"""outputs"""', 'gpu_ids': '"""0"""'}), "(config_file='./bert-base-zh/bert_config.json', vocab_file\n ='./bert-base-zh/vocab.txt', max_seq_length=128, init_checkpoint=\n './bert-base-zh', output_dir='outputs', gpu_ids='0')\n", (1905, 2090), False, 'import uf\n')] |
"""
Reproduction of LIO in the Escape Room
version 0.1:
Fixed the gradient graph.
Used PyTorchViz to trace BP route.
Used Torchmeta to allow net parameters to be backward.
"""
import numpy as np
import random
import torch
from torch.nn import functional as F
from matplotlib import pyplot as plt
from lio.agent.lio_agent import Actor
from lio.model.actor_net import Trajectory
from lio.alg import config_room_lio
from lio.env import room_symmetric
from lio.utils.util import grad_graph
def train(config):
# set seeds for the training
# seed = config.main.seed
seed = 1234
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
n_episodes = int(config.alg.n_episodes)
n_eval = config.alg.n_eval
period = config.alg.period
results_one = []
results_two = []
reward_one_to_two = []
reward_two_to_one = []
# 初始化环境
env = room_symmetric.Env(config.env)
agents = []
for i in range(config.env.n_agents):
agents.append(Actor(i, 7, config.env.n_agents))
# epoch start
for epoch in range(5000):
trajs = [Trajectory() for _ in range(env.n_agents)]
list_obs = env.reset()
list_obs_next = None
done = False
result_one = 0
result_two = 0
while not done:
list_act = []
list_act_hot = []
if list_obs_next is not None:
list_obs = list_obs_next
# set observations and decide actions
for agent in agents:
agent.set_obs(list_obs[agent.id])
agent.action_sampling()
list_act_hot.append(agent.get_action_hot())
list_act.append(agent.get_action())
list_rewards = []
total_reward_given_to_each_agent = torch.zeros(env.n_agents)
reward = [None for _ in range(env.n_agents)]
# give rewards
for agent in agents:
reward[agent.id] = agent.give_reward(list_act_hot)
for idx in range(env.n_agents):
if idx != agent.id:
total_reward_given_to_each_agent[idx] += reward[agent.id][idx] # 各个智能体受到的激励
reward_sum = (reward[agent.id].sum() - reward[agent.id][agent.id]).detach().numpy() # 计算自己在这一步给予别人的总激励
list_rewards.append(reward_sum)
# execute step
list_obs_next, env_rewards, done = env.step(list_act, list_rewards)
for agent in agents:
reward_given = total_reward_given_to_each_agent[agent.id]
trajs[agent.id].add(agent.get_obs(), agent.get_action(), agent.get_action_hot(), env_rewards[agent.id], reward_given)
result_one += env_rewards[0]
result_two += env_rewards[1]
for agent in agents:
agent.update_policy(trajs[agent.id])
# Generate a new trajectory
trajs_new = [Trajectory() for _ in range(env.n_agents)]
list_obs = env.reset()
list_obs_next = None
done = False
result_one_new = 0
result_two_new = 0
while not done:
list_act = []
list_act_hot = []
if list_obs_next is not None:
list_obs = list_obs_next
# set observations and decide actions
for agent in agents:
agent.set_obs(list_obs[agent.id])
agent.action_sampling(agent.new_params)
list_act_hot.append(agent.get_action_hot())
list_act.append(agent.get_action())
list_rewards = []
total_reward_given_to_each_agent = torch.zeros(env.n_agents)
reward_new = [None for _ in range(env.n_agents)]
# give rewards
for agent in agents:
reward_new[agent.id] = agent.give_reward(list_act_hot)
reward_sum = torch.zeros(1)
for idx in range(env.n_agents):
if idx != agent.id:
total_reward_given_to_each_agent[idx] += reward_new[agent.id][idx]
reward_sum += reward_new[agent.id][idx] # 计算自己总共给予了多少报酬
reward_sum = (reward_new[agent.id].sum() - reward_new[agent.id][agent.id]).detach().numpy()
list_rewards.append(reward_sum)
if agent.id == 0:
reward_one_to_two.append(reward_sum)
else:
reward_two_to_one.append(reward_sum)
# execute step
list_obs_next, env_rewards, done = env.step(list_act, list_rewards)
for agent in agents:
reward_given = total_reward_given_to_each_agent[agent.id]
trajs_new[agent.id].add(agent.get_obs(), agent.get_action(), agent.get_action_hot(), env_rewards[agent.id], reward_given)
result_one_new += env_rewards[0]
result_two_new += env_rewards[1]
if done:
results_one.append(result_one_new)
results_two.append(result_two_new)
# compute new log prob act
log_prob_act_other = [[] for _ in range(config.env.n_agents)]
for agent in agents:
states_new = [trajectory.get_state() for trajectory in trajs_new]
actions_new = [trajectory.get_action() for trajectory in trajs_new]
logits, _ = agent.policy_net(states_new[agent.id], agent.new_params)
# grad_graph(logits, 'logits')
log_prob = F.log_softmax(logits, dim=-1)
log_prob_act = torch.stack([log_prob[i][actions_new[agent.id][i]]
for i in range(len(actions_new[agent.id]))],
dim=0)
log_prob_act_other[agent.id] = log_prob_act
for agent in agents:
agent.update_rewards_giving(trajs, trajs_new, log_prob_act_other)
for agent in agents:
agent.update_to_new_params()
return results_one, results_two, reward_one_to_two, reward_two_to_one
def run_epoch():
# TODO
pass
if __name__ == "__main__":
config = config_room_lio.get_config()
with torch.autograd.set_detect_anomaly(True):
results_one, result_two, reward1, reward2 = train(config)
plt.figure(1)
plt.subplot(211)
plt.plot(results_one)
plt.plot(result_two)
plt.subplot(212)
plt.plot(reward1)
plt.plot(reward2)
plt.show()
| [
"matplotlib.pyplot.subplot",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"lio.agent.lio_agent.Actor",
"torch.manual_seed",
"lio.env.room_symmetric.Env",
"lio.model.actor_net.Trajectory",
"lio.alg.config_room_lio.get_config",
"matplotlib.pyplot.figure",
"random.seed",... | [((595, 615), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (609, 615), True, 'import numpy as np\n'), ((620, 637), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (631, 637), False, 'import random\n'), ((642, 665), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (659, 665), False, 'import torch\n'), ((893, 923), 'lio.env.room_symmetric.Env', 'room_symmetric.Env', (['config.env'], {}), '(config.env)\n', (911, 923), False, 'from lio.env import room_symmetric\n'), ((6150, 6178), 'lio.alg.config_room_lio.get_config', 'config_room_lio.get_config', ([], {}), '()\n', (6176, 6178), False, 'from lio.alg import config_room_lio\n'), ((6300, 6313), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (6310, 6313), True, 'from matplotlib import pyplot as plt\n'), ((6318, 6334), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (6329, 6334), True, 'from matplotlib import pyplot as plt\n'), ((6339, 6360), 'matplotlib.pyplot.plot', 'plt.plot', (['results_one'], {}), '(results_one)\n', (6347, 6360), True, 'from matplotlib import pyplot as plt\n'), ((6365, 6385), 'matplotlib.pyplot.plot', 'plt.plot', (['result_two'], {}), '(result_two)\n', (6373, 6385), True, 'from matplotlib import pyplot as plt\n'), ((6390, 6406), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (6401, 6406), True, 'from matplotlib import pyplot as plt\n'), ((6411, 6428), 'matplotlib.pyplot.plot', 'plt.plot', (['reward1'], {}), '(reward1)\n', (6419, 6428), True, 'from matplotlib import pyplot as plt\n'), ((6433, 6450), 'matplotlib.pyplot.plot', 'plt.plot', (['reward2'], {}), '(reward2)\n', (6441, 6450), True, 'from matplotlib import pyplot as plt\n'), ((6455, 6465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6463, 6465), True, 'from matplotlib import pyplot as plt\n'), ((6188, 6227), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], 
{}), '(True)\n', (6221, 6227), False, 'import torch\n'), ((1004, 1036), 'lio.agent.lio_agent.Actor', 'Actor', (['i', '(7)', 'config.env.n_agents'], {}), '(i, 7, config.env.n_agents)\n', (1009, 1036), False, 'from lio.agent.lio_agent import Actor\n'), ((1104, 1116), 'lio.model.actor_net.Trajectory', 'Trajectory', ([], {}), '()\n', (1114, 1116), False, 'from lio.model.actor_net import Trajectory\n'), ((1801, 1826), 'torch.zeros', 'torch.zeros', (['env.n_agents'], {}), '(env.n_agents)\n', (1812, 1826), False, 'import torch\n'), ((2939, 2951), 'lio.model.actor_net.Trajectory', 'Trajectory', ([], {}), '()\n', (2949, 2951), False, 'from lio.model.actor_net import Trajectory\n'), ((3660, 3685), 'torch.zeros', 'torch.zeros', (['env.n_agents'], {}), '(env.n_agents)\n', (3671, 3685), False, 'import torch\n'), ((5520, 5549), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (5533, 5549), True, 'from torch.nn import functional as F\n'), ((3908, 3922), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (3919, 3922), False, 'import torch\n')] |
import numpy as np
def get_transit_mask(time, period, t0, duration, dur_mult=1):
msk = np.zeros_like(time, dtype=bool)
tt = t0
while tt < time[-1]:
msk[(time >= tt - (dur_mult*duration/2.)) & (time <= tt + (dur_mult*duration/2.))] = True
tt += period
return msk | [
"numpy.zeros_like"
] | [((95, 126), 'numpy.zeros_like', 'np.zeros_like', (['time'], {'dtype': 'bool'}), '(time, dtype=bool)\n', (108, 126), True, 'import numpy as np\n')] |
# coding: utf-8
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import numpy as np
from PIL.JpegImagePlugin import JpegImageFile
from PIL import Image, ImageEnhance
from sklearn.model_selection import train_test_split
class DataAugmentation(object):
"""DataAugmentation class.
Give the directory name containing the image you want to increase as argument.
"""
def __init__(self, target_dir):
"""Initializer for DataAugmentation.
:param target_dir: String
Target directory name
"""
self.target_dir = target_dir
self.__current_path = os.getcwd() # カレントディレクトリを取得
self.__target_path = self.__current_path + "/" + target_dir
self.__file_path = []
self.__file_name = []
def init(self): # 対象ディレクトリ内の画像ファイルの名前を全部取得
"""Gets all names of image files in the specified directory.
:return: object
"""
for filename in os.listdir(self.__target_path):
if os.path.isfile(self.__target_path + "/" + filename):
self.__file_path.append(self.__target_path + "/" + filename) # 画像へのパスを格納
self.__file_name.append(filename)
return self
@staticmethod
def __mirror(image):
"""Flips the specified image horizontally.
:param image:
:return:
"""
image = np.array(image)
image = image[:, ::-1, :]
image = Image.fromarray(image)
return image
@staticmethod
def __flip(image):
"""Flip the specified image vertically.
:param image:
:return:
"""
image = np.array(image)
image = image[::-1, :, :]
image = Image.fromarray(image)
return image
@staticmethod
def __random_brightness(image):
"""Decrease the brightness of the specified image randomly.
:param image:
:return:
"""
image = ImageEnhance.Brightness(image)
image = image.enhance(np.random.uniform(low=0.5, high=0.8))
return image
@staticmethod
def __random_contrast(image):
"""Raise the contrast of the specified image randomly.
:param image:
:return:
"""
image = ImageEnhance.Contrast(image)
image = image.enhance(np.random.uniform(low=0.3, high=0.8))
return image
@staticmethod
def _random_masked(image):
"""Based on the vertical or horizontal length of the input image,
make a mask 0.3 - 0.5 times the length of the shorter side, and mask the random position of the image.
Reference: Improved Regularization of Convolutional Neural Networks with Cutout
:param image:
:return:
"""
image = np.array(image)
mask_value = image.mean()
h, w, _ = image.shape
mask_size = np.random.uniform(0.3, 0.5) * w
if h < w:
mask_size = np.random.uniform(0.3, 0.5) * h
top = np.random.randint(0 - mask_size // 2, h - mask_size)
left = np.random.randint(0 - mask_size // 2, w - mask_size)
bottom = int(top + mask_size)
right = int(left + mask_size)
if top < 0:
top = 0
if left < 0:
left = 0
masked_image = np.copy(image)
masked_image[top:bottom, left:right, :].fill(mask_value)
masked_image = Image.fromarray(masked_image)
return masked_image
def __write_image(self, input_image, file_name):
""" Store the created image in the same directory as the original image.
:param input_image:
:param file_name:
:return: Image file
"""
try:
input_image.save(self.__target_path + "/" + file_name)
except OSError:
print("*"*20)
print("ERROR: {}".format(OSError))
print("*" * 20)
except MemoryError:
print("*" * 20)
print("ERROR: {}".format(MemoryError))
print("*" * 20)
def augment(self, mirror=False, flip=False, brightness=False, contrast=False, mask=False):
"""Effect is applied to the image based on the argument, and it saves as an image file.
:param mirror: Bool
Default: False
:param flip:
Default: False
:param brightness:
Default: False
:param contrast:
Default: False
:param mask:
Default: False
:return: Image files
"""
for i, file in enumerate(self.__file_path):
img = Image.open(file)
if mirror:
mirrored_img = self.__mirror(img)
self.__write_image(mirrored_img, "/mirrored_{}".format(self.__file_name[i]))
if flip:
flipped_img = self.__flip(img)
self.__write_image(flipped_img, "/flipped_{}".format(self.__file_name[i]))
if brightness:
brightened_img = self.__random_brightness(img)
self.__write_image(brightened_img, "/brightened_{}".format(self.__file_name[i]))
if contrast:
edited_img = self.__random_contrast(img)
self.__write_image(edited_img, "/contrasted_{}".format(self.__file_name[i]))
if mask:
for j in range(5):
masked_img = self._random_masked(img)
self.__write_image(masked_img, "/masked_{}_{}".format(j, self.__file_name[i]))
def scaling(self, scale=1.5):
"""
:param scale: Float
:return: Image file
"""
new_dirname = "scaled_{}".format(self.target_dir)
os.makedirs(new_dirname)
for i, file in enumerate(self.__file_path):
img = Image.open(file)
img = img.resize((int(img.size[0] * scale), int(img.size[1] * scale)))
try:
img.save("{}/{}/{}.jpg".format(self.__current_path, new_dirname, i))
except OSError:
print("*" * 20)
print("ERROR: {}".format(OSError))
print("*" * 20)
except MemoryError:
print("*" * 20)
print("ERROR: {}".format(MemoryError))
print("*" * 20)
class MakeCategory(object):
    """Create an .npz file which containing image arrays and labels.

    If you want to create a data-set with many categories,
    create a directory containing images as many as the number of categories,
    convert each directory to an .npz file, and use the class `BuildDataset()`
    to generate a data-set.
    """

    def __init__(self, target_dir):
        """Initializer for CreateCategory.

        :param target_dir: String
            Specify the directory name where the image is saved.
        """
        self.__current_path = os.getcwd()  # current working directory
        self.__target_path = self.__current_path + "/" + target_dir
        self.__file_names = []   # absolute paths of the JPEG files found
        self.__image_files = []  # images converted to NumPy arrays
        self.__labels = []       # one label per image

    def __get_image_name(self):
        """Gets all names of image files in the specified directory.

        Scanning stops at the first non-JPEG file encountered.

        :return: object
        """
        for filename in os.listdir(self.__target_path):
            _, fmt = os.path.splitext(filename)
            if fmt in (".jpg", ".JPG", ".jpeg", ".JPEG"):
                if os.path.isfile(self.__target_path + "/" + filename):
                    # store the full path to the image
                    self.__file_names.append(self.__target_path + "/" + filename)
            else:
                sys.stderr.write("ERROR: Contained unsupported file format. This version only supports JPEG format.\n")
                sys.stderr.write("Delete files other than JPEG format.\n")
                break
        return self

    def __read_image(self, size=(64, 64), mode="RGB"):
        """Convert the image in the specified directory to NumPy array
        based on the file name obtained by the `__get_image_name()` method.

        :param size: (Int, Int) output size passed to `Image.resize`.
        :param mode: "RGB" keeps color; "gray" converts to 8-bit grayscale.
        :return: object
        """
        for file_name in self.__file_names:
            img = Image.open(file_name)
            if mode == "gray":
                img = img.convert("L")
                img = img.resize(size)
                self.__image_files.append(np.array(img))
            elif isinstance(img, JpegImageFile):
                img = img.resize(size)
                self.__image_files.append(np.array(img))
            else:
                sys.stderr.write("ERROR: Unsupported file format. This version only supports JPEG format.\n")
                break
        return self

    def __make_label(self, label=0):
        """Create a label for supervised learning. Must be Unsigned Integer !

        :param label: Int
            The label to assign to the images. Must be Unsigned Integer !
        :return: object, or None when the label is invalid
        """
        # Enforce the documented contract: the old `type(label) is int` check
        # silently accepted negative labels.
        if isinstance(label, int) and label >= 0:
            for _ in range(len(self.__file_names)):
                self.__labels.append(label)
            return self
        print("Error: The value assigned to the label variable must be `Positive Integer`.")

    def init(self, label, size, mode="RGB"):
        """Convert the image in the target directory to NumPy array and assign an appropriate label.

        :param label: Int
            The label to assign to the images. Must be Unsigned Integer!
        :param size: (Int, Int)
        :param mode: Image mode: if "RGB" Read Image as Color, if "gray", Read Image as Gray.
        :return: object
        """
        self.__get_image_name().__read_image(size=size, mode=mode).__make_label(label=label)
        return self

    def export_category(self, filename, verbose=False):
        """Export the .npz file based on the data stored in the array.

        :param filename: String
            Name of the .npz file.
        :param verbose: Bool
            If True, display the log. Logs are output even if False when an error occurs.
        :return:
            .npz file
        """
        np_labels = np.array(self.__labels, dtype=np.uint8)
        # reshape to a column vector so each image row pairs with one label
        np_labels = np_labels.reshape(np_labels.shape[0], 1)
        np_image_files = np.array(self.__image_files, dtype=np.uint8)
        try:
            np.savez_compressed(filename, image=np_image_files, label=np_labels)
        except OSError as err:
            # print the caught error, not the exception class
            print("*" * 20)
            print("ERROR: {}".format(err))
            print("*" * 20)
        except MemoryError as err:
            print("*" * 20)
            print("ERROR: {}".format(err))
            print("*" * 20)
        else:
            if verbose:
                print("file name: {}".format(filename))
class BuildDataset(object):
    """Create a data-set based on the .npz file containing images and labels created with `CreateCategory()`.
    """

    def __init__(self, *args):
        """It reads by specifying the .npz file created by `CreateCategory()` and stores it in the array.

        :param args: String
            arg[1] : file name 1 (.npz)
            arg[2] : file name 2 (.npz)
            ...
        :raises ValueError: if no file name is given.
        """
        if not args:
            raise ValueError("At least one .npz file must be specified.")
        images = []
        labels = []
        for path in args:
            # the context manager closes each archive once its arrays are read
            with np.load(path) as npz:
                images.append(npz["image"])
                labels.append(npz["label"])
        # One concatenation instead of the old per-file np.append, which
        # re-copied the whole accumulated array on every iteration (O(n^2)).
        self.__img = np.concatenate(images, axis=0)
        self.__label = np.concatenate(labels, axis=0)

    def export_dataset(self, filename, verbose=False):
        """Export the data-set based on the data stored in the array at instance creation time.

        :param filename: String
            Name of the date set.
        :param verbose: Bool
            If True, display the log. Logs are output even if False when an error occurs.
        :return:
            .npz file
        """
        np_imgs = np.array(self.__img, dtype=np.uint8)
        np_labels = np.array(self.__label, dtype=np.uint8)
        try:
            np.savez_compressed(filename, image=np_imgs, label=np_labels)
        except OSError as err:
            # print the caught error, not the exception class
            print("*" * 20)
            print("ERROR: {}".format(err))
            print("*" * 20)
        except MemoryError as err:
            print("*" * 20)
            print("ERROR: {}".format(err))
            print("*" * 20)
        else:
            if verbose:
                print("Data set name: {}".format(filename))
class ExpandImgData(object):
    """Expands the specified data set and extracts image data and label.
    """

    def __init__(self, filename):
        """Specify the data set to be expanded.

        :param filename: String
            Target file.
        """
        # NOTE: the NpzFile handle is kept open on purpose -- its member
        # arrays are read lazily, so closing it here would invalidate them.
        self.__loaded_file = np.load(filename)
        self.__images = self.__loaded_file["image"]
        self.__labels = self.__loaded_file["label"]

    def load_data(self, test_size=0.3, division=True, shuffle=True):
        """Expands the data set, extracts image data and labels, returns lists or tuples based on arguments.

        :param test_size: Float (default=0.3)
            Percentage of test size. (It should be 0.0 ~ 1.0.)
            Set the ratio of the number of test samples.
        :param division: Bool
            Whether to split the data into batches for training and testing.
        :param shuffle: Bool
            Whether to shuffle the data before splitting into batches.
        :return: List ot Tuple
            IF division == True
                Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
            IF division == False
                List of Numpy arrays: `x, y`.
        """
        try:
            if division:
                x_train, x_test, y_train, y_test = train_test_split(
                    self.__images, self.__labels,
                    test_size=test_size, shuffle=shuffle)
                return (x_train, y_train), (x_test, y_test)
            return self.__images, self.__labels
        except MemoryError as err:
            # print the caught error, not the exception class
            print("*" * 20)
            print("ERROR: {}".format(err))
            print("*" * 20)
| [
"numpy.random.uniform",
"numpy.load",
"PIL.ImageEnhance.Brightness",
"os.makedirs",
"numpy.copy",
"os.getcwd",
"sklearn.model_selection.train_test_split",
"PIL.ImageEnhance.Contrast",
"PIL.Image.open",
"numpy.append",
"os.path.isfile",
"numpy.random.randint",
"numpy.array",
"os.path.splite... | [((1682, 1693), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1691, 1693), False, 'import os\n'), ((2096, 2126), 'os.listdir', 'os.listdir', (['self.__target_path'], {}), '(self.__target_path)\n', (2106, 2126), False, 'import os\n'), ((2520, 2535), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2528, 2535), True, 'import numpy as np\n'), ((2586, 2608), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (2601, 2608), False, 'from PIL import Image, ImageEnhance\n'), ((2788, 2803), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2796, 2803), True, 'import numpy as np\n'), ((2854, 2876), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (2869, 2876), False, 'from PIL import Image, ImageEnhance\n'), ((3089, 3119), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['image'], {}), '(image)\n', (3112, 3119), False, 'from PIL import Image, ImageEnhance\n'), ((3393, 3421), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['image'], {}), '(image)\n', (3414, 3421), False, 'from PIL import Image, ImageEnhance\n'), ((3904, 3919), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3912, 3919), True, 'import numpy as np\n'), ((4126, 4178), 'numpy.random.randint', 'np.random.randint', (['(0 - mask_size // 2)', '(h - mask_size)'], {}), '(0 - mask_size // 2, h - mask_size)\n', (4143, 4178), True, 'import numpy as np\n'), ((4194, 4246), 'numpy.random.randint', 'np.random.randint', (['(0 - mask_size // 2)', '(w - mask_size)'], {}), '(0 - mask_size // 2, w - mask_size)\n', (4211, 4246), True, 'import numpy as np\n'), ((4430, 4444), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (4437, 4444), True, 'import numpy as np\n'), ((4533, 4562), 'PIL.Image.fromarray', 'Image.fromarray', (['masked_image'], {}), '(masked_image)\n', (4548, 4562), False, 'from PIL import Image, ImageEnhance\n'), ((6834, 6858), 'os.makedirs', 'os.makedirs', (['new_dirname'], {}), '(new_dirname)\n', (6845, 
6858), False, 'import os\n'), ((7997, 8008), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8006, 8008), False, 'import os\n'), ((8453, 8483), 'os.listdir', 'os.listdir', (['self.__target_path'], {}), '(self.__target_path)\n', (8463, 8483), False, 'import os\n'), ((11484, 11523), 'numpy.array', 'np.array', (['self.__labels'], {'dtype': 'np.uint8'}), '(self.__labels, dtype=np.uint8)\n', (11492, 11523), True, 'import numpy as np\n'), ((11626, 11670), 'numpy.array', 'np.array', (['self.__image_files'], {'dtype': 'np.uint8'}), '(self.__image_files, dtype=np.uint8)\n', (11634, 11670), True, 'import numpy as np\n'), ((12568, 12584), 'numpy.load', 'np.load', (['args[0]'], {}), '(args[0])\n', (12575, 12584), True, 'import numpy as np\n'), ((12685, 12702), 'numpy.copy', 'np.copy', (['img_data'], {}), '(img_data)\n', (12692, 12702), True, 'import numpy as np\n'), ((12726, 12745), 'numpy.copy', 'np.copy', (['label_data'], {}), '(label_data)\n', (12733, 12745), True, 'import numpy as np\n'), ((13460, 13496), 'numpy.array', 'np.array', (['self.__img'], {'dtype': 'np.uint8'}), '(self.__img, dtype=np.uint8)\n', (13468, 13496), True, 'import numpy as np\n'), ((13517, 13555), 'numpy.array', 'np.array', (['self.__label'], {'dtype': 'np.uint8'}), '(self.__label, dtype=np.uint8)\n', (13525, 13555), True, 'import numpy as np\n'), ((14301, 14318), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (14308, 14318), True, 'import numpy as np\n'), ((2143, 2194), 'os.path.isfile', 'os.path.isfile', (["(self.__target_path + '/' + filename)"], {}), "(self.__target_path + '/' + filename)\n", (2157, 2194), False, 'import os\n'), ((3150, 3186), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.5)', 'high': '(0.8)'}), '(low=0.5, high=0.8)\n', (3167, 3186), True, 'import numpy as np\n'), ((3452, 3488), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.3)', 'high': '(0.8)'}), '(low=0.3, high=0.8)\n', (3469, 3488), True, 'import numpy as np\n'), ((4005, 4032), 
'numpy.random.uniform', 'np.random.uniform', (['(0.3)', '(0.5)'], {}), '(0.3, 0.5)\n', (4022, 4032), True, 'import numpy as np\n'), ((5731, 5747), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (5741, 5747), False, 'from PIL import Image, ImageEnhance\n'), ((6930, 6946), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (6940, 6946), False, 'from PIL import Image, ImageEnhance\n'), ((8506, 8532), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (8522, 8532), False, 'import os\n'), ((9410, 9431), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (9420, 9431), False, 'from PIL import Image, ImageEnhance\n'), ((11696, 11764), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {'image': 'np_image_files', 'label': 'np_labels'}), '(filename, image=np_image_files, label=np_labels)\n', (11715, 11764), True, 'import numpy as np\n'), ((12809, 12825), 'numpy.load', 'np.load', (['args[i]'], {}), '(args[i])\n', (12816, 12825), True, 'import numpy as np\n'), ((12939, 12978), 'numpy.append', 'np.append', (['self.__img', 'img_data'], {'axis': '(0)'}), '(self.__img, img_data, axis=0)\n', (12948, 12978), True, 'import numpy as np\n'), ((13006, 13049), 'numpy.append', 'np.append', (['self.__label', 'label_data'], {'axis': '(0)'}), '(self.__label, label_data, axis=0)\n', (13015, 13049), True, 'import numpy as np\n'), ((13582, 13643), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {'image': 'np_imgs', 'label': 'np_labels'}), '(filename, image=np_imgs, label=np_labels)\n', (13601, 13643), True, 'import numpy as np\n'), ((4079, 4106), 'numpy.random.uniform', 'np.random.uniform', (['(0.3)', '(0.5)'], {}), '(0.3, 0.5)\n', (4096, 4106), True, 'import numpy as np\n'), ((8635, 8686), 'os.path.isfile', 'os.path.isfile', (["(self.__target_path + '/' + filename)"], {}), "(self.__target_path + '/' + filename)\n", (8649, 8686), False, 'import os\n'), ((8818, 8934), 'sys.stderr.write', 
'sys.stderr.write', (['"""ERROR: Contained unsupported file format. This version only supports JPEG format.\n"""'], {}), '(\n """ERROR: Contained unsupported file format. This version only supports JPEG format.\n"""\n )\n', (8834, 8934), False, 'import sys\n'), ((8938, 8996), 'sys.stderr.write', 'sys.stderr.write', (['"""Delete files other than JPEG format.\n"""'], {}), "('Delete files other than JPEG format.\\n')\n", (8954, 8996), False, 'import sys\n'), ((9563, 9576), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9571, 9576), True, 'import numpy as np\n'), ((15323, 15412), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.__images', 'self.__labels'], {'test_size': 'test_size', 'shuffle': 'shuffle'}), '(self.__images, self.__labels, test_size=test_size, shuffle\n =shuffle)\n', (15339, 15412), False, 'from sklearn.model_selection import train_test_split\n'), ((9759, 9772), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9767, 9772), True, 'import numpy as np\n'), ((9866, 9969), 'sys.stderr.write', 'sys.stderr.write', (['"""ERROR: Unsupported file format. This version only supports JPEG format.\n"""'], {}), "(\n 'ERROR: Unsupported file format. This version only supports JPEG format.\\n'\n )\n", (9882, 9969), False, 'import sys\n')] |
import matplotlib.pyplot as plt
import numpy as np
def plot_global_map(globalmapfile):
    """Scatter-plot the pole landmark positions stored in an .npz global map
    and save the figure next to the input file as an .svg.

    :param globalmapfile: path to an .npz archive containing a 'poleparams'
        array whose first two columns are the x/y coordinates of each pole.
    """
    # Context manager closes the npz archive; the old code leaked the handle.
    with np.load(globalmapfile) as data:
        x, y = data['poleparams'][:, :2].T
    plt.clf()
    plt.scatter(x, y, s=1, c='b', marker='.')
    plt.xlabel('x [m]')
    plt.ylabel('y [m]')
    plt.savefig(globalmapfile[:-4] + '.svg')
if __name__ == "__main__":
    # Run only when executed as a script, not when this module is imported.
    plot_global_map('data/pole-dataset/NCLT/globalmap_gt_cluster.npz')
"numpy.load",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((99, 121), 'numpy.load', 'np.load', (['globalmapfile'], {}), '(globalmapfile)\n', (106, 121), True, 'import numpy as np\n'), ((165, 174), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (172, 174), True, 'import matplotlib.pyplot as plt\n'), ((179, 220), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': '(1)', 'c': '"""b"""', 'marker': '"""."""'}), "(x, y, s=1, c='b', marker='.')\n", (190, 220), True, 'import matplotlib.pyplot as plt\n'), ((225, 244), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x [m]"""'], {}), "('x [m]')\n", (235, 244), True, 'import matplotlib.pyplot as plt\n'), ((249, 268), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y [m]"""'], {}), "('y [m]')\n", (259, 268), True, 'import matplotlib.pyplot as plt\n'), ((273, 313), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(globalmapfile[:-4] + '.svg')"], {}), "(globalmapfile[:-4] + '.svg')\n", (284, 313), True, 'import matplotlib.pyplot as plt\n')] |
from pyrep.backend import sim, utils
from pyrep.objects import Object
from pyrep.objects.dummy import Dummy
from pyrep.robots.configuration_paths.arm_configuration_path import (
ArmConfigurationPath)
from pyrep.robots.robot_component import RobotComponent
from pyrep.objects.cartesian_path import CartesianPath
from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError
from pyrep.const import ConfigurationPathAlgorithms as Algos
from pyrep.const import PYREP_SCRIPT_TYPE
from typing import List, Union
import numpy as np
class Arm(RobotComponent):
    """Base class representing a robot arm with path planning support.
    """

    def __init__(self, count: int, name: str, num_joints: int,
                 base_name: str = None,
                 max_velocity=1.0, max_acceleration=4.0, max_jerk=1000):
        """Count is used for when we have multiple copies of arms"""
        joint_names = ['%s_joint%d' % (name, i + 1) for i in range(num_joints)]
        super().__init__(count, name, joint_names, base_name)

        # Used for motion planning
        self.max_velocity = max_velocity
        self.max_acceleration = max_acceleration
        self.max_jerk = max_jerk

        # Motion planning handles. Scene objects of copies are suffixed
        # '#0', '#1', ... by CoppeliaSim, hence the handle name suffix.
        suffix = '' if count == 0 else '#%d' % (count - 1)
        self._ik_target = Dummy('%s_target%s' % (name, suffix))
        self._ik_tip = Dummy('%s_tip%s' % (name, suffix))
        self._ik_group = sim.simGetIkGroupHandle('%s_ik%s' % (name, suffix))
        self._collision_collection = sim.simGetCollectionHandle(
            '%s_arm%s' % (name, suffix))

    def set_ik_element_properties(self, constraint_x=True, constraint_y=True,
                                  constraint_z=True,
                                  constraint_alpha_beta=True,
                                  constraint_gamma=True) -> None:
        """Enable/disable the position and orientation constraints of the
        arm's IK element (which axes the solver must match at the tip)."""
        constraints = 0
        if constraint_x:
            constraints |= sim.sim_ik_x_constraint
        if constraint_y:
            constraints |= sim.sim_ik_y_constraint
        if constraint_z:
            constraints |= sim.sim_ik_z_constraint
        if constraint_alpha_beta:
            constraints |= sim.sim_ik_alpha_beta_constraint
        if constraint_gamma:
            constraints |= sim.sim_ik_gamma_constraint
        sim.simSetIkElementProperties(
            ikGroupHandle=self._ik_group,
            tipDummyHandle=self._ik_tip.get_handle(),
            constraints=constraints,
            precision=None,
            weight=None,
        )

    def set_ik_group_properties(self, resolution_method='pseudo_inverse',
                                max_iterations=6, dls_damping=0.1) -> None:
        """Configure the IK group's solver method and iteration budget.

        :raises: Exception if resolution_method is not one of
            "pseudo_inverse" | "damped_least_squares" | "jacobian_transpose".
        """
        try:
            res_method = {'pseudo_inverse': sim.sim_ik_pseudo_inverse_method,
                          'damped_least_squares': sim.sim_ik_damped_least_squares_method,
                          'jacobian_transpose': sim.sim_ik_jacobian_transpose_method}[resolution_method]
        except KeyError:
            raise Exception('Invalid resolution method,'
                            'Must be one of ["pseudo_inverse" | "damped_least_squares" | "jacobian_transpose"]')
        sim.simSetIkGroupProperties(
            ikGroupHandle=self._ik_group,
            resolutionMethod=res_method,
            maxIterations=max_iterations,
            damping=dls_damping
        )

    def get_configs_for_tip_pose(self,
                                 position: Union[List[float], np.ndarray],
                                 euler: Union[List[float], np.ndarray] = None,
                                 quaternion: Union[List[float], np.ndarray] = None,
                                 ignore_collisions=False,
                                 trials=300, max_configs=60,
                                 relative_to: Object = None
                                 ) -> List[List[float]]:
        """Gets a valid joint configuration for a desired end effector pose.
        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs
        :param max_configs: The maximum number of configurations we want to
            generate before ranking them.
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationError if no joint configuration could be found.

        :return: A list of valid joint configurations for the desired
            end effector pose.
        """
        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationPathError(
                'Specify either euler or quaternion values, but not both.')

        # Temporarily move the IK target to the requested pose; restored below.
        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position, relative_to)
        if euler is not None:
            self._ik_target.set_orientation(euler, relative_to)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion, relative_to)

        handles = [j.get_handle() for j in self.joints]

        # Despite verbosity being set to 0, OMPL spits out a lot of text
        with utils.suppress_std_out_and_err():
            _, ret_floats, _, _ = utils.script_call(
                'findSeveralCollisionFreeConfigsAndCheckApproach@PyRep',
                PYREP_SCRIPT_TYPE,
                ints=[self._ik_group, self._collision_collection,
                      int(ignore_collisions), trials, max_configs] + handles)
        self._ik_target.set_pose(prev_pose)
        if len(ret_floats) == 0:
            raise ConfigurationError(
                'Could not find a valid joint configuration for desired end effector pose.')

        # ret_floats is a flat array of configs; reshape to (num_configs, num_joints).
        num_configs = int(len(ret_floats) / len(handles))
        return [[ret_floats[len(handles) * i + j]
                 for j in range(len(handles))] for i in range(num_configs)]

    def solve_ik(self, position: Union[List[float], np.ndarray],
                 euler: Union[List[float], np.ndarray] = None,
                 quaternion: Union[List[float], np.ndarray] = None,
                 relative_to: Object = None) -> List[float]:
        """Solves an IK group and returns the calculated joint values.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :return: A list containing the calculated joint values.
        """
        self._ik_target.set_position(position, relative_to)
        if euler is not None:
            self._ik_target.set_orientation(euler, relative_to)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion, relative_to)
        ik_result, joint_values = sim.simCheckIkGroup(
            self._ik_group, [j.get_handle() for j in self.joints])
        if ik_result == sim.sim_ikresult_fail:
            # (message grammar fixed from the original)
            raise IKError('IK failed. Perhaps the distance between the tip '
                          'and target was too large.')
        elif ik_result == sim.sim_ikresult_not_performed:
            raise IKError('IK not performed.')
        return joint_values

    def get_path_from_cartesian_path(self, path: CartesianPath
                                     ) -> ArmConfigurationPath:
        """Translate a path from cartesian space, to arm configuration space.

        Note: It must be possible to reach the start of the path via a linear
        path, otherwise an error will be raised.

        :param path: A :py:class:`CartesianPath` instance to be translated to
            a configuration-space path.
        :raises: ConfigurationPathError if no path could be created.

        :return: A path in the arm configuration space.
        """
        handles = [j.get_handle() for j in self.joints]
        _, ret_floats, _, _ = utils.script_call(
            'getPathFromCartesianPath@PyRep', PYREP_SCRIPT_TYPE,
            ints=[path.get_handle(), self._ik_group,
                  self._ik_target.get_handle()] + handles)
        if len(ret_floats) == 0:
            raise ConfigurationPathError(
                'Could not create a path from cartesian path.')
        return ArmConfigurationPath(self, ret_floats)

    def get_linear_path(self, position: Union[List[float], np.ndarray],
                        euler: Union[List[float], np.ndarray] = None,
                        quaternion: Union[List[float], np.ndarray] = None,
                        steps=50, ignore_collisions=False,
                        relative_to: Object = None) -> ArmConfigurationPath:
        """Gets a linear configuration path given a target pose.

        Generates a path that drives a robot from its current configuration
        to its target dummy in a straight line (i.e. shortest path in Cartesian
        space).

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param steps: The desired number of path points. Each path point
            contains a robot configuration. A minimum of two path points is
            required. If the target pose distance is large, a larger number
            of steps leads to better results for this function.
        :param ignore_collisions: If collision checking should be disabled.
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationPathError if no path could be created.

        :return: A linear path in the arm configuration space.
        """
        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationPathError(
                'Specify either euler or quaternion values, but not both.')

        # Temporarily move the IK target to the requested pose; restored below.
        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position, relative_to)
        if euler is not None:
            self._ik_target.set_orientation(euler, relative_to)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion, relative_to)
        handles = [j.get_handle() for j in self.joints]

        # Despite verbosity being set to 0, OMPL spits out a lot of text
        with utils.suppress_std_out_and_err():
            _, ret_floats, _, _ = utils.script_call(
                'getLinearPath@PyRep', PYREP_SCRIPT_TYPE,
                ints=[steps, self._ik_group, self._collision_collection,
                      int(ignore_collisions)] + handles)
        self._ik_target.set_pose(prev_pose)

        if len(ret_floats) == 0:
            raise ConfigurationPathError('Could not create path.')
        return ArmConfigurationPath(self, ret_floats)

    def get_nonlinear_path(self, position: Union[List[float], np.ndarray],
                           euler: Union[List[float], np.ndarray] = None,
                           quaternion: Union[List[float], np.ndarray] = None,
                           ignore_collisions=False,
                           trials=100, max_configs=60, trials_per_goal=6,
                           algorithm=Algos.SBL, relative_to: Object = None
                           ) -> ArmConfigurationPath:
        """Gets a non-linear (planned) configuration path given a target pose.

        A path is generated by finding several configs for a pose, and ranking
        them according to the distance in configuration space (smaller is
        better).

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs
        :param max_configs: The maximum number of configurations we want to
            generate before ranking them.
        :param trials_per_goal: The number of paths per config we want to trial.
        :param algorithm: The algorithm for path planning to use.
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationPathError if no path could be created.

        :return: A non-linear path in the arm configuration space.
        """
        if not ((euler is None) ^ (quaternion is None)):
            raise ConfigurationPathError(
                'Specify either euler or quaternion values, but not both.')

        # Temporarily move the IK target to the requested pose; restored below.
        prev_pose = self._ik_target.get_pose()
        self._ik_target.set_position(position, relative_to)
        if euler is not None:
            self._ik_target.set_orientation(euler, relative_to)
        elif quaternion is not None:
            self._ik_target.set_quaternion(quaternion, relative_to)

        handles = [j.get_handle() for j in self.joints]

        # Despite verbosity being set to 0, OMPL spits out a lot of text
        with utils.suppress_std_out_and_err():
            _, ret_floats, _, _ = utils.script_call(
                'getNonlinearPath@PyRep', PYREP_SCRIPT_TYPE,
                ints=[self._ik_group, self._collision_collection,
                      int(ignore_collisions), trials, max_configs,
                      trials_per_goal] + handles, strings=[algorithm.value])
        self._ik_target.set_pose(prev_pose)

        if len(ret_floats) == 0:
            raise ConfigurationPathError('Could not create path.')
        return ArmConfigurationPath(self, ret_floats)

    def get_path(self, position: Union[List[float], np.ndarray],
                 euler: Union[List[float], np.ndarray] = None,
                 quaternion: Union[List[float], np.ndarray] = None,
                 ignore_collisions=False,
                 trials=100, max_configs=60, trials_per_goal=6,
                 algorithm=Algos.SBL, relative_to: Object = None
                 ) -> ArmConfigurationPath:
        """Tries to get a linear path, failing that tries a non-linear path.

        Must specify either rotation in euler or quaternions, but not both!

        :param position: The x, y, z position of the target.
        :param euler: The x, y, z orientation of the target (in radians).
        :param quaternion: A list containing the quaternion (x,y,z,w).
        :param ignore_collisions: If collision checking should be disabled.
        :param trials: The maximum number of attempts to reach max_configs.
            (Only applicable if a non-linear path is needed)
        :param max_configs: The maximum number of configurations we want to
            generate before ranking them.
            (Only applicable if a non-linear path is needed)
        :param trials_per_goal: The number of paths per config we want to trial.
            (Only applicable if a non-linear path is needed)
        :param algorithm: The algorithm for path planning to use.
            (Only applicable if a non-linear path is needed)
        :param relative_to: Indicates relative to which reference frame we want
            the target pose. Specify None to retrieve the absolute pose,
            or an Object relative to whose reference frame we want the pose.
        :raises: ConfigurationPathError if neither a linear or non-linear path
            can be created.
        :return: A linear or non-linear path in the arm configuration space.
        """
        try:
            p = self.get_linear_path(position, euler, quaternion,
                                     ignore_collisions=ignore_collisions,
                                     relative_to=relative_to)
            return p
        except ConfigurationPathError:
            pass  # Allowed. Try again, but with non-linear.

        # This time if an exception is thrown, we dont want to catch it.
        p = self.get_nonlinear_path(
            position, euler, quaternion, ignore_collisions, trials, max_configs,
            trials_per_goal, algorithm, relative_to=relative_to)
        return p

    def get_tip(self) -> Dummy:
        """Gets the tip of the arm.

        Each arm is required to have a tip for path planning.

        :return: The tip of the arm.
        """
        return self._ik_tip

    def get_jacobian(self) -> np.ndarray:
        """Calculates the Jacobian.

        :return: the row-major Jacobian matrix.
        """
        # Move the target onto the tip so the IK check evaluates the Jacobian
        # at the arm's current configuration.
        self._ik_target.set_matrix(self._ik_tip.get_matrix())
        sim.simCheckIkGroup(self._ik_group,
                            [j.get_handle() for j in self.joints])
        jacobian, (rows, cols) = sim.simGetIkGroupMatrix(self._ik_group, 0)
        # The sim API returns column-major data, hence order='F'.
        jacobian = np.array(jacobian).reshape((rows, cols), order='F')
        return jacobian
| [
"pyrep.errors.ConfigurationPathError",
"pyrep.backend.sim.simSetIkGroupProperties",
"pyrep.backend.utils.suppress_std_out_and_err",
"pyrep.backend.sim.simGetIkGroupMatrix",
"pyrep.objects.dummy.Dummy",
"pyrep.backend.sim.simGetCollectionHandle",
"pyrep.errors.ConfigurationError",
"pyrep.robots.configu... | [((1318, 1355), 'pyrep.objects.dummy.Dummy', 'Dummy', (["('%s_target%s' % (name, suffix))"], {}), "('%s_target%s' % (name, suffix))\n", (1323, 1355), False, 'from pyrep.objects.dummy import Dummy\n'), ((1379, 1413), 'pyrep.objects.dummy.Dummy', 'Dummy', (["('%s_tip%s' % (name, suffix))"], {}), "('%s_tip%s' % (name, suffix))\n", (1384, 1413), False, 'from pyrep.objects.dummy import Dummy\n'), ((1439, 1490), 'pyrep.backend.sim.simGetIkGroupHandle', 'sim.simGetIkGroupHandle', (["('%s_ik%s' % (name, suffix))"], {}), "('%s_ik%s' % (name, suffix))\n", (1462, 1490), False, 'from pyrep.backend import sim, utils\n'), ((1528, 1583), 'pyrep.backend.sim.simGetCollectionHandle', 'sim.simGetCollectionHandle', (["('%s_arm%s' % (name, suffix))"], {}), "('%s_arm%s' % (name, suffix))\n", (1554, 1583), False, 'from pyrep.backend import sim, utils\n'), ((3130, 3272), 'pyrep.backend.sim.simSetIkGroupProperties', 'sim.simSetIkGroupProperties', ([], {'ikGroupHandle': 'self._ik_group', 'resolutionMethod': 'res_method', 'maxIterations': 'max_iterations', 'damping': 'dls_damping'}), '(ikGroupHandle=self._ik_group, resolutionMethod=\n res_method, maxIterations=max_iterations, damping=dls_damping)\n', (3157, 3272), False, 'from pyrep.backend import sim, utils\n'), ((8835, 8873), 'pyrep.robots.configuration_paths.arm_configuration_path.ArmConfigurationPath', 'ArmConfigurationPath', (['self', 'ret_floats'], {}), '(self, ret_floats)\n', (8855, 8873), False, 'from pyrep.robots.configuration_paths.arm_configuration_path import ArmConfigurationPath\n'), ((11542, 11580), 'pyrep.robots.configuration_paths.arm_configuration_path.ArmConfigurationPath', 'ArmConfigurationPath', (['self', 'ret_floats'], {}), '(self, ret_floats)\n', (11562, 11580), False, 'from pyrep.robots.configuration_paths.arm_configuration_path import ArmConfigurationPath\n'), ((14529, 14567), 'pyrep.robots.configuration_paths.arm_configuration_path.ArmConfigurationPath', 'ArmConfigurationPath', (['self', 
'ret_floats'], {}), '(self, ret_floats)\n', (14549, 14567), False, 'from pyrep.robots.configuration_paths.arm_configuration_path import ArmConfigurationPath\n'), ((17572, 17614), 'pyrep.backend.sim.simGetIkGroupMatrix', 'sim.simGetIkGroupMatrix', (['self._ik_group', '(0)'], {}), '(self._ik_group, 0)\n', (17595, 17614), False, 'from pyrep.backend import sim, utils\n'), ((4953, 5040), 'pyrep.errors.ConfigurationPathError', 'ConfigurationPathError', (['"""Specify either euler or quaternion values, but not both."""'], {}), "(\n 'Specify either euler or quaternion values, but not both.')\n", (4975, 5040), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((5504, 5536), 'pyrep.backend.utils.suppress_std_out_and_err', 'utils.suppress_std_out_and_err', ([], {}), '()\n', (5534, 5536), False, 'from pyrep.backend import sim, utils\n'), ((5923, 6028), 'pyrep.errors.ConfigurationError', 'ConfigurationError', (['"""Could not find a valid joint configuration for desired end effector pose."""'], {}), "(\n 'Could not find a valid joint configuration for desired end effector pose.'\n )\n", (5941, 6028), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((7556, 7655), 'pyrep.errors.IKError', 'IKError', (['"""IK failed. Perhaps the distance was between the tip and target was too large."""'], {}), "(\n 'IK failed. 
Perhaps the distance was between the tip and target was too large.'\n )\n", (7563, 7655), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((8732, 8802), 'pyrep.errors.ConfigurationPathError', 'ConfigurationPathError', (['"""Could not create a path from cartesian path."""'], {}), "('Could not create a path from cartesian path.')\n", (8754, 8802), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((10557, 10644), 'pyrep.errors.ConfigurationPathError', 'ConfigurationPathError', (['"""Specify either euler or quaternion values, but not both."""'], {}), "(\n 'Specify either euler or quaternion values, but not both.')\n", (10579, 10644), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((11107, 11139), 'pyrep.backend.utils.suppress_std_out_and_err', 'utils.suppress_std_out_and_err', ([], {}), '()\n', (11137, 11139), False, 'from pyrep.backend import sim, utils\n'), ((11478, 11526), 'pyrep.errors.ConfigurationPathError', 'ConfigurationPathError', (['"""Could not create path."""'], {}), "('Could not create path.')\n", (11500, 11526), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((13460, 13547), 'pyrep.errors.ConfigurationPathError', 'ConfigurationPathError', (['"""Specify either euler or quaternion values, but not both."""'], {}), "(\n 'Specify either euler or quaternion values, but not both.')\n", (13482, 13547), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((14011, 14043), 'pyrep.backend.utils.suppress_std_out_and_err', 'utils.suppress_std_out_and_err', ([], {}), '()\n', (14041, 14043), False, 'from pyrep.backend import sim, utils\n'), ((14465, 14513), 'pyrep.errors.ConfigurationPathError', 'ConfigurationPathError', (['"""Could not create path."""'], {}), "('Could not create path.')\n", (14487, 14513), False, 'from pyrep.errors import ConfigurationError, 
ConfigurationPathError, IKError\n'), ((7751, 7779), 'pyrep.errors.IKError', 'IKError', (['"""IK not performed."""'], {}), "('IK not performed.')\n", (7758, 7779), False, 'from pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\n'), ((17634, 17652), 'numpy.array', 'np.array', (['jacobian'], {}), '(jacobian)\n', (17642, 17652), True, 'import numpy as np\n')] |
from keras import backend as K
import imutils
from keras.models import load_model
import numpy as np
import keras
import requests
from scipy.spatial import distance as dist
from imutils import face_utils
import time
import dlib
import cv2,os,sys
import collections
import random
import face_recognition
import pickle
import math
import threading
import tensorflow as tf
# Force TensorFlow onto the CPU: expose 0 GPU devices and use 4 threads
# for both intra-op and inter-op parallelism.
# NOTE: tf.ConfigProto / tf.Session are TensorFlow 1.x APIs.
num_cores = 4
num_CPU = 1  # number of CPU devices exposed to TF
num_GPU = 0  # 0 => CPU-only execution
config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,
        inter_op_parallelism_threads=num_cores, 
        allow_soft_placement=True,
        device_count = {'CPU' : num_CPU,
                       'GPU' : num_GPU}
       )
session = tf.Session(config=config)
# Make this session the default for Keras so model.predict runs under it.
K.set_session(session)
class FacialLandMarksPosition:
    """
    Slice boundaries into the 68-point facial landmark array produced by
    dlib's shape predictor, as published by imutils. Each pair is
    (start index inclusive, end index exclusive) for that feature's points.
    """
    left_eye_start_index, left_eye_end_index = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    right_eye_start_index, right_eye_end_index = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# Pre-trained dlib 68-point facial landmark model (loaded once at import).
facial_landmarks_predictor = './models/68_face_landmarks_predictor.dat'
predictor = dlib.shape_predictor(facial_landmarks_predictor)
# Keras CNN that classifies a 20x10 grayscale eye crop as open vs. closed.
model = load_model('./models/weights.149-0.01.hdf5')
def predict_eye_state(model, image):
    """Classify a cropped eye image with the given Keras model.

    The crop is resized to 20x10 pixels, cast to float32, shaped into a
    single-sample batch of (1, 10, 20, 1), and normalized with the
    MobileNet preprocessing before scoring.

    Returns the index of the highest-scoring class.
    """
    resized = cv2.resize(image, (20, 10))
    batch = np.reshape(resized.astype(dtype=np.float32), (1, 10, 20, 1))
    batch = keras.applications.mobilenet.preprocess_input(batch)
    scores = model.predict(batch)[0]
    return np.argmax(scores)
# Main capture loop: grab webcam frames, detect a face, locate both eyes,
# classify each as open/closed, and draw a green (both open) or red box.
cap = cv2.VideoCapture(0)
scale = 0.5  # downscale factor used only for face *detection* speed
while(True):
    c = time.time()
    # Capture frame-by-frame
    ret, frame = cap.read()
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    original_height, original_width = image.shape[:2]
    # Detect on a half-size L (lightness) channel; it is cheaper than full RGB.
    resized_image = cv2.resize(image, (0, 0), fx=scale, fy=scale)
    lab = cv2.cvtColor(resized_image, cv2.COLOR_BGR2LAB)
    l, _, _ = cv2.split(lab)
    resized_height, resized_width = l.shape[:2]
    height_ratio, width_ratio = original_height / resized_height, original_width / resized_width
    face_locations = face_recognition.face_locations(l, model='hog')
    if len(face_locations):
        # Only the first detected face is processed.
        top, right, bottom, left = face_locations[0]
        x1, y1, x2, y2 = left, top, right, bottom
        # Map the detection box back to full-resolution coordinates.
        x1 = int(x1 * width_ratio)
        y1 = int(y1 * height_ratio)
        x2 = int(x2 * width_ratio)
        y2 = int(y2 * height_ratio)
        # draw face rectangle
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        shape = predictor(gray, dlib.rectangle(x1, y1, x2, y2))
        face_landmarks = face_utils.shape_to_np(shape)
        # Crop each eye from the grayscale frame using its landmark bounding box.
        left_eye_indices = face_landmarks[FacialLandMarksPosition.left_eye_start_index:
                                         FacialLandMarksPosition.left_eye_end_index]
        (x, y, w, h) = cv2.boundingRect(np.array([left_eye_indices]))
        left_eye = gray[y:y + h, x:x + w]
        right_eye_indices = face_landmarks[FacialLandMarksPosition.right_eye_start_index:
                                          FacialLandMarksPosition.right_eye_end_index]
        (x, y, w, h) = cv2.boundingRect(np.array([right_eye_indices]))
        right_eye = gray[y:y + h, x:x + w]
        left_eye_open = 'yes' if predict_eye_state(model=model, image=left_eye) else 'no'
        right_eye_open = 'yes' if predict_eye_state(model=model, image=right_eye) else 'no'
        print('left eye open: {0} right eye open: {1}'.format(left_eye_open, right_eye_open))
        if left_eye_open == 'yes' and right_eye_open == 'yes':
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        else:
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
        cv2.imshow('right_eye', right_eye)
        cv2.imshow('left_eye', left_eye)
    # Mirror the frame for display so it behaves like a selfie view.
    cv2.imshow('frame', cv2.flip(frame, 1))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"keras.models.load_model",
"tensorflow.ConfigProto",
"imutils.face_utils.shape_to_np",
"cv2.rectangle",
"dlib.rectangle",
"cv2.imshow",
"dlib.shape_predictor",
"cv2.cvtColor",
"cv2.split",
"numpy.reshape",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.waitKey",
"keras.backend.set_session",
... | [((420, 596), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': 'num_cores', 'inter_op_parallelism_threads': 'num_cores', 'allow_soft_placement': '(True)', 'device_count': "{'CPU': num_CPU, 'GPU': num_GPU}"}), "(intra_op_parallelism_threads=num_cores,\n inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\n device_count={'CPU': num_CPU, 'GPU': num_GPU})\n", (434, 596), True, 'import tensorflow as tf\n'), ((741, 766), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (751, 766), True, 'import tensorflow as tf\n'), ((767, 789), 'keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (780, 789), True, 'from keras import backend as K\n'), ((1294, 1342), 'dlib.shape_predictor', 'dlib.shape_predictor', (['facial_landmarks_predictor'], {}), '(facial_landmarks_predictor)\n', (1314, 1342), False, 'import dlib\n'), ((1353, 1397), 'keras.models.load_model', 'load_model', (['"""./models/weights.149-0.01.hdf5"""'], {}), "('./models/weights.149-0.01.hdf5')\n", (1363, 1397), False, 'from keras.models import load_model\n'), ((1719, 1738), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1735, 1738), False, 'import cv2, os, sys\n'), ((4141, 4164), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4162, 4164), False, 'import cv2, os, sys\n'), ((1448, 1475), 'cv2.resize', 'cv2.resize', (['image', '(20, 10)'], {}), '(image, (20, 10))\n', (1458, 1475), False, 'import cv2, os, sys\n'), ((1546, 1579), 'numpy.reshape', 'np.reshape', (['image', '(1, 10, 20, 1)'], {}), '(image, (1, 10, 20, 1))\n', (1556, 1579), True, 'import numpy as np\n'), ((1598, 1656), 'keras.applications.mobilenet.preprocess_input', 'keras.applications.mobilenet.preprocess_input', (['image_batch'], {}), '(image_batch)\n', (1643, 1656), False, 'import keras\n'), ((1772, 1783), 'time.time', 'time.time', ([], {}), '()\n', (1781, 1783), False, 'import time\n'), ((1855, 1893), 
'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (1867, 1893), False, 'import cv2, os, sys\n'), ((1970, 2015), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': 'scale', 'fy': 'scale'}), '(image, (0, 0), fx=scale, fy=scale)\n', (1980, 2015), False, 'import cv2, os, sys\n'), ((2027, 2073), 'cv2.cvtColor', 'cv2.cvtColor', (['resized_image', 'cv2.COLOR_BGR2LAB'], {}), '(resized_image, cv2.COLOR_BGR2LAB)\n', (2039, 2073), False, 'import cv2, os, sys\n'), ((2089, 2103), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (2098, 2103), False, 'import cv2, os, sys\n'), ((2272, 2319), 'face_recognition.face_locations', 'face_recognition.face_locations', (['l'], {'model': '"""hog"""'}), "(l, model='hog')\n", (2303, 2319), False, 'import face_recognition\n'), ((2642, 2681), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (2654, 2681), False, 'import cv2, os, sys\n'), ((2780, 2809), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (2802, 2809), False, 'from imutils import face_utils\n'), ((3904, 3938), 'cv2.imshow', 'cv2.imshow', (['"""right_eye"""', 'right_eye'], {}), "('right_eye', right_eye)\n", (3914, 3938), False, 'import cv2, os, sys\n'), ((3947, 3979), 'cv2.imshow', 'cv2.imshow', (['"""left_eye"""', 'left_eye'], {}), "('left_eye', left_eye)\n", (3957, 3979), False, 'import cv2, os, sys\n'), ((4005, 4023), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (4013, 4023), False, 'import cv2, os, sys\n'), ((2714, 2744), 'dlib.rectangle', 'dlib.rectangle', (['x1', 'y1', 'x2', 'y2'], {}), '(x1, y1, x2, y2)\n', (2728, 2744), False, 'import dlib\n'), ((3026, 3054), 'numpy.array', 'np.array', (['[left_eye_indices]'], {}), '([left_eye_indices])\n', (3034, 3054), True, 'import numpy as np\n'), ((3318, 3347), 'numpy.array', 'np.array', (['[right_eye_indices]'], {}), '([right_eye_indices])\n', (3326, 3347), True, 
'import numpy as np\n'), ((3755, 3811), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (3768, 3811), False, 'import cv2, os, sys\n'), ((3838, 3894), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (3851, 3894), False, 'import cv2, os, sys\n'), ((4033, 4047), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4044, 4047), False, 'import cv2, os, sys\n')] |
# yellowbrick.text.freqdist
# Implementations of frequency distributions for text visualization.
#
# Author: <NAME>
# Created: Mon Feb 20 12:38:20 2017 -0500
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: freqdist.py [67b2740] <EMAIL> $
"""
Implementations of frequency distributions for text visualization
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from operator import itemgetter
from yellowbrick.text.base import TextVisualizer
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Quick Method
##########################################################################
def freqdist(
    features, X, y=None, ax=None, n=50, orient="h", color=None, show=True, **kwargs
):
    """Quick method: plot a token frequency distribution in one call.

    Thin convenience wrapper around the ``FreqDistVisualizer``
    transformer for one-off analysis: it instantiates the visualizer,
    fits and transforms the vectorized corpus, then renders the figure.

    Parameters
    ----------
    features : list, default: None
        Feature (token) names from the vectorizer, ordered by index —
        typically the output of the transformer's ``get_feature_names()``.

    X : ndarray or DataFrame of shape n x m
        Matrix of n instances with m features; for text this is the
        already-vectorized corpus.

    y : ndarray or Series of length n
        Optional target/class values for conditional distributions.

    ax : matplotlib axes, default: None
        Axes to draw the figure on.

    n : integer, default: 50
        Number of top tokens to plot.

    orient : 'h' or 'v', default: 'h'
        Horizontal or vertical bar chart.

    color : string
        Bar color.

    show : bool, default: True
        When True, call ``show()`` (which calls ``plt.show()``);
        when False, only ``finalize()`` is called so the caller may
        still save or modify the figure.

    kwargs : dict
        Additional keyword arguments forwarded to the visualizer.

    Returns
    -------
    visualizer : FreqDistVisualizer
        The fitted, finalized visualizer.
    """
    # Build the visualizer and run the fit/transform cycle (fit draws).
    visualizer = FreqDistVisualizer(
        features, ax=ax, n=n, orient=orient, color=color, **kwargs
    )
    visualizer.fit(X, y, **kwargs)
    visualizer.transform(X)

    # Render now, or just finalize so the caller controls display/saving.
    if show:
        visualizer.show()
    else:
        visualizer.finalize()

    return visualizer
class FrequencyVisualizer(TextVisualizer):
    """
    A frequency distribution tells us the frequency of each vocabulary
    item in the text. In general, it could count any kind of observable
    event. It is a distribution because it tells us how the total
    number of word tokens in the text are distributed across the
    vocabulary items.

    Parameters
    ----------
    features : list, default: None
        The list of feature names from the vectorizer, ordered by index. E.g.
        a lexicon that specifies the unique vocabulary of the corpus. This
        can be typically fetched using the ``get_feature_names()`` method of
        the transformer in Scikit-Learn.

    ax : matplotlib axes, default: None
        The axes to plot the figure on.

    n : integer, default: 50
        Top N tokens to be plotted.

    orient : 'h' or 'v', default: 'h'
        Specifies a horizontal or vertical bar chart.

    color : string
        Specify color for bars.

    kwargs : dict
        Pass any additional keyword arguments to the super class.
        These parameters can be influenced later on in the visualization
        process, but can and should be set as early as possible.
    """

    def __init__(self, features, ax=None, n=50, orient="h", color=None, **kwargs):
        # Use zero-argument super(): the original code called
        # super(FreqDistVisualizer, self), referencing the backwards-compat
        # alias that is only bound *after* this class body executes — a
        # fragile forward reference that zero-arg super() avoids entirely.
        super().__init__(ax=ax, **kwargs)

        # Check that the orient is correct
        orient = orient.lower().strip()
        if orient not in {"h", "v"}:
            raise YellowbrickValueError("Orientation must be 'h' or 'v'")

        # Visualizer parameters
        self.N = n
        self.features = features

        # Visual arguments
        self.color = color
        self.orient = orient

    def count(self, X):
        """
        Called from the fit method, this method gets all the
        words from the corpus and their corresponding frequency
        counts.

        Parameters
        ----------
        X : ndarray or masked ndarray
            Pass in the matrix of vectorized documents, can be masked in
            order to sum the word frequencies for only a subset of documents.

        Returns
        -------
        counts : array
            A vector containing the counts of all words in X (columns)
        """
        # Sum on axis 0 (by columns), each column is a word
        # Convert the matrix to an array
        # Squeeze to remove the 1 dimension objects (like ravel)
        return np.squeeze(np.asarray(X.sum(axis=0)))

    def fit(self, X, y=None):
        """
        The fit method is the primary drawing input for the frequency
        distribution visualization. It requires vectorized lists of
        documents and a list of features, which are the actual words
        from the original corpus (needed to label the x-axis ticks).

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features representing the corpus
            of frequency vectorized documents.

        y : ndarray or DataFrame of shape n
            Labels for the documents for conditional frequency distribution.

        Notes
        -----
        .. note:: Text documents must be vectorized before ``fit()``.
        """
        # Compute the conditional word frequency
        if y is not None:
            # Fit the frequencies
            self.conditional_freqdist_ = {}

            # Conditional frequency distribution
            # NOTE(review): labels are stringified here but compared against
            # raw y values below — assumes y already contains strings; confirm.
            self.classes_ = [str(label) for label in set(y)]
            for label in self.classes_:
                self.conditional_freqdist_[label] = self.count(X[y == label])
        else:
            # No conditional frequencies
            self.conditional_freqdist_ = None

        # Frequency distribution of entire corpus.
        self.freqdist_ = self.count(X)
        self.sorted_ = self.freqdist_.argsort()[::-1]  # Descending order

        # Compute the number of words, vocab, and hapaxes
        self.vocab_ = self.freqdist_.shape[0]
        self.words_ = self.freqdist_.sum()
        self.hapaxes_ = sum(1 for c in self.freqdist_ if c == 1)

        # Draw and ensure that we return self
        self.draw()
        return self

    def draw(self, **kwargs):
        """
        Called from the fit method, this method creates the canvas and
        draws the distribution plot on it.

        Parameters
        ----------
        kwargs: generic keyword arguments.
        """
        # Prepare the data: one bar position per top-N token.
        bins = np.arange(self.N)
        words = [self.features[i] for i in self.sorted_[: self.N]]
        freqs = {}

        # Set up the bar plots (one series per class label, or the whole corpus)
        if self.conditional_freqdist_:
            for label, values in sorted(
                self.conditional_freqdist_.items(), key=itemgetter(0)
            ):
                freqs[label] = [values[i] for i in self.sorted_[: self.N]]
        else:
            freqs["corpus"] = [self.freqdist_[i] for i in self.sorted_[: self.N]]

        # Draw a horizontal barplot
        if self.orient == "h":
            # Add the barchart, stacking if necessary
            for label, freq in freqs.items():
                self.ax.barh(bins, freq, label=label, color=self.color, align="center")

            # Set the y ticks to the words
            self.ax.set_yticks(bins)
            self.ax.set_yticklabels(words)

            # Order the features from top to bottom on the y axis
            self.ax.invert_yaxis()

            # Turn off y grid lines and turn on x grid lines
            self.ax.yaxis.grid(False)
            self.ax.xaxis.grid(True)

        # Draw a vertical barplot
        elif self.orient == "v":
            # Add the barchart, stacking if necessary
            for label, freq in freqs.items():
                self.ax.bar(bins, freq, label=label, color=self.color, align="edge")

            # Set the y ticks to the words
            self.ax.set_xticks(bins)
            self.ax.set_xticklabels(words, rotation=90)

            # Turn off x grid lines and turn on y grid lines
            self.ax.yaxis.grid(True)
            self.ax.xaxis.grid(False)

        # Unknown state
        else:
            raise YellowbrickValueError("Orientation must be 'h' or 'v'")

        return self.ax

    def finalize(self, **kwargs):
        """
        The finalize method executes any subclass-specific axes
        finalization steps. The user calls show & show calls finalize.

        Parameters
        ----------
        kwargs: generic keyword arguments.
        """
        # Set the title
        self.set_title("Frequency Distribution of Top {} tokens".format(self.N))

        # Create the vocab, count, and hapaxes labels
        infolabel = "vocab: {:,}\nwords: {:,}\nhapax: {:,}".format(
            self.vocab_, self.words_, self.hapaxes_
        )
        self.ax.text(
            0.68,
            0.97,
            infolabel,
            transform=self.ax.transAxes,
            fontsize=9,
            verticalalignment="top",
            bbox={"boxstyle": "round", "facecolor": "white", "alpha": 0.8},
        )

        # Set the legend and the grid
        self.ax.legend(loc="upper right", frameon=True)
# Backwards compatibility alias: FreqDistVisualizer was the original public
# name of this visualizer; keep it importable for existing user code.
FreqDistVisualizer = FrequencyVisualizer
| [
"operator.itemgetter",
"numpy.arange",
"yellowbrick.exceptions.YellowbrickValueError"
] | [((7314, 7331), 'numpy.arange', 'np.arange', (['self.N'], {}), '(self.N)\n', (7323, 7331), True, 'import numpy as np\n'), ((4317, 4372), 'yellowbrick.exceptions.YellowbrickValueError', 'YellowbrickValueError', (['"""Orientation must be \'h\' or \'v\'"""'], {}), '("Orientation must be \'h\' or \'v\'")\n', (4338, 4372), False, 'from yellowbrick.exceptions import YellowbrickValueError\n'), ((8989, 9044), 'yellowbrick.exceptions.YellowbrickValueError', 'YellowbrickValueError', (['"""Orientation must be \'h\' or \'v\'"""'], {}), '("Orientation must be \'h\' or \'v\'")\n', (9010, 9044), False, 'from yellowbrick.exceptions import YellowbrickValueError\n'), ((7586, 7599), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (7596, 7599), False, 'from operator import itemgetter\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Re-Run KNearestNeighbors for Journal Recommendation
# This notebook is designed to predict journals based on an updated version of document vector generation. Before I was doing a simple token analysis via breaking each sentence based on whitespace (e.g. ' '), but now I incorporated Spacy with lemma generation. To simplify running the recommendation notebook all over again, I'm just using the 300 dimensions to train a KNN-model and to compare its performance against a random baseline.
# +
from pathlib import Path
import numpy as np
import pandas as pd
import plotnine as p9
from sklearn.dummy import DummyClassifier
from sklearn.neighbors import KNeighborsClassifier
from tqdm import tqdm_notebook
from annorxiver_modules.journal_rec_helper import (
cross_validation,
dummy_evaluate,
knn_evaluate,
knn_centroid_evaluate,
)
# -
# # Load bioRxiv Papers
# Collapse multiple tracked versions of each preprint into one row per
# preprint DOI (latest document, first-seen category/publication info).
biorxiv_journal_df = (
    pd.read_csv(
        Path("../..")
        / Path("biorxiv")
        / Path("journal_tracker")
        / Path("output/mapped_published_doi.tsv"),
        sep="\t",
    )
    .groupby("preprint_doi")
    .agg(
        {
            "document": "last",
            "category": "first",
            "preprint_doi": "last",
            "published_doi": "first",
            "pmcid": "first",
        }
    )
    .reset_index(drop=True)
)
biorxiv_journal_df.head()
# Count number of Non-NaN elements
print(f"Number of Non-NaN entries: {biorxiv_journal_df.pmcid.count()}")
print(f"Total number of entries: {biorxiv_journal_df.shape[0]}")
print(
    f"Percent Covered: {(biorxiv_journal_df.pmcid.count()/biorxiv_journal_df.shape[0])*100:.2f}%"
)
# Preprints with a PMCID form the held-out "golden set" for evaluation.
golden_set_df = biorxiv_journal_df.query("pmcid.notnull()")
golden_set_df.head()
# # Load PubMed Central Papers
# Keep research articles only; other article types are excluded up front.
pmc_articles_df = pd.read_csv(
    Path("../exploratory_data_analysis")
    / Path("output")
    / Path("pubmed_central_journal_paper_map.tsv.xz"),
    sep="\t",
).query("article_type=='research-article'")
print(pmc_articles_df.shape)
pmc_articles_df.head()
journals = pmc_articles_df.journal.value_counts()
print(journals.shape)
journals
# Filter out low count journals
# (journals with <= 100 research articles are dropped from the label set)
pmc_articles_df = pmc_articles_df.query(
    f"journal in {journals[journals > 100].index.tolist()}"
)
print(pmc_articles_df.shape)
pmc_articles_df.head()
# 300-dimensional document vectors for each PMC article.
pmc_embedding_df = pd.read_csv(
    "../word_vector_experiment/output/pmc_document_vectors_300_replace.tsv.xz", sep="\t"
)
pmc_embedding_df.head()
# Training pool: all PMC articles that are NOT in the golden set.
full_dataset_df = (
    pmc_articles_df.query(f"pmcid not in {golden_set_df.pmcid.tolist()}")[["pmcid"]]
    .merge(pmc_embedding_df, left_on="pmcid", right_on="document")
    .drop("pmcid", axis=1)
    .set_index("document")
)
full_dataset_df.head()
# Balanced variant: at most 100 papers per journal (fixed seed for repeatability).
subsampled_df = (
    pmc_articles_df.query(f"pmcid not in {golden_set_df.pmcid.tolist()}")[["pmcid"]]
    .merge(pmc_embedding_df, left_on="pmcid", right_on="document")
    .drop("pmcid", axis=1)
    .groupby("journal", group_keys=False)
    .apply(lambda x: x.sample(min(len(x), 100), random_state=100))
    .set_index("document")
)
subsampled_df.head()
# # Train Similarity Search System
knn_model = KNeighborsClassifier(n_neighbors=10)
# ## Random Journal Prediction
# Baseline: a uniform-random journal guesser, evaluated with 10-fold CV.
model = DummyClassifier(strategy="uniform")
_ = cross_validation(
    model, subsampled_df, dummy_evaluate, cv=10, random_state=100, top_predictions=10
)
# ## Centroid Prediction
# Predict using each journal's centroid vector as the query target.
_ = cross_validation(
    knn_model, subsampled_df, knn_centroid_evaluate, cv=10, random_state=100
)
# ## Paper by Paper prediction
_ = cross_validation(knn_model, subsampled_df, knn_evaluate, cv=10, random_state=100)
# # Gold Set Analysis
# bioRxiv document vectors for the held-out preprints.
biorxiv_embeddings_df = pd.read_csv(
    Path(
        "../../biorxiv/word_vector_experiment/output/word2vec_output/biorxiv_all_articles_300.tsv.xz"
    ).resolve(),
    sep="\t",
)
biorxiv_embeddings_df.head()
# Join golden-set preprints to their published journal and embedding.
golden_dataset = (
    golden_set_df[["document", "pmcid"]]
    .merge(pmc_articles_df[["journal", "pmcid"]], on="pmcid")
    .merge(biorxiv_embeddings_df, on="document")
)
golden_dataset.head()
# Random baseline on the gold set.
model = DummyClassifier(strategy="uniform")
_ = cross_validation(
    model,
    golden_dataset.drop(["pmcid", "document"], axis=1),
    dummy_evaluate,
    cv=10,
    random_state=100,
    top_predictions=10,
)
# ## Centroid Analysis
predictions, true_labels = knn_centroid_evaluate(
    knn_model, subsampled_df, golden_dataset.drop(["pmcid", "document"], axis=1)
)
# Top-k accuracy: 1 when the true journal appears in the prediction row.
accs = [
    (1 if true_labels[data_idx] in prediction_row else 0)
    for data_idx, prediction_row in enumerate(predictions)
]
print(f"{np.sum(accs)} out of {len(accs)}")
print(f"{np.mean(accs)*100}% correct")
# ## Paper by Paper analysis
predictions, true_labels = knn_evaluate(
    knn_model, subsampled_df, golden_dataset.drop(["pmcid", "document"], axis=1)
)
accs = [
    (1 if true_labels[data_idx] in prediction_row else 0)
    for data_idx, prediction_row in enumerate(predictions)
]
print(f"{np.sum(accs)} out of {len(accs)}")
print(f"{np.mean(accs)*100}% correct")
# # Save Entire Dataset
# Full per-paper embedding table, keyed by document, written compressed.
(
    pmc_articles_df[["pmcid"]]
    .merge(pmc_embedding_df, left_on="pmcid", right_on="document")
    .drop("pmcid", axis=1)
    .to_csv(
        "output/paper_dataset/paper_dataset_full.tsv.xz",
        sep="\t",
        compression="xz",
        index=False,
    )
)
# +
# Per-journal centroids: keep one representative document name and the
# mean of every "feat" embedding column.
cols = dict(document="first")
cols.update({col: "mean" for col in pmc_embedding_df if "feat" in col})
(
    pmc_articles_df[["pmcid"]]
    .merge(pmc_embedding_df, left_on="pmcid", right_on="document")
    .drop(["pmcid"], axis=1)
    .groupby("journal")
    .agg(cols)
    .reset_index()
    .to_csv("output/paper_dataset/centroid.tsv", sep="\t", index=False)
)
| [
"sklearn.dummy.DummyClassifier",
"numpy.sum",
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"pathlib.Path",
"numpy.mean",
"annorxiver_modules.journal_rec_helper.cross_validation"
] | [((2643, 2749), 'pandas.read_csv', 'pd.read_csv', (['"""../word_vector_experiment/output/pmc_document_vectors_300_replace.tsv.xz"""'], {'sep': '"""\t"""'}), "(\n '../word_vector_experiment/output/pmc_document_vectors_300_replace.tsv.xz',\n sep='\\t')\n", (2654, 2749), True, 'import pandas as pd\n'), ((3429, 3465), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(10)'}), '(n_neighbors=10)\n', (3449, 3465), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3507, 3542), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""uniform"""'}), "(strategy='uniform')\n", (3522, 3542), False, 'from sklearn.dummy import DummyClassifier\n'), ((3548, 3652), 'annorxiver_modules.journal_rec_helper.cross_validation', 'cross_validation', (['model', 'subsampled_df', 'dummy_evaluate'], {'cv': '(10)', 'random_state': '(100)', 'top_predictions': '(10)'}), '(model, subsampled_df, dummy_evaluate, cv=10, random_state=\n 100, top_predictions=10)\n', (3564, 3652), False, 'from annorxiver_modules.journal_rec_helper import cross_validation, dummy_evaluate, knn_evaluate, knn_centroid_evaluate\n'), ((3685, 3779), 'annorxiver_modules.journal_rec_helper.cross_validation', 'cross_validation', (['knn_model', 'subsampled_df', 'knn_centroid_evaluate'], {'cv': '(10)', 'random_state': '(100)'}), '(knn_model, subsampled_df, knn_centroid_evaluate, cv=10,\n random_state=100)\n', (3701, 3779), False, 'from annorxiver_modules.journal_rec_helper import cross_validation, dummy_evaluate, knn_evaluate, knn_centroid_evaluate\n'), ((3819, 3904), 'annorxiver_modules.journal_rec_helper.cross_validation', 'cross_validation', (['knn_model', 'subsampled_df', 'knn_evaluate'], {'cv': '(10)', 'random_state': '(100)'}), '(knn_model, subsampled_df, knn_evaluate, cv=10,\n random_state=100)\n', (3835, 3904), False, 'from annorxiver_modules.journal_rec_helper import cross_validation, dummy_evaluate, knn_evaluate, knn_centroid_evaluate\n'), ((4341, 
4376), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""uniform"""'}), "(strategy='uniform')\n", (4356, 4376), False, 'from sklearn.dummy import DummyClassifier\n'), ((3966, 4075), 'pathlib.Path', 'Path', (['"""../../biorxiv/word_vector_experiment/output/word2vec_output/biorxiv_all_articles_300.tsv.xz"""'], {}), "(\n '../../biorxiv/word_vector_experiment/output/word2vec_output/biorxiv_all_articles_300.tsv.xz'\n )\n", (3970, 4075), False, 'from pathlib import Path\n'), ((4843, 4855), 'numpy.sum', 'np.sum', (['accs'], {}), '(accs)\n', (4849, 4855), True, 'import numpy as np\n'), ((5211, 5223), 'numpy.sum', 'np.sum', (['accs'], {}), '(accs)\n', (5217, 5223), True, 'import numpy as np\n'), ((2194, 2241), 'pathlib.Path', 'Path', (['"""pubmed_central_journal_paper_map.tsv.xz"""'], {}), "('pubmed_central_journal_paper_map.tsv.xz')\n", (2198, 2241), False, 'from pathlib import Path\n'), ((4887, 4900), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (4894, 4900), True, 'import numpy as np\n'), ((5255, 5268), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (5262, 5268), True, 'import numpy as np\n'), ((2130, 2166), 'pathlib.Path', 'Path', (['"""../exploratory_data_analysis"""'], {}), "('../exploratory_data_analysis')\n", (2134, 2166), False, 'from pathlib import Path\n'), ((2173, 2187), 'pathlib.Path', 'Path', (['"""output"""'], {}), "('output')\n", (2177, 2187), False, 'from pathlib import Path\n'), ((1345, 1384), 'pathlib.Path', 'Path', (['"""output/mapped_published_doi.tsv"""'], {}), "('output/mapped_published_doi.tsv')\n", (1349, 1384), False, 'from pathlib import Path\n'), ((1311, 1334), 'pathlib.Path', 'Path', (['"""journal_tracker"""'], {}), "('journal_tracker')\n", (1315, 1334), False, 'from pathlib import Path\n'), ((1261, 1274), 'pathlib.Path', 'Path', (['"""../.."""'], {}), "('../..')\n", (1265, 1274), False, 'from pathlib import Path\n'), ((1285, 1300), 'pathlib.Path', 'Path', (['"""biorxiv"""'], {}), "('biorxiv')\n", (1289, 
1300), False, 'from pathlib import Path\n')] |
"""
Visualize Q-values and gradients of a good Ant policy.
"""
import joblib
import argparse
import numpy as np
import matplotlib.pyplot as plt
import rlkit.torch.pytorch_util as ptu
# Hard-coded snapshot paths from a DDPG hyperparameter sweep; used as the
# default for the --file CLI argument below.
PATH = '/home/vitchyr/git/railrl/data/doodads3/01-26-ddpg-sweep-harder-tasks/01-26-ddpg-sweep-harder-tasks-id10-s68123/params.pkl'
BAD_POLICY_PATH = '/home/vitchyr/git/railrl/data/doodads3/01-26-ddpg-sweep-harder-tasks/01-26-ddpg-sweep-harder-tasks-id3-s32528/params.pkl'
UNSTABLE_POLCIY_PATH = '/home/vitchyr/git/railrl/data/doodads3/01-26-ddpg-sweep-harder-tasks/01-26-ddpg-sweep-harder-tasks-id9-s20629/params.pkl'
def visualize_qf_slice(qf, env):
    """Plot 1-D slices of the Q-function and its action gradients.

    For each action dimension i, sweep that coordinate of a randomly
    sampled action across the action space (other coordinates fixed) and
    plot, in a num_dim x 2 grid:

      * left column:  Q(s, a) along the slice
      * right column: dQ/da_i from autograd, next to a finite-difference
        estimate (``np.gradient``) as a sanity check.

    The state s is a fresh ``env.reset()`` observation.

    Parameters
    ----------
    qf : Q-function module mapping (observation, action) -> Q-value.
    env : environment with a Box action space (``low``/``high`` bounds).
    """
    ob = env.reset()
    sampled_action = env.action_space.sample()
    low = env.action_space.low
    high = env.action_space.high
    N = 100  # resolution of each 1-D slice
    num_dim = low.size
    for i in range(num_dim):
        # Sweep dimension i over its *own* bounds. The original code reused
        # low[0]/high[0] for every dimension, which is only correct when all
        # action dimensions share the same bounds.
        a_values, da = np.linspace(low[i], high[i], N, retstep=True)
        q_values = []
        q_gradients = []
        action = sampled_action.copy()
        for a_value in a_values:
            action[i] = a_value
            ob_pt = ptu.np_to_var(ob[None])
            # Fresh leaf variable each step so .grad is not accumulated
            # across iterations.
            action_pt = ptu.np_to_var(action[None], requires_grad=True)
            q_val = qf(ob_pt, action_pt)
            q_val.sum().backward()
            q_values.append(ptu.get_numpy(q_val)[0, 0])
            q_gradients.append(ptu.get_numpy(action_pt.grad)[0, i])
        q_values = np.array(q_values)
        q_gradients = np.array(q_gradients)
        # Finite-difference estimate of dQ/da_i along the slice.
        empirical_gradients = np.gradient(q_values, da)
        plt.subplot(num_dim, 2, i * 2 + 1)
        plt.plot(a_values, q_values, label='values')
        plt.xlabel("action slice")
        plt.ylabel("q-value")
        plt.title("dimension {}".format(i))
        plt.subplot(num_dim, 2, i * 2 + 2)
        plt.plot(a_values, empirical_gradients, label='empirical gradients')
        plt.plot(a_values, q_gradients, label='actual gradients')
        plt.xlabel("action slice")
        plt.ylabel("dq/da")
        plt.title("dimension {}".format(i))
        plt.legend()
    plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', type=str,
                        default=UNSTABLE_POLCIY_PATH,
                        help='path to the snapshot file')
    # NOTE(review): --H is parsed but never used in this script.
    parser.add_argument('--H', type=int, default=30, help='Horizon for eval')
    args = parser.parse_args()
    # Snapshot is a joblib pickle holding the trained Q-function and env.
    data = joblib.load(args.file)
    qf = data['qf']
    env = data['env']
    visualize_qf_slice(qf, env)
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"rlkit.torch.pytorch_util.get_numpy",
"matplotlib.pyplot.legend",
"rlkit.torch.pytorch_util.np_to_var",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"joblib.load",
"matpl... | [((802, 847), 'numpy.linspace', 'np.linspace', (['low[0]', 'high[0]', 'N'], {'retstep': '(True)'}), '(low[0], high[0], N, retstep=True)\n', (813, 847), True, 'import numpy as np\n'), ((2032, 2042), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2040, 2042), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2110), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2108, 2110), False, 'import argparse\n'), ((2388, 2410), 'joblib.load', 'joblib.load', (['args.file'], {}), '(args.file)\n', (2399, 2410), False, 'import joblib\n'), ((1391, 1409), 'numpy.array', 'np.array', (['q_values'], {}), '(q_values)\n', (1399, 1409), True, 'import numpy as np\n'), ((1432, 1453), 'numpy.array', 'np.array', (['q_gradients'], {}), '(q_gradients)\n', (1440, 1453), True, 'import numpy as np\n'), ((1484, 1509), 'numpy.gradient', 'np.gradient', (['q_values', 'da'], {}), '(q_values, da)\n', (1495, 1509), True, 'import numpy as np\n'), ((1518, 1552), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_dim', '(2)', '(i * 2 + 1)'], {}), '(num_dim, 2, i * 2 + 1)\n', (1529, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1604), 'matplotlib.pyplot.plot', 'plt.plot', (['a0_values', 'q_values'], {'label': '"""values"""'}), "(a0_values, q_values, label='values')\n", (1567, 1604), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1639), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""action slice"""'], {}), "('action slice')\n", (1623, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""q-value"""'], {}), "('q-value')\n", (1658, 1669), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1756), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_dim', '(2)', '(i * 2 + 2)'], {}), '(num_dim, 2, i * 2 + 2)\n', (1733, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1763, 1832), 'matplotlib.pyplot.plot', 'plt.plot', (['a0_values', 'empirical_gradients'], {'label': '"""empirical 
gradients"""'}), "(a0_values, empirical_gradients, label='empirical gradients')\n", (1771, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1841, 1899), 'matplotlib.pyplot.plot', 'plt.plot', (['a0_values', 'q_gradients'], {'label': '"""actual gradients"""'}), "(a0_values, q_gradients, label='actual gradients')\n", (1849, 1899), True, 'import matplotlib.pyplot as plt\n'), ((1908, 1934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""action slice"""'], {}), "('action slice')\n", (1918, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1943, 1962), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dq/da"""'], {}), "('dq/da')\n", (1953, 1962), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2027), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2025, 2027), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1097), 'rlkit.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['ob[None]'], {}), '(ob[None])\n', (1087, 1097), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1122, 1169), 'rlkit.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['action[None]'], {'requires_grad': '(True)'}), '(action[None], requires_grad=True)\n', (1135, 1169), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1275, 1295), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_val'], {}), '(q_val)\n', (1288, 1295), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1334, 1363), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['action_pt.grad'], {}), '(action_pt.grad)\n', (1347, 1363), True, 'import rlkit.torch.pytorch_util as ptu\n')] |
from __future__ import print_function, absolute_import
import time
from collections import OrderedDict
import torch
import numpy as np
from .utils import to_torch
from .evaluation_metrics import cmc, mean_ap
from reid.loss.qaconv import QAConv
from .tlift import TLift
from .rerank import re_ranking
def pre_tlift(gallery, query):
    """Collect the camera ids and frame times that TLift needs.

    Each element of ``gallery``/``query`` is a 4-tuple whose third and
    fourth entries are the camera id and the frame time. Returns a dict
    holding those arrays for both sets plus the number of cameras.
    """
    def unpack(samples):
        cams, times = [], []
        for _, _, cam, frame_time in samples:
            cams.append(cam)
            times.append(frame_time)
        return np.array(cams), np.array(times)

    gal_cam_id, gal_time = unpack(gallery)
    prob_cam_id, prob_time = unpack(query)
    return {'gal_cam_id': gal_cam_id,
            'gal_time': gal_time,
            'prob_cam_id': prob_cam_id,
            'prob_time': prob_time,
            'num_cams': gal_cam_id.max() + 1}
def extract_cnn_feature(model, inputs):
    """Run ``model`` on one batch and return the detached features on CPU."""
    model = model.cuda().eval()
    batch = to_torch(inputs).cuda()
    with torch.no_grad():
        feats = model(batch).data.cpu()
    return feats
def extract_features(model, data_loader):
    """Extract features for every image yielded by ``data_loader``.

    Returns two ``OrderedDict``s keyed by file name: the feature tensors
    and the corresponding person ids. Also reports the accumulated
    feature-extraction and data-loading times.
    """
    features = OrderedDict()
    labels = OrderedDict()
    fea_time = 0
    data_time = 0
    print('Extract Features...', end='\t')
    tic = time.time()
    for i, (imgs, fnames, pids, _) in enumerate(data_loader):
        data_time += time.time() - tic
        tic = time.time()
        outputs = extract_cnn_feature(model, imgs)
        for fname, output, pid in zip(fnames, outputs, pids):
            features[fname] = output
            labels[fname] = pid
        fea_time += time.time() - tic
        tic = time.time()
    print('Feature time: {:.3f} seconds. Data time: {:.3f} seconds.'.format(fea_time, data_time))
    return features, labels
def pairwise_distance(gal_fea, prob_fea, qaconv_layer, gal_batch_size=128,
                      prob_batch_size=4096, transpose=False):
    """Compute the QAConv distance matrix between gallery and probe features.

    Similarity scores are computed block-wise over both sets to bound GPU
    memory. Returns a numpy array of distances shaped [gallery, probe],
    or [probe, gallery] when ``transpose`` is True.
    """
    with torch.no_grad():
        num_gals = gal_fea.size(0)
        num_probs = prob_fea.size(0)
        score = torch.zeros(num_gals, num_probs, device=prob_fea.device)
        for p0 in range(0, num_probs, prob_batch_size):
            p1 = min(p0 + prob_batch_size, num_probs)
            matcher = QAConv(prob_fea[p0: p1, :, :, :].cuda(), qaconv_layer)
            matcher = torch.nn.DataParallel(matcher).cuda().eval()
            for g0 in range(0, num_gals, gal_batch_size):
                g1 = min(g0 + gal_batch_size, num_gals)
                score[g0: g1, p0: p1] = matcher(gal_fea[g0: g1, :, :, :].cuda())
    # Convert similarities to distances, orienting the matrix as requested.
    if transpose:
        score = score.t()
    dist = (1. - score).cpu().numpy()
    return dist
def evaluate_all(distmat, query=None, gallery=None,
                 query_ids=None, gallery_ids=None,
                 query_cams=None, gallery_cams=None,
                 cmc_topk=(1, 5, 10, 20)):
    """Report mean AP and Market-1501-style CMC scores for ``distmat``.

    Ids/cams are taken from ``query``/``gallery`` 4-tuples when provided,
    otherwise the explicit id/cam lists must all be given. Returns
    ``(top-1 CMC, mAP)``.
    """
    if query is not None and gallery is not None:
        query_ids = [pid for _, pid, _, _ in query]
        query_cams = [cam for _, _, cam, _ in query]
        gallery_ids = [pid for _, pid, _, _ in gallery]
        gallery_cams = [cam for _, _, cam, _ in gallery]
    else:
        assert (query_ids is not None and gallery_ids is not None
                and query_cams is not None and gallery_cams is not None)
    # Mean average precision over all queries.
    mAP = mean_ap(distmat, query_ids, gallery_ids, query_cams, gallery_cams)
    print('Mean AP: {:4.1%}'.format(mAP))
    # CMC under the Market-1501 protocol (shared camera set, all gallery
    # shots, stop at the first correct match).
    market_scores = cmc(distmat, query_ids, gallery_ids,
                        query_cams, gallery_cams,
                        separate_camera_set=False,
                        single_gallery_shot=False,
                        first_match_break=True)
    print('CMC Scores')
    for k in cmc_topk:
        print('  top-{:<4}{:12.1%}'.format(k, market_scores[k - 1]))
    return market_scores[0], mAP
class Evaluator(object):
    """Runs feature extraction and scoring for a trained QAConv model."""

    def __init__(self, model):
        super(Evaluator, self).__init__()
        self.model = model

    def evaluate(self, query_loader, gallery_loader, testset, qaconv_layer, gal_batch_size=128,
                 prob_batch_size=4096, tau=100, sigma=100, K=100, alpha=0.1):
        """Evaluate on ``testset`` and return plain, re-ranked and TLift results.

        Returns a tuple of (rank1, mAP, rank1_rerank, mAP_rerank,
        rank1_tlift, mAP_tlift, dist, dist_rerank, dist_tlift,
        pre_tlift_dict). When the dataset carries no time information the
        TLift numbers fall back to the re-ranked ones.
        """
        query, gallery = testset.query, testset.gallery

        def stack(feature_dict, samples):
            # Preserve the dataset ordering when stacking per-file features.
            return torch.cat([feature_dict[f].unsqueeze(0) for f, _, _, _ in samples], 0)

        prob_dict, _ = extract_features(self.model, query_loader)
        prob_fea = stack(prob_dict, query)
        gal_dict, _ = extract_features(self.model, gallery_loader)
        gal_fea = stack(gal_dict, gallery)

        print('Compute similarity...', end='\t')
        start = time.time()
        dist = pairwise_distance(gal_fea, prob_fea, qaconv_layer, gal_batch_size, prob_batch_size,
                                 transpose=True)  # oriented [probe, gallery]
        print('Time: %.3f seconds.' % (time.time() - start))
        rank1, mAP = evaluate_all(dist, query=query, gallery=gallery)

        print('Compute similarity for rerank...', end='\t')
        start = time.time()
        q_q_dist = pairwise_distance(prob_fea, prob_fea, qaconv_layer, gal_batch_size, prob_batch_size)
        g_g_dist = pairwise_distance(gal_fea, gal_fea, qaconv_layer, gal_batch_size, prob_batch_size)
        dist_rerank = re_ranking(dist, q_q_dist, g_g_dist)
        print('Time: %.3f seconds.' % (time.time() - start))
        rank1_rerank, mAP_rerank = evaluate_all(dist_rerank, query=query, gallery=gallery)

        score_rerank = 1 - dist_rerank
        if not testset.has_time_info:
            # No timestamps available: skip TLift and reuse re-ranked results.
            pre_tlift_dict = {'gal_time': 0, 'prob_time': 0}
            dist_tlift = 0
            rank1_tlift, mAP_tlift = rank1_rerank, mAP_rerank
        else:
            print('Compute TLift...', end='\t')
            start = time.time()
            pre_tlift_dict = pre_tlift(gallery, query)
            score_tlift = TLift(score_rerank, tau=tau, sigma=sigma, K=K, alpha=alpha,
                                **pre_tlift_dict)
            print('Time: %.3f seconds.' % (time.time() - start))
            dist_tlift = 1 - score_tlift
            rank1_tlift, mAP_tlift = evaluate_all(dist_tlift, query=query, gallery=gallery)

        return rank1, mAP, rank1_rerank, mAP_rerank, rank1_tlift, mAP_tlift, dist, dist_rerank, \
            dist_tlift, pre_tlift_dict
| [
"torch.zeros",
"time.time",
"numpy.array",
"collections.OrderedDict",
"torch.no_grad"
] | [((352, 395), 'numpy.array', 'np.array', (['[cam for _, _, cam, _ in gallery]'], {}), '([cam for _, _, cam, _ in gallery])\n', (360, 395), True, 'import numpy as np\n'), ((411, 468), 'numpy.array', 'np.array', (['[frame_time for _, _, _, frame_time in gallery]'], {}), '([frame_time for _, _, _, frame_time in gallery])\n', (419, 468), True, 'import numpy as np\n'), ((487, 528), 'numpy.array', 'np.array', (['[cam for _, _, cam, _ in query]'], {}), '([cam for _, _, cam, _ in query])\n', (495, 528), True, 'import numpy as np\n'), ((545, 600), 'numpy.array', 'np.array', (['[frame_time for _, _, _, frame_time in query]'], {}), '([frame_time for _, _, _, frame_time in query])\n', (553, 600), True, 'import numpy as np\n'), ((1087, 1100), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1098, 1100), False, 'from collections import OrderedDict\n'), ((1114, 1127), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1125, 1127), False, 'from collections import OrderedDict\n'), ((1138, 1149), 'time.time', 'time.time', ([], {}), '()\n', (1147, 1149), False, 'import time\n'), ((892, 907), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (905, 907), False, 'import torch\n'), ((1309, 1320), 'time.time', 'time.time', ([], {}), '()\n', (1318, 1320), False, 'import time\n'), ((1557, 1568), 'time.time', 'time.time', ([], {}), '()\n', (1566, 1568), False, 'import time\n'), ((1845, 1860), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1858, 1860), False, 'import torch\n'), ((1950, 2006), 'torch.zeros', 'torch.zeros', (['num_gals', 'num_probs'], {'device': 'prob_fea.device'}), '(num_gals, num_probs, device=prob_fea.device)\n', (1961, 2006), False, 'import torch\n'), ((4650, 4661), 'time.time', 'time.time', ([], {}), '()\n', (4659, 4661), False, 'import time\n'), ((5028, 5039), 'time.time', 'time.time', ([], {}), '()\n', (5037, 5039), False, 'import time\n'), ((1277, 1288), 'time.time', 'time.time', ([], {}), '()\n', (1286, 1288), False, 'import time\n'), 
((1525, 1536), 'time.time', 'time.time', ([], {}), '()\n', (1534, 1536), False, 'import time\n'), ((5599, 5610), 'time.time', 'time.time', ([], {}), '()\n', (5608, 5610), False, 'import time\n'), ((4859, 4870), 'time.time', 'time.time', ([], {}), '()\n', (4868, 4870), False, 'import time\n'), ((5344, 5355), 'time.time', 'time.time', ([], {}), '()\n', (5353, 5355), False, 'import time\n'), ((5845, 5856), 'time.time', 'time.time', ([], {}), '()\n', (5854, 5856), False, 'import time\n')] |
import numpy as np
import pyamg
import scipy.sparse
def get_gradients(I):
    """Return the forward-difference gradients (I_y, I_x) of an image.

    The first row of I_y and the first column of I_x stay zero, since
    those differences would fall outside the image.
    """
    I_y = np.zeros(I.shape)
    I_x = np.zeros(I.shape)
    I_y[1:, ...] = np.diff(I, axis=0)
    I_x[:, 1:, ...] = np.diff(I, axis=1)
    return I_y, I_x
def solve(t_y, t_x, mask, t_y_weights=None, t_x_weights=None):
    """Reconstruct the image that best matches the target gradients.

    Minimizes the weighted sum of squared residuals

        sum of (I[i,j] - I[i-1,j] - t_y[i,j])**2
             + (I[i,j] - I[i,j-1] - t_x[i,j])**2

    over the target gradients lying entirely inside the mask. The first
    row of t_y and the first column of t_x are ignored. Optional weight
    arrays scale each target gradient. The solution is only defined up
    to an additive constant.
    """
    if t_y_weights is None:
        t_y_weights = np.ones(t_y.shape)
    if t_x_weights is None:
        t_x_weights = np.ones(t_x.shape)
    A = get_A(mask, t_y_weights, t_x_weights)
    b = get_b(t_y, t_x, mask, t_y_weights, t_x_weights)
    x = pyamg.ruge_stuben_solver(A).solve(b)
    # Scatter the solution vector back onto the image grid; pixels outside
    # the mask pick up x[0] (numbers is 0 there), matching the original fill.
    numbers = get_numbers(mask)
    I = x[numbers]
    return I
def solve_L1(t_y, t_x, mask):
    """Same as solve(), except using an L1 penalty rather than least squares.

    Uses iteratively reweighted least squares (IRLS): we minimize the
    L1-norm of the residual

        sum of |r_i|,   r = Ax - b

    by alternately minimizing the variational upper bound

        |r_i| <= a_i * r_i**2 + 1 / (4 * a_i)

    with respect to x and a. When r is fixed, this bound is tight for
    a = 1 / (2 * r). When a is fixed, we optimize for x by solving a
    weighted least-squares problem.
    """
    EPSILON = 0.0001
    I = solve(t_y, t_x, mask)
    for i in range(20):
        I_y, I_x = get_gradients(I)
        t_y_err = mask * np.abs(I_y - t_y)
        t_x_err = mask * np.abs(I_x - t_x)
        # Clip residuals away from zero so the IRLS weights stay finite.
        # (np.inf replaces np.infty, which was removed in NumPy 2.0.)
        t_y_weights = 1. / (2. * np.clip(t_y_err, EPSILON, np.inf))
        t_x_weights = 1. / (2. * np.clip(t_x_err, EPSILON, np.inf))
        try:
            I = solve(t_y, t_x, mask, t_y_weights, t_x_weights)
        except Exception:
            # Occasionally the solver fails when the weights get very large
            # or small. In that case, we just return the previous iteration's
            # estimate, which is hopefully close enough. (Deliberately broad,
            # but no longer a bare except that would swallow KeyboardInterrupt.)
            return I
    return I
###################### Internal helpers for assembling the sparse least-squares system ######################
def get_numbers(mask):
    """Assign consecutive row-major indices to the pixels inside the mask.

    Pixels outside the mask keep index 0 (they share the index of the
    first masked pixel; callers only read indices where mask is set).
    """
    numbers = np.zeros(mask.shape, dtype=int)
    inside = mask.astype(bool)
    # Boolean assignment fills in row-major (C) order, so this reproduces
    # the nested-loop numbering exactly.
    numbers[inside] = np.arange(np.count_nonzero(inside))
    return numbers
def get_b(t_y, t_x, mask, t_y_weights, t_x_weights):
    """Build the right-hand-side vector of the normal equations.

    Every pair of masked neighbours contributes its weighted target
    gradient with opposite signs to the two pixel rows involved.
    """
    M, N = mask.shape
    numbers = get_numbers(mask)
    b = np.zeros(np.max(numbers) + 1)
    # Drop the first row/column of targets: those gradients reach outside
    # the image and are ignored by convention.
    t_y, t_y_weights = t_y[1:, :], t_y_weights[1:, :]
    t_x, t_x_weights = t_x[:, 1:], t_x_weights[:, 1:]
    # Horizontal neighbours (i, j) -- (i, j+1).
    for i in range(M):
        for j in range(N - 1):
            if not (mask[i, j] and mask[i, j + 1]):
                continue
            target = t_x[i, j] * t_x_weights[i, j]
            # row (i, j): -x_{i,j+1} + x_{i,j} + t
            b[numbers[i, j]] -= target
            # row (i, j+1): x_{i,j+1} - x_{i,j} - t
            b[numbers[i, j + 1]] += target
    # Vertical neighbours (i, j) -- (i+1, j).
    for i in range(M - 1):
        for j in range(N):
            if not (mask[i, j] and mask[i + 1, j]):
                continue
            target = t_y[i, j] * t_y_weights[i, j]
            # row (i, j): -x_{i+1,j} + x_{i,j} + t
            b[numbers[i, j]] -= target
            # row (i+1, j): x_{i+1,j} - x_{i,j} - t
            b[numbers[i + 1, j]] += target
    return b
def get_A(mask, t_y_weights, t_x_weights):
    """Build the sparse normal-equation matrix A for the gradient fit.

    For every pair of masked neighbours (n1, n2) with weight w, the two
    residual rows contribute four COO entries:

        (n1, n1, +w), (n1, n2, -w), (n2, n2, +w), (n2, n1, -w)

    Duplicate (row, col) entries are summed when the COO matrix is
    converted/used, so the resulting operator is identical to the one
    produced by the previous per-pixel loops, just built vectorized.

    Parameters
    ----------
    mask : 2-D array; nonzero marks pixels included in the fit.
    t_y_weights, t_x_weights : per-gradient weight arrays; the first row
        of ``t_y_weights`` and the first column of ``t_x_weights`` are
        ignored, matching get_b().

    Returns
    -------
    scipy.sparse.coo_matrix of shape (K, K), K = number of masked pixels.
    """
    numbers = get_numbers(mask)
    K = np.max(numbers) + 1
    inside = mask.astype(bool)

    def pair_entries(n1, n2, w):
        # Four COO entries per neighbouring pair; see docstring.
        data = np.concatenate([w, -w, w, -w])
        row = np.concatenate([n1, n1, n2, n2])
        col = np.concatenate([n1, n2, n2, n1])
        return data, row, col

    # Horizontal neighbours: (i, j) -- (i, j+1), both inside the mask.
    valid = inside[:, :-1] & inside[:, 1:]
    data1, row1, col1 = pair_entries(numbers[:, :-1][valid],
                                     numbers[:, 1:][valid],
                                     t_x_weights[:, 1:][valid])
    # Vertical neighbours: (i, j) -- (i+1, j), both inside the mask.
    valid = inside[:-1, :] & inside[1:, :]
    data2, row2, col2 = pair_entries(numbers[:-1, :][valid],
                                     numbers[1:, :][valid],
                                     t_y_weights[1:, :][valid])

    data = np.concatenate([data1, data2])
    row = np.concatenate([row1, row2])
    col = np.concatenate([col1, col2])
    return scipy.sparse.coo_matrix((data, (row, col)), shape=(K, K))
| [
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"pyamg.ruge_stuben_solver",
"numpy.clip",
"numpy.max",
"numpy.concatenate"
] | [((192, 209), 'numpy.zeros', 'np.zeros', (['I.shape'], {}), '(I.shape)\n', (200, 209), True, 'import numpy as np\n'), ((273, 290), 'numpy.zeros', 'np.zeros', (['I.shape'], {}), '(I.shape)\n', (281, 290), True, 'import numpy as np\n'), ((1316, 1343), 'pyamg.ruge_stuben_solver', 'pyamg.ruge_stuben_solver', (['A'], {}), '(A)\n', (1340, 1343), False, 'import pyamg\n'), ((1381, 1401), 'numpy.zeros', 'np.zeros', (['mask.shape'], {}), '(mask.shape)\n', (1389, 1401), True, 'import numpy as np\n'), ((2845, 2876), 'numpy.zeros', 'np.zeros', (['mask.shape'], {'dtype': 'int'}), '(mask.shape, dtype=int)\n', (2853, 2876), True, 'import numpy as np\n'), ((3312, 3323), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (3320, 3323), True, 'import numpy as np\n'), ((4552, 4571), 'numpy.zeros', 'np.zeros', (['(4 * count)'], {}), '(4 * count)\n', (4560, 4571), True, 'import numpy as np\n'), ((4580, 4599), 'numpy.zeros', 'np.zeros', (['(4 * count)'], {}), '(4 * count)\n', (4588, 4599), True, 'import numpy as np\n'), ((4608, 4627), 'numpy.zeros', 'np.zeros', (['(4 * count)'], {}), '(4 * count)\n', (4616, 4627), True, 'import numpy as np\n'), ((5660, 5679), 'numpy.zeros', 'np.zeros', (['(4 * count)'], {}), '(4 * count)\n', (5668, 5679), True, 'import numpy as np\n'), ((5688, 5707), 'numpy.zeros', 'np.zeros', (['(4 * count)'], {}), '(4 * count)\n', (5696, 5707), True, 'import numpy as np\n'), ((5716, 5735), 'numpy.zeros', 'np.zeros', (['(4 * count)'], {}), '(4 * count)\n', (5724, 5735), True, 'import numpy as np\n'), ((6606, 6636), 'numpy.concatenate', 'np.concatenate', (['[data1, data2]'], {}), '([data1, data2])\n', (6620, 6636), True, 'import numpy as np\n'), ((6647, 6675), 'numpy.concatenate', 'np.concatenate', (['[row1, row2]'], {}), '([row1, row2])\n', (6661, 6675), True, 'import numpy as np\n'), ((6686, 6714), 'numpy.concatenate', 'np.concatenate', (['[col1, col2]'], {}), '([col1, col2])\n', (6700, 6714), True, 'import numpy as np\n'), ((1058, 1076), 'numpy.ones', 'np.ones', 
(['t_y.shape'], {}), '(t_y.shape)\n', (1065, 1076), True, 'import numpy as np\n'), ((1127, 1145), 'numpy.ones', 'np.ones', (['t_x.shape'], {}), '(t_x.shape)\n', (1134, 1145), True, 'import numpy as np\n'), ((3284, 3299), 'numpy.max', 'np.max', (['numbers'], {}), '(numbers)\n', (3290, 3299), True, 'import numpy as np\n'), ((4277, 4292), 'numpy.max', 'np.max', (['numbers'], {}), '(numbers)\n', (4283, 4292), True, 'import numpy as np\n'), ((2146, 2163), 'numpy.abs', 'np.abs', (['(I_y - t_y)'], {}), '(I_y - t_y)\n', (2152, 2163), True, 'import numpy as np\n'), ((2189, 2206), 'numpy.abs', 'np.abs', (['(I_x - t_x)'], {}), '(I_x - t_x)\n', (2195, 2206), True, 'import numpy as np\n'), ((2241, 2276), 'numpy.clip', 'np.clip', (['t_y_err', 'EPSILON', 'np.infty'], {}), '(t_y_err, EPSILON, np.infty)\n', (2248, 2276), True, 'import numpy as np\n'), ((2311, 2346), 'numpy.clip', 'np.clip', (['t_x_err', 'EPSILON', 'np.infty'], {}), '(t_x_err, EPSILON, np.infty)\n', (2318, 2346), True, 'import numpy as np\n')] |
import numpy as np
import glob
import sys
from matplotlib import pyplot as plt
import stat_tools as st
from datetime import datetime,timedelta
import pysolar.solar as ps
from scipy.ndimage.morphology import binary_opening
from scipy import ndimage
import ephem
# ---------------------------------------------------------------------------
# Script: overlay the predicted sky position of an ephemeris body on a
# fisheye all-sky camera image, using per-camera calibration parameters.
# NOTE(review): paths and dates are hard-coded; requires local image data.
# ---------------------------------------------------------------------------
deg2rad=np.pi/180  # degrees-to-radians factor (not used below)
camera='HD815_1'
# camera='HD17'
inpath='d:/data/images/'
# Per-camera calibration constants:
#   [0] image size nx0 (=ny0), [1] cy, [2] cx (image center),
#   [3] rotation offset (rad), [4:6] further pose terms,
#   [6:9] radial polynomial coefficients c1..c3.
# NOTE(review): 'HD17' has only 7 entries, so params[camera][6:9] would be
# incomplete for that camera -- confirm before switching cameras.
params = {'HD815_1':[2821.0000,1440.6892,1431.0000,0.1701,0.0084,-0.2048,0.3467,-0.0041,-0.0033],\
          'HD815_2':[2821.0000,1423.9111,1459.000,0.0311,-0.0091,0.1206,0.3455,-0.0035,-0.0032],\
          'HD490':[2843.0000,1472.9511,1482.6685,0.1616,0.0210,-0.5859,0.3465,-0.0043,-0.0030], \
          'HD17':[2817.249,1478.902,1462.346,-0.099,0.012,0.867,2]}
rot=params[camera][3]
nx0=ny0=params[camera][0]
# Mean radial scale of the fisheye image, used by the mapping inside the loop.
nr0=(nx0+ny0)/4
cy,cx=params[camera][1:3]
c1,c2,c3=params[camera][6:9]
# Pixel coordinate grids centered on the optical axis, and radial distance.
x0,y0=np.meshgrid(np.linspace(-nx0//2,nx0//2,nx0),np.linspace(-ny0//2,ny0//2,ny0));
r0=np.sqrt(x0**2+y0**2);
nx0=int(nx0+0.5); ny0=int(ny0+0.5)
mask=r0>1320  # pixels outside the fisheye circle (not used further here)
xstart=int(params[camera][2]-nx0/2+0.5); ystart=int(params[camera][1]-ny0/2+0.5)  # crop origin (unused below)
# Observer location (latitude/longitude as strings, as ephem expects).
gatech = ephem.Observer();
gatech.lat, gatech.lon = '40.88', '-72.87'
# NOTE(review): variable is named 'sun' but tracks the Moon -- confirm intent.
sun=ephem.Moon()
# Load one sample frame and show it as the canvas for the position markers.
f=sorted(glob.glob(inpath+camera+'/'+camera+'_201802281[6-6]1'+'*jpg'))[0];
img=plt.imread(f).astype(np.float32);
fig,ax=plt.subplots(); ax.imshow(img.astype(np.uint8))
# flist=sorted(glob.glob(inpath+camera+'/'+camera+'_201802281[7-9]08[2,3,4]'+'*jpg'));
flist=sorted(glob.glob(inpath+camera+'/'+camera+'_201802281[4-9]1'+'*jpg'));  # NOTE(review): computed but never used
for h in range(24):
    # Build a timestamp for hour h of 2018-03-09 and compute the body's
    # apparent altitude/azimuth at the observer.
    doy='20180309'+'{:02d}'.format(h)+'1010';
    gatech.date = datetime.strptime(doy,'%Y%m%d%H%M%S').strftime('%Y/%m/%d %H:%M:%S')
    sun.compute(gatech)
    if sun.alt<0: continue  # body below the horizon: nothing to draw
    # Zenith angle, and azimuth in the rotated camera frame.
    sz=np.pi/2-sun.alt; saz=(rot+sun.az-np.pi)%(2*np.pi);
#     rref=c1*sz+c2*sz**3+c3*sz**5
#     xref,yref=cx+nx0*rref*np.sin(saz),cy+ny0*rref*np.cos(saz)
    # Presumably an equisolid-angle fisheye mapping r ~ sin(theta/2)
    # -- TODO confirm against the camera's lens model.
    rref=np.sin(sz/2)*np.sqrt(2)*nr0
    xref,yref=cx+rref*np.sin(saz),cy+rref*np.cos(saz)
    circle=plt.Circle((xref,yref),30,color='blue')
    ax.add_artist(circle)
| [
"ephem.Moon",
"ephem.Observer",
"datetime.datetime.strptime",
"numpy.sin",
"matplotlib.pyplot.Circle",
"numpy.linspace",
"glob.glob",
"numpy.cos",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((910, 936), 'numpy.sqrt', 'np.sqrt', (['(x0 ** 2 + y0 ** 2)'], {}), '(x0 ** 2 + y0 ** 2)\n', (917, 936), True, 'import numpy as np\n'), ((1072, 1088), 'ephem.Observer', 'ephem.Observer', ([], {}), '()\n', (1086, 1088), False, 'import ephem\n'), ((1138, 1150), 'ephem.Moon', 'ephem.Moon', ([], {}), '()\n', (1148, 1150), False, 'import ephem\n'), ((1277, 1291), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1289, 1291), True, 'from matplotlib import pyplot as plt\n'), ((840, 877), 'numpy.linspace', 'np.linspace', (['(-nx0 // 2)', '(nx0 // 2)', 'nx0'], {}), '(-nx0 // 2, nx0 // 2, nx0)\n', (851, 877), True, 'import numpy as np\n'), ((872, 909), 'numpy.linspace', 'np.linspace', (['(-ny0 // 2)', '(ny0 // 2)', 'ny0'], {}), '(-ny0 // 2, ny0 // 2, ny0)\n', (883, 909), True, 'import numpy as np\n'), ((1426, 1497), 'glob.glob', 'glob.glob', (["(inpath + camera + '/' + camera + '_201802281[4-9]1' + '*jpg')"], {}), "(inpath + camera + '/' + camera + '_201802281[4-9]1' + '*jpg')\n", (1435, 1497), False, 'import glob\n'), ((1999, 2041), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(xref, yref)', '(30)'], {'color': '"""blue"""'}), "((xref, yref), 30, color='blue')\n", (2009, 2041), True, 'from matplotlib import pyplot as plt\n'), ((1162, 1233), 'glob.glob', 'glob.glob', (["(inpath + camera + '/' + camera + '_201802281[6-6]1' + '*jpg')"], {}), "(inpath + camera + '/' + camera + '_201802281[6-6]1' + '*jpg')\n", (1171, 1233), False, 'import glob\n'), ((1235, 1248), 'matplotlib.pyplot.imread', 'plt.imread', (['f'], {}), '(f)\n', (1245, 1248), True, 'from matplotlib import pyplot as plt\n'), ((1586, 1624), 'datetime.datetime.strptime', 'datetime.strptime', (['doy', '"""%Y%m%d%H%M%S"""'], {}), "(doy, '%Y%m%d%H%M%S')\n", (1603, 1624), False, 'from datetime import datetime, timedelta\n'), ((1894, 1908), 'numpy.sin', 'np.sin', (['(sz / 2)'], {}), '(sz / 2)\n', (1900, 1908), True, 'import numpy as np\n'), ((1907, 1917), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', 
(1914, 1917), True, 'import numpy as np\n'), ((1944, 1955), 'numpy.sin', 'np.sin', (['saz'], {}), '(saz)\n', (1950, 1955), True, 'import numpy as np\n'), ((1964, 1975), 'numpy.cos', 'np.cos', (['saz'], {}), '(saz)\n', (1970, 1975), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""sbpy Spectroscopy Sources Module
Spectrophotometric classes that encapsulate synphot.SpectralSource and
synphot.Observation in order to generate sbpy spectra and photometry.
Requires synphot.
"""
__all__ = [
'BlackbodySource', 'Reddening'
]
import numpy as np
from abc import ABC
import astropy.units as u
from astropy.utils.data import download_file, _is_url
try:
import synphot
from synphot import SpectralElement, BaseUnitlessSpectrum
except ImportError:
synphot = None
class SpectralElement:
pass
from ..exceptions import SbpyException
__doctest_requires__ = {
'SpectralSource': ['synphot'],
'BlackbodySource': ['synphot'],
}
class SinglePointSpectrumError(SbpyException):
"""Single point provided, but multiple values expected."""
class SynphotRequired(SbpyException):
pass
class SpectralSource(ABC):
"""Abstract base class for SBPy spectral sources.
Requires `~synphot`.
Parameters
----------
source : `~synphot.SourceSpectrum`
The source spectrum.
description : string, optional
A brief description of the source spectrum.
Attributes
----------
wave - Wavelengths of the source spectrum.
fluxd - Source spectrum.
description - Brief description of the source spectrum.
meta - Meta data from ``source``, if any.
"""
def __init__(self, source, description=None):
if synphot is None:
raise SynphotRequired(
'synphot required for {}.'.format(self.__class__.__name__))
self._source = source
self._description = description
@classmethod
def from_array(cls, wave, fluxd, meta=None, **kwargs):
"""Create standard from arrays.
Parameters
----------
wave : `~astropy.units.Quantity`
The spectral wavelengths.
fluxd : `~astropy.units.Quantity`
The spectral flux densities.
meta : dict, optional
Meta data.
**kwargs
Passed to object initialization.
"""
if synphot is None:
raise ImportError(
'synphot required for {}.'.format(cls.__name__))
source = synphot.SourceSpectrum(
synphot.Empirical1D, points=wave, lookup_table=fluxd,
meta=meta)
return cls(source, **kwargs)
@classmethod
def from_file(cls, filename, wave_unit=None, flux_unit=None,
cache=True, **kwargs):
"""Load the source spectrum from a file.
NaNs are dropped.
Parameters
----------
filename : string
The name of the file. See
`~synphot.SourceSpectrum.from_file` for details.
wave_unit, flux_unit : str or `~astropy.units.Unit`, optional
Wavelength and flux units in the file.
cache : bool, optional
If ``True``, cache the contents of URLs.
**kwargs
Passed to object initialization.
"""
if synphot is None:
raise ImportError(
'synphot required for {}.'.format(cls.__name__))
if filename.lower().endswith(('.fits', '.fit', '.fz')):
read_spec = synphot.specio.read_fits_spec
else:
read_spec = synphot.specio.read_ascii_spec
# URL cache because synphot.SourceSpectrum.from_file does not
if _is_url(filename):
fn = download_file(filename, cache=True)
else:
fn = filename
spec = read_spec(fn, wave_unit=wave_unit, flux_unit=flux_unit)
i = np.isfinite(spec[1] * spec[2])
source = synphot.SourceSpectrum(
synphot.Empirical1D, points=spec[1][i], lookup_table=spec[2][i],
meta={'header': spec[0]})
return cls(source, **kwargs)
@property
def description(self):
"""Description of the source spectrum."""
return self._description
@property
def wave(self):
"""Wavelengths of the source spectrum."""
return self.source.waveset
@property
def fluxd(self):
"""The source spectrum."""
return self.source(self._source.waveset, flux_unit='W / (m2 um)')
@property
def source(self):
return self._source
@property
def meta(self):
self._source.meta
def __call__(self, wave_or_freq, unit=None):
"""Evaluate/interpolate the source spectrum.
Parameters
----------
wave_or_freq : `~astropy.units.Quantity`
Requested wavelengths or frequencies of the resulting
spectrum.
unit : string, `~astropy.units.Unit`, optional
Spectral units of the output (flux density). If ``None``,
the default depends on ``wave_or_freq``: W/(m2 μm) for
wavelengths, Jy for frequencies.
Returns
-------
fluxd : `~astropy.units.Quantity`
The spectrum evaluated/interpolated to the requested
wavelengths or frequencies.
"""
from .. import units as sbu # avoid circular dependency
if unit is not None:
unit = u.Unit(unit)
elif wave_or_freq.unit.is_equivalent('m'):
unit = u.Unit('W/(m2 um)')
else:
unit = u.Jy
if unit.is_equivalent(sbu.VEGA):
fluxd = self.source(wave_or_freq, 'W/(m2 um)').to(
unit, sbu.spectral_density_vega(wave_or_freq))
else:
fluxd = self.source(wave_or_freq, unit)
return fluxd
def observe(self, wfb, unit=None, interpolate=False, **kwargs):
"""Observe source as through filters or spectrometer.
Calls ``observe_bandpass``, ``observe_spectrum``, or
``self()``, as appropriate.
Parameters
----------
wfb : `~astropy.units.Quantity`, `~synphot.SpectralElement`
Wavelengths, frequencies, or bandpasses. May also be a
list of ``SpectralElement``s.
unit : string, `~astropy.units.Unit`, optional
Spectral flux density units for the output. If ``None``,
the default depends on ``wfb``: W/(m2 μm) for wavelengths
or bandpasses, Jy for frequencies.
interpolate : bool, optional
For wavelengths/frequencies, set to ``True`` for
interpolation instead of rebinning. Use this when the
spectral resolution of the source is close to that of the
requested wavelengths.
**kwargs
Additional keyword arguments for
`~synphot.observation.Observation`, e.g., ``force``.
Returns
-------
fluxd : `~astropy.units.Quantity`
The spectrum rebinned.
Raises
------
SinglePointSpectrumError - If requested wavelengths or
frequencies has only one value.
Notes
-----
Method for spectra adapted from AstroBetter post by <NAME>:
https://www.astrobetter.com/blog/2013/08/12/python-tip-re-sampling-spectra-with-pysynphot/
"""
if isinstance(wfb, (list, tuple, SpectralElement)):
lambda_eff, fluxd = self.observe_bandpass(
wfb, unit=unit, **kwargs)
elif isinstance(wfb, u.Quantity):
if interpolate:
fluxd = self(wfb, unit=unit)
else:
fluxd = self.observe_spectrum(wfb, unit=unit, **kwargs)
else:
raise TypeError('Unsupported type for `wfb` type: {}'
.format(type(wfb)))
return fluxd
def observe_bandpass(self, bp, unit=None, **kwargs):
"""Observe through a bandpass.
Parameters
----------
bp : `~synphot.SpectralElement`, list, or tuple
Bandpass.
unit : string, `~astropy.units.Unit`, optional
Spectral flux density units for the output. The default
is W/(m2 μm).
**kwargs
Additional keyword arguments for
`~synphot.observation.Observation`, e.g., ``force``.
Returns
-------
lambda_eff : `~astropy.units.Quantity`
Effective wavelength(s) of the observation(s).
fluxd : `~astropy.units.Quantity`
The spectrum rebinned.
"""
from .. import units as sbu # avoid circular dependency
# promote single bandpasses to a list, but preserve number of
# dimensions
if isinstance(bp, (SpectralElement, str)):
ndim = 0
bp = [bp]
else:
ndim = np.ndim(bp)
if unit is None:
unit = u.Unit('W/(m2 um)')
else:
unit = u.Unit(unit)
fluxd = np.ones(len(bp)) * unit
for i in range(len(bp)):
obs = synphot.Observation(self.source, bp[i], **kwargs)
lambda_eff = obs.effective_wavelength()
lambda_pivot = obs.bandpass.pivot()
_fluxd = obs.effstim('W/(m2 um)')
if unit.is_equivalent(sbu.VEGAmag):
fluxd[i] = _fluxd.to(unit, sbu.spectral_density_vega(bp[i]))
else:
fluxd[i] = _fluxd.to(unit, u.spectral_density(lambda_pivot))
if np.ndim(fluxd) != ndim:
fluxd = fluxd.squeeze()
return lambda_eff, fluxd
def observe_spectrum(self, wave_or_freq, unit=None, **kwargs):
"""Observe source as through a spectrometer.
.. Important:: This method works best when the requested
spectral resolution is lower than the spectral resolution
of the internal data. If the requested
wavelengths/frequencies are exactly the same as the
internal spectrum, then the internal spectrum will be
returned without binning. This special case does not work
for subsets of the wavelengths.
Parameters
----------
wave_or_freq : `~astropy.units.Quantity`
Wavelengths or frequencies of the spectrum. Spectral bins
will be centered at these values. The length must be
larger than 1.
unit : string, `~astropy.units.Unit`, optional
Spectral flux density units for the output. If ``None``,
the default is W/(m2 μm) for wavelengths, Jy for
frequencies.
**kwargs
Additional keyword arguments for
`~synphot.observation.Observation`, e.g., ``force``.
Returns
-------
fluxd : `~astropy.units.Quantity`
The spectrum rebinned.
Raises
------
SinglePointSpectrumError - If requested wavelengths or
frequencies has only one value.
Notes
-----
Method for spectra adapted from AstroBetter post by <NAME>:
https://www.astrobetter.com/blog/2013/08/12/python-tip-re-sampling-spectra-with-pysynphot/
"""
from .. import units as sbu # avoid circular dependency
if np.size(wave_or_freq) == 1:
raise SinglePointSpectrumError(
'Multiple wavelengths or frequencies required for '
'observe_spectrum. Instead consider interpolation '
'with {}().'
.format(self.__class__.__name__))
if unit is None:
if wave_or_freq.unit.is_equivalent('m'):
unit = u.Unit('W/(m2 um)')
else:
unit = u.Jy
else:
unit = u.Unit(unit)
specele = synphot.SpectralElement(synphot.ConstFlux1D, amplitude=1)
# Specele is defined over all wavelengths, but most spectral
# standards are not. force='taper' will affect retrieving
# flux densities at the edges of the spectrum, but is
# preferred to avoid wild extrapolation.
kwargs['force'] = kwargs.get('force', 'taper')
obs = synphot.Observation(
self.source, specele, binset=wave_or_freq, **kwargs)
if unit.is_equivalent(sbu.VEGAmag):
fluxd = obs.sample_binned(flux_unit='W/(m2 um)').to(
unit, sbu.spectral_density_vega(wave_or_freq))
else:
fluxd = obs.sample_binned(flux_unit=unit)
return fluxd
def color_index(self, wfb, unit):
"""Color index (magnitudes) and effective wavelengths.
Parameters
----------
wfb : `~astropy.units.Quantity` or tuple of `~synphot.SectralElement`
Two wavelengths, frequencies, or bandpasses.
unit : string or `~astropy.units.MagUnit`
Units for the calculation, e.g., ``astropy.units.ABmag`` or
``sbpy.units.VEGAmag``.
Returns
-------
eff_wave : `~astropy.units.Quantity`
Effective wavelengths for each ``wfb``.
ci : `~astropy.units.Quantity`
Color index, ``m_0 - m_1``, where 0 and 1 are element
indexes for ``wfb``.
"""
eff_wave = []
m = np.zeros(2) * u.Unit(unit)
for i in range(2):
if isinstance(wfb[i], u.Quantity):
if wfb[i].unit.is_equivalent(u.Hz):
eff_wave.append(wfb[i].to(u.um, u.spectral()))
else:
eff_wave.append(wfb[i])
m[i] = self(eff_wave[i], unit=unit)
elif isinstance(wfb[i], (list, tuple, SpectralElement)):
w, m[i] = self.observe_bandpass(wfb[i], unit=unit)
eff_wave.append(w)
else:
raise TypeError('Unsupported type for `wfb` type: {}'
.format(type(wfb[i])))
ci = m[0] - m[1]
return u.Quantity(eff_wave), ci
def redden(self, S):
"""Redden the spectrum.
Parameters
----------
S : `~SpectralGradient`
The spectral gradient to redden.
Returns
-------
spec : `~SpectralSource`
Reddened spectrum
"""
from copy import deepcopy
r = Reddening(S)
red_spec = deepcopy(self)
red_spec._source = red_spec.source * r
if red_spec.description is not None:
red_spec._description = '{} reddened by {} at {}'.format(
red_spec.description, S, S.wave0)
return red_spec
class BlackbodySource(SpectralSource):
    """Blackbody sphere.

    Spectral flux densities are calculated from ``pi * B(T)``, where
    ``B`` is the Planck function.

    Parameters
    ----------
    T : `~astropy.units.Quantity`, required
        Temperature in Kelvin.

    """

    def __init__(self, T=None):
        super().__init__(None, description='πB(T)')
        if T is None:
            raise TypeError('T is required.')
        self._T = u.Quantity(T, u.K)
        # pi * B(T): a Planck spectrum at temperature T, scaled by pi
        planck = synphot.SourceSpectrum(
            synphot.BlackBody1D, temperature=self._T.value)
        self._source = planck * np.pi

    def __repr__(self):
        return f'<BlackbodySource: T={self._T}>'

    @property
    def T(self):
        # temperature as a Quantity in Kelvin
        return self._T
class Reddening(BaseUnitlessSpectrum):
    """Class to handle simple linear reddening.

    Parameters
    ----------
    S : `~SpectralGradient`
        The spectral gradient to redden.  Must carry a normalization
        wavelength (``.wave0``).

    Raises
    ------
    ValueError
        If ``S`` has no normalization wavelength.

    """

    @u.quantity_input(S=u.percent / u.um)
    def __init__(self, S):
        if getattr(S, 'wave0', None) is None:
            # fix: original message read "required by not available"
            raise ValueError("Normalization wavelength in `S` (.wave0) is "
                             "required but not available.")
        # Two-point linear lookup table anchored at wave0 and 2 * wave0;
        # slope df is the (dimensionless) gradient per wave0.
        wv = [1, 2] * S.wave0
        df = (S.wave0 * S).to('').value
        super().__init__(
            synphot.Empirical1D, points=wv, lookup_table=[1, 1 + df],
            fill_value=None)
| [
"synphot.SpectralElement",
"copy.deepcopy",
"astropy.units.Quantity",
"numpy.size",
"astropy.units.quantity_input",
"numpy.ndim",
"numpy.isfinite",
"numpy.zeros",
"astropy.utils.data._is_url",
"synphot.Observation",
"astropy.units.spectral",
"astropy.utils.data.download_file",
"astropy.units... | [((15408, 15444), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'S': '(u.percent / u.um)'}), '(S=u.percent / u.um)\n', (15424, 15444), True, 'import astropy.units as u\n'), ((2295, 2386), 'synphot.SourceSpectrum', 'synphot.SourceSpectrum', (['synphot.Empirical1D'], {'points': 'wave', 'lookup_table': 'fluxd', 'meta': 'meta'}), '(synphot.Empirical1D, points=wave, lookup_table=fluxd,\n meta=meta)\n', (2317, 2386), False, 'import synphot\n'), ((3490, 3507), 'astropy.utils.data._is_url', '_is_url', (['filename'], {}), '(filename)\n', (3497, 3507), False, 'from astropy.utils.data import download_file, _is_url\n'), ((3686, 3716), 'numpy.isfinite', 'np.isfinite', (['(spec[1] * spec[2])'], {}), '(spec[1] * spec[2])\n', (3697, 3716), True, 'import numpy as np\n'), ((3734, 3852), 'synphot.SourceSpectrum', 'synphot.SourceSpectrum', (['synphot.Empirical1D'], {'points': 'spec[1][i]', 'lookup_table': 'spec[2][i]', 'meta': "{'header': spec[0]}"}), "(synphot.Empirical1D, points=spec[1][i], lookup_table\n =spec[2][i], meta={'header': spec[0]})\n", (3756, 3852), False, 'import synphot\n'), ((11662, 11719), 'synphot.SpectralElement', 'synphot.SpectralElement', (['synphot.ConstFlux1D'], {'amplitude': '(1)'}), '(synphot.ConstFlux1D, amplitude=1)\n', (11685, 11719), False, 'import synphot\n'), ((12038, 12110), 'synphot.Observation', 'synphot.Observation', (['self.source', 'specele'], {'binset': 'wave_or_freq'}), '(self.source, specele, binset=wave_or_freq, **kwargs)\n', (12057, 12110), False, 'import synphot\n'), ((14221, 14235), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (14229, 14235), False, 'from copy import deepcopy\n'), ((14935, 14953), 'astropy.units.Quantity', 'u.Quantity', (['T', 'u.K'], {}), '(T, u.K)\n', (14945, 14953), True, 'import astropy.units as u\n'), ((3526, 3561), 'astropy.utils.data.download_file', 'download_file', (['filename'], {'cache': '(True)'}), '(filename, cache=True)\n', (3539, 3561), False, 'from 
astropy.utils.data import download_file, _is_url\n'), ((5255, 5267), 'astropy.units.Unit', 'u.Unit', (['unit'], {}), '(unit)\n', (5261, 5267), True, 'import astropy.units as u\n'), ((8724, 8735), 'numpy.ndim', 'np.ndim', (['bp'], {}), '(bp)\n', (8731, 8735), True, 'import numpy as np\n'), ((8781, 8800), 'astropy.units.Unit', 'u.Unit', (['"""W/(m2 um)"""'], {}), "('W/(m2 um)')\n", (8787, 8800), True, 'import astropy.units as u\n'), ((8834, 8846), 'astropy.units.Unit', 'u.Unit', (['unit'], {}), '(unit)\n', (8840, 8846), True, 'import astropy.units as u\n'), ((8939, 8988), 'synphot.Observation', 'synphot.Observation', (['self.source', 'bp[i]'], {}), '(self.source, bp[i], **kwargs)\n', (8958, 8988), False, 'import synphot\n'), ((9368, 9382), 'numpy.ndim', 'np.ndim', (['fluxd'], {}), '(fluxd)\n', (9375, 9382), True, 'import numpy as np\n'), ((11141, 11162), 'numpy.size', 'np.size', (['wave_or_freq'], {}), '(wave_or_freq)\n', (11148, 11162), True, 'import numpy as np\n'), ((11630, 11642), 'astropy.units.Unit', 'u.Unit', (['unit'], {}), '(unit)\n', (11636, 11642), True, 'import astropy.units as u\n'), ((13141, 13152), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (13149, 13152), True, 'import numpy as np\n'), ((13155, 13167), 'astropy.units.Unit', 'u.Unit', (['unit'], {}), '(unit)\n', (13161, 13167), True, 'import astropy.units as u\n'), ((13835, 13855), 'astropy.units.Quantity', 'u.Quantity', (['eff_wave'], {}), '(eff_wave)\n', (13845, 13855), True, 'import astropy.units as u\n'), ((14977, 15047), 'synphot.SourceSpectrum', 'synphot.SourceSpectrum', (['synphot.BlackBody1D'], {'temperature': 'self._T.value'}), '(synphot.BlackBody1D, temperature=self._T.value)\n', (14999, 15047), False, 'import synphot\n'), ((5338, 5357), 'astropy.units.Unit', 'u.Unit', (['"""W/(m2 um)"""'], {}), "('W/(m2 um)')\n", (5344, 5357), True, 'import astropy.units as u\n'), ((11531, 11550), 'astropy.units.Unit', 'u.Unit', (['"""W/(m2 um)"""'], {}), "('W/(m2 um)')\n", (11537, 11550), True, 
'import astropy.units as u\n'), ((9322, 9354), 'astropy.units.spectral_density', 'u.spectral_density', (['lambda_pivot'], {}), '(lambda_pivot)\n', (9340, 9354), True, 'import astropy.units as u\n'), ((13346, 13358), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (13356, 13358), True, 'import astropy.units as u\n')] |
from collections import defaultdict
from distutils.version import LooseVersion
import numpy as np
import pytest
from opensfm import multiview, types, geo
from opensfm.synthetic_data import synthetic_examples
from opensfm.synthetic_data import synthetic_scene
def pytest_configure(config):
    """Pytest hook: force legacy numpy print options before tests run."""
    use_legacy_numpy_printoptions()
def use_legacy_numpy_printoptions():
    """Ensure numpy uses the legacy (pre-1.14) print format.

    Keeps array ``repr`` output stable across numpy versions.  The version
    check parses ``np.__version__`` directly instead of using
    ``distutils.version.LooseVersion``, which is deprecated (distutils was
    removed from the standard library in Python 3.12).
    """
    major_minor = tuple(int(part) for part in np.__version__.split(".")[:2])
    if major_minor > (1, 13):
        np.set_printoptions(legacy="1.13")
@pytest.fixture(scope="module")
def null_scene() -> types.Reconstruction:
    """An empty reconstruction with no cameras, shots, or points."""
    return types.Reconstruction()
@pytest.fixture(scope="module")
def scene_synthetic() -> synthetic_scene.SyntheticInputData:
    """Synthetic circle scene with noisy observations and shifted GCPs."""
    np.random.seed(42)
    data = synthetic_examples.synthetic_circle_scene()
    reference = geo.TopocentricConverter(47.0, 6.0, 0)
    return synthetic_scene.SyntheticInputData(
        data.get_reconstruction(),
        reference,
        40,  # maximum depth
        1.0,  # projection noise
        5.0,  # gps noise
        0.1,  # imu noise
        (0.01, 0.1),  # gcp noise
        False,
        10,  # gcps count
        [10.0, 0.0, 100.0],  # gcps shift
    )
@pytest.fixture(scope="session")
def scene_synthetic_cube():
    """Noise-free synthetic cube scene plus its tracks manager."""
    np.random.seed(42)
    data = synthetic_examples.synthetic_cube_scene()
    reconstruction = data.get_reconstruction()
    input_data = synthetic_scene.SyntheticInputData(
        reconstruction,
        geo.TopocentricConverter(47.0, 6.0, 0),
        40, 0.0, 0.0, 0.0, (0.0, 0.0), False,
    )
    return reconstruction, input_data.tracks_manager
@pytest.fixture(scope="module")
def scene_synthetic_rig() -> synthetic_scene.SyntheticInputData:
    """Synthetic rig scene with small GPS/IMU noise and no GCP noise."""
    np.random.seed(42)
    data = synthetic_examples.synthetic_rig_scene()
    reference = geo.TopocentricConverter(47.0, 6.0, 0)
    return synthetic_scene.SyntheticInputData(
        data.get_reconstruction(),
        reference,
        40,  # maximum depth
        1.0,  # projection noise
        0.1,  # gps noise
        0.1,  # imu noise
        (0.0, 0.0),  # gcp noise
        False,
    )
@pytest.fixture(scope="module")
def scene_synthetic_triangulation() -> synthetic_scene.SyntheticInputData:
    """Synthetic circle scene tuned for triangulation (shifted GCPs)."""
    np.random.seed(42)
    data = synthetic_examples.synthetic_circle_scene()
    reference = geo.TopocentricConverter(47.0, 6.0, 0)
    return synthetic_scene.SyntheticInputData(
        data.get_reconstruction(),
        reference,
        40,  # maximum depth
        1.0,  # projection noise
        0.1,  # gps noise
        1.0,  # imu noise
        (0.0, 0.0),  # gcp noise
        False,
        10,  # gcps count
        [10.0, 0.0, 100.0],  # gcps shift
    )
@pytest.fixture(scope="module")
def pairs_and_poses():
    """Image pairs with their common observations and relative poses.

    Built from the noise-free synthetic cube scene.  Returns
    ``(pairs, poses, camera, features, tracks_manager, reconstruction)``
    where ``pairs[(im1, im2)]`` holds matched point lists and
    ``poses[(im1, im2)]`` the pose of ``im2`` relative to ``im1``.
    """
    np.random.seed(42)
    data = synthetic_examples.synthetic_cube_scene()
    reconstruction = data.get_reconstruction()
    reference = geo.TopocentricConverter(0, 0, 0)
    input_data = synthetic_scene.SyntheticInputData(
        reconstruction, reference, 40, 0.0, 0.0, 0.0, (0.0, 0.0), False
    )
    features, tracks_manager = input_data.features, input_data.tracks_manager
    # Use a set for O(1) membership tests in the loop below; the original
    # list made every `k in points_keys` check linear.
    points_keys = set(reconstruction.points.keys())
    pairs, poses = defaultdict(list), defaultdict(list)
    for im1, im2 in tracks_manager.get_all_pairs_connectivity():
        tuples = tracks_manager.get_all_common_observations(im1, im2)
        f1 = [p.point for k, p, _ in tuples if k in points_keys]
        f2 = [p.point for k, _, p in tuples if k in points_keys]
        pairs[im1, im2].append((f1, f2))
        pose1 = reconstruction.shots[im1].pose
        pose2 = reconstruction.shots[im2].pose
        poses[im1, im2] = pose2.relative_to(pose1)
    camera = list(reconstruction.cameras.values())[0]
    return pairs, poses, camera, features, tracks_manager, reconstruction
@pytest.fixture(scope="module")
def pairs_and_their_E(pairs_and_poses):
    """Randomly sampled image pairs with their ground-truth essential matrices.

    Returns a list of ``(bearings1, bearings2, E, pose)`` tuples; ``E`` is
    normalized to unit Frobenius norm.
    """
    pairs, poses, camera, _, _, _ = pairs_and_poses

    # Largest pairs first in the candidate ordering.
    pairs = list(sorted(zip(pairs.values(), poses.values()), key=lambda x: -len(x[0])))

    num_pairs = 20
    # np.random.randint's upper bound is exclusive; the original passed
    # len(pairs) - 1, which made the last pair unselectable.
    indices = [np.random.randint(0, len(pairs)) for i in range(num_pairs)]

    ret_pairs = []
    for idx in indices:
        pair = pairs[idx]

        # Flatten pixel coordinates and convert to bearing vectors.
        p1 = np.array([x for x, _ in pair[0]])
        p2 = np.array([x for _, x in pair[0]])
        p1 = p1.reshape(-1, p1.shape[-1])
        p2 = p2.reshape(-1, p2.shape[-1])
        f1 = camera.pixel_bearing_many(p1)
        f2 = camera.pixel_bearing_many(p2)

        # Ground-truth E = R [t]_x from the relative pose.
        pose = pair[1]
        R = pose.get_rotation_matrix()
        t_x = multiview.cross_product_matrix(pose.get_origin())
        e = R.dot(t_x)
        e /= np.linalg.norm(e)

        ret_pairs.append((f1, f2, e, pose))
    return ret_pairs
@pytest.fixture(scope="module")
def shots_and_their_points(pairs_and_poses):
    """Per-shot (pose, bearings, 3D points) arrays from reconstruction tracks."""
    _, _, _, _, tracks_manager, reconstruction = pairs_and_poses

    ret_shots = []
    for shot in reconstruction.shots.values():
        bearings, points = [], []
        observations = tracks_manager.get_shot_observations(shot.id)
        for track_id, obs in observations.items():
            # only tracks that triangulated into a reconstruction point
            if track_id not in reconstruction.points:
                continue
            point = reconstruction.points[track_id]
            bearings.append(shot.camera.pixel_bearing(obs.point))
            points.append(point.coordinates)
        ret_shots.append((shot.pose, np.array(bearings), np.array(points)))
    return ret_shots
| [
"numpy.set_printoptions",
"numpy.random.seed",
"opensfm.synthetic_data.synthetic_examples.synthetic_circle_scene",
"distutils.version.LooseVersion",
"pytest.fixture",
"opensfm.geo.TopocentricConverter",
"collections.defaultdict",
"opensfm.synthetic_data.synthetic_examples.synthetic_cube_scene",
"num... | [((521, 551), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (535, 551), False, 'import pytest\n'), ((667, 697), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (681, 697), False, 'import pytest\n'), ((1340, 1371), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1354, 1371), False, 'import pytest\n'), ((1766, 1796), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1780, 1796), False, 'import pytest\n'), ((2342, 2372), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2356, 2372), False, 'import pytest\n'), ((3028, 3058), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (3042, 3058), False, 'import pytest\n'), ((4158, 4188), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (4172, 4188), False, 'import pytest\n'), ((5054, 5084), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (5068, 5084), False, 'import pytest\n'), ((615, 637), 'opensfm.types.Reconstruction', 'types.Reconstruction', ([], {}), '()\n', (635, 637), False, 'from opensfm import multiview, types, geo\n'), ((763, 781), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (777, 781), True, 'import numpy as np\n'), ((793, 836), 'opensfm.synthetic_data.synthetic_examples.synthetic_circle_scene', 'synthetic_examples.synthetic_circle_scene', ([], {}), '()\n', (834, 836), False, 'from opensfm.synthetic_data import synthetic_examples\n'), ((1030, 1068), 'opensfm.geo.TopocentricConverter', 'geo.TopocentricConverter', (['(47.0)', '(6.0)', '(0)'], {}), '(47.0, 6.0, 0)\n', (1054, 1068), False, 'from opensfm import multiview, types, geo\n'), ((1404, 1422), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1418, 1422), True, 'import numpy as 
np\n'), ((1434, 1475), 'opensfm.synthetic_data.synthetic_examples.synthetic_cube_scene', 'synthetic_examples.synthetic_cube_scene', ([], {}), '()\n', (1473, 1475), False, 'from opensfm.synthetic_data import synthetic_examples\n'), ((1493, 1531), 'opensfm.geo.TopocentricConverter', 'geo.TopocentricConverter', (['(47.0)', '(6.0)', '(0)'], {}), '(47.0, 6.0, 0)\n', (1517, 1531), False, 'from opensfm import multiview, types, geo\n'), ((1596, 1699), 'opensfm.synthetic_data.synthetic_scene.SyntheticInputData', 'synthetic_scene.SyntheticInputData', (['reconstruction', 'reference', '(40)', '(0.0)', '(0.0)', '(0.0)', '(0.0, 0.0)', '(False)'], {}), '(reconstruction, reference, 40, 0.0, 0.0,\n 0.0, (0.0, 0.0), False)\n', (1630, 1699), False, 'from opensfm.synthetic_data import synthetic_scene\n'), ((1866, 1884), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1880, 1884), True, 'import numpy as np\n'), ((1896, 1936), 'opensfm.synthetic_data.synthetic_examples.synthetic_rig_scene', 'synthetic_examples.synthetic_rig_scene', ([], {}), '()\n', (1934, 1936), False, 'from opensfm.synthetic_data import synthetic_examples\n'), ((2072, 2110), 'opensfm.geo.TopocentricConverter', 'geo.TopocentricConverter', (['(47.0)', '(6.0)', '(0)'], {}), '(47.0, 6.0, 0)\n', (2096, 2110), False, 'from opensfm import multiview, types, geo\n'), ((2452, 2470), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2466, 2470), True, 'import numpy as np\n'), ((2482, 2525), 'opensfm.synthetic_data.synthetic_examples.synthetic_circle_scene', 'synthetic_examples.synthetic_circle_scene', ([], {}), '()\n', (2523, 2525), False, 'from opensfm.synthetic_data import synthetic_examples\n'), ((2718, 2756), 'opensfm.geo.TopocentricConverter', 'geo.TopocentricConverter', (['(47.0)', '(6.0)', '(0)'], {}), '(47.0, 6.0, 0)\n', (2742, 2756), False, 'from opensfm import multiview, types, geo\n'), ((3086, 3104), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3100, 3104), True, 
'import numpy as np\n'), ((3116, 3157), 'opensfm.synthetic_data.synthetic_examples.synthetic_cube_scene', 'synthetic_examples.synthetic_cube_scene', ([], {}), '()\n', (3155, 3157), False, 'from opensfm.synthetic_data import synthetic_examples\n'), ((3222, 3255), 'opensfm.geo.TopocentricConverter', 'geo.TopocentricConverter', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3246, 3255), False, 'from opensfm import multiview, types, geo\n'), ((3273, 3376), 'opensfm.synthetic_data.synthetic_scene.SyntheticInputData', 'synthetic_scene.SyntheticInputData', (['reconstruction', 'reference', '(40)', '(0.0)', '(0.0)', '(0.0)', '(0.0, 0.0)', '(False)'], {}), '(reconstruction, reference, 40, 0.0, 0.0,\n 0.0, (0.0, 0.0), False)\n', (3307, 3376), False, 'from opensfm.synthetic_data import synthetic_scene\n'), ((483, 517), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (502, 517), True, 'import numpy as np\n'), ((3538, 3555), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3549, 3555), False, 'from collections import defaultdict\n'), ((3557, 3574), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3568, 3574), False, 'from collections import defaultdict\n'), ((4553, 4586), 'numpy.array', 'np.array', (['[x for x, _ in pair[0]]'], {}), '([x for x, _ in pair[0]])\n', (4561, 4586), True, 'import numpy as np\n'), ((4600, 4633), 'numpy.array', 'np.array', (['[x for _, x in pair[0]]'], {}), '([x for _, x in pair[0]])\n', (4608, 4633), True, 'import numpy as np\n'), ((4967, 4984), 'numpy.linalg.norm', 'np.linalg.norm', (['e'], {}), '(e)\n', (4981, 4984), True, 'import numpy as np\n'), ((423, 451), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (435, 451), False, 'from distutils.version import LooseVersion\n'), ((5630, 5648), 'numpy.array', 'np.array', (['bearings'], {}), '(bearings)\n', (5638, 5648), True, 'import numpy as np\n'), ((5650, 
5666), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (5658, 5666), True, 'import numpy as np\n')] |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import io
from typing import Any, Dict, List
import numpy as np
from jina.executors.decorators import batching, single
from jina.executors.encoders.frameworks import BaseTFEncoder
from jina.executors.segmenters import BaseSegmenter
from jinahub.vggish_input import *
from jinahub.vggish_params import *
from jinahub.vggish_postprocess import *
from jinahub.vggish_slim import *
class VggishEncoder(BaseTFEncoder):
    """Encode log-mel audio examples into post-processed VGGish embeddings."""

    def __init__(self, model_path: str, pca_path: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model_path = model_path
        self.pca_path = pca_path

    def post_init(self):
        self.to_device()
        import tensorflow as tf
        # VGGish is a TF1-style graph; run it in a v1 session.
        tf.compat.v1.disable_eager_execution()
        self.sess = tf.compat.v1.Session()
        define_vggish_slim()
        load_vggish_slim_checkpoint(self.sess, self.model_path)
        graph = self.sess.graph
        self.feature_tensor = graph.get_tensor_by_name(INPUT_TENSOR_NAME)
        self.embedding_tensor = graph.get_tensor_by_name(OUTPUT_TENSOR_NAME)
        self.post_processor = Postprocessor(self.pca_path)

    @batching
    def encode(self, content: Any, *args, **kwargs) -> Any:
        [raw_embedding] = self.sess.run(
            [self.embedding_tensor],
            feed_dict={self.feature_tensor: content})
        processed = self.post_processor.postprocess(raw_embedding)
        # center at 128 and scale by 128
        return (np.float32(processed) - 128.) / 128.
class VggishSegmenter(BaseSegmenter):
    """Segment audio into log-mel spectrogram chunks for VGGish."""

    def __init__(self, window_length_secs=0.025, hop_length_secs=0.010, *args, **kwargs):
        """
        :param window_length_secs: duration of each analysis window, in seconds
        :param hop_length_secs: seconds to advance between successive windows
        """
        super().__init__(*args, **kwargs)
        self.window_length_secs = window_length_secs
        self.hop_length_secs = hop_length_secs

    @single(slice_nargs=2, flatten_output=False)
    def segment(self, uri, buffer, *args, **kwargs) -> List[Dict]:
        # load the audio samples, preferring the in-memory buffer
        data, sample_rate = self.read_wav(uri, buffer)
        if data is None:
            return []
        # one chunk per log-mel example
        chunks = []
        for idx, blob in enumerate(self.wav2mel(data, sample_rate)):
            self.logger.debug(f'blob: {blob.shape}')
            chunks.append(dict(offset=idx, weight=1.0, blob=blob))
        return chunks

    def wav2mel(self, blob, sample_rate):
        """Convert a waveform array into squeezed log-mel examples."""
        self.logger.debug(f'blob: {blob.shape}, sample_rate: {sample_rate}')
        mel_spec = waveform_to_examples(blob, sample_rate).squeeze()
        self.logger.debug(f'mel_spec: {mel_spec.shape}')
        return mel_spec

    def read_wav(self, uri, buffer):
        """Load 16-bit samples from ``buffer`` or ``uri`` and normalize to [-1, 1].

        Returns ``(None, None)`` when neither source is given.
        """
        import soundfile as sf
        if buffer:
            wav_data, sample_rate = sf.read(io.BytesIO(buffer), dtype='int16')
        elif uri:
            wav_data, sample_rate = sf.read(uri, dtype='int16')
        else:
            return None, None
        self.logger.debug(f'sample_rate: {sample_rate}')
        if len(wav_data.shape) > 1:
            # downmix multi-channel audio to mono
            wav_data = np.mean(wav_data, axis=1)
        return wav_data / 32768.0, sample_rate
| [
"io.BytesIO",
"soundfile.read",
"numpy.float32",
"tensorflow.compat.v1.disable_eager_execution",
"jina.executors.decorators.single",
"tensorflow.compat.v1.Session",
"numpy.mean"
] | [((2007, 2050), 'jina.executors.decorators.single', 'single', ([], {'slice_nargs': '(2)', 'flatten_output': '(False)'}), '(slice_nargs=2, flatten_output=False)\n', (2013, 2050), False, 'from jina.executors.decorators import batching, single\n'), ((796, 834), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (832, 834), True, 'import tensorflow as tf\n'), ((855, 877), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (875, 877), True, 'import tensorflow as tf\n'), ((3267, 3292), 'numpy.mean', 'np.mean', (['wav_data'], {'axis': '(1)'}), '(wav_data, axis=1)\n', (3274, 3292), True, 'import numpy as np\n'), ((1535, 1553), 'numpy.float32', 'np.float32', (['result'], {}), '(result)\n', (1545, 1553), True, 'import numpy as np\n'), ((2990, 3008), 'io.BytesIO', 'io.BytesIO', (['buffer'], {}), '(buffer)\n', (3000, 3008), False, 'import io\n'), ((3079, 3106), 'soundfile.read', 'sf.read', (['uri'], {'dtype': '"""int16"""'}), "(uri, dtype='int16')\n", (3086, 3106), True, 'import soundfile as sf\n')] |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import tensorflow as tf
# Restrict TensorFlow to a single GPU and enable on-demand memory
# allocation so the process does not grab all device memory up front.
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
    gpu_num = 0 # Number of the GPU to be used
    try:
        tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
        print('Only GPU number', gpu_num, 'used.')
        tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
    except RuntimeError as e:
        # Visible devices must be set before GPUs have been initialized.
        print(e)
import unittest
import numpy as np
import sionna
from channel_test_utils import *
class TestChannelCoefficientsGenerator(unittest.TestCase):
    r"""Test the computation of channel coefficients"""

    # Batch size used to check the LSP distribution
    BATCH_SIZE = 32

    # Carrier frequency
    CARRIER_FREQUENCY = 3.5e9 # Hz

    # Maximum allowed deviation for calculation (relative error)
    MAX_ERR = 1e-2

    # Height of UTs
    H_UT = 1.5

    # Height of BSs
    H_BS = 10.0

    # Number of BS
    NB_BS = 3

    # Number of UT
    NB_UT = 10

    # Number of channel time samples
    NUM_SAMPLES = 64

    # Sampling frequency
    SAMPLING_FREQUENCY = 20e6
    def setUp(self):
        """Build a synthetic RMa scenario, sample LSPs and rays, and run the
        coefficient generator once so each test can reuse its outputs."""
        # Forcing the seed to make the tests deterministic
        tf.random.set_seed(42)

        fc = TestChannelCoefficientsGenerator.CARRIER_FREQUENCY

        # UT and BS arrays have no impact on LSP
        # However, these are needed to instantiate the model
        self.tx_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=2,
                                                          num_cols_per_panel=2,
                                                          polarization='dual',
                                                          polarization_type='VH',
                                                          antenna_pattern='38.901',
                                                          carrier_frequency=fc,
                                                          dtype=tf.complex128)

        self.rx_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=1,
                                                          num_cols_per_panel=1,
                                                          polarization='dual',
                                                          polarization_type='VH',
                                                          antenna_pattern='38.901',
                                                          carrier_frequency=fc,
                                                          dtype=tf.complex128)

        # Unit under test
        self.ccg = sionna.channel.tr38901.ChannelCoefficientsGenerator(
            fc,
            tx_array=self.tx_array,
            rx_array=self.rx_array,
            subclustering=True,
            dtype=tf.complex128)

        batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
        nb_ut = TestChannelCoefficientsGenerator.NB_UT
        nb_bs = TestChannelCoefficientsGenerator.NB_BS
        h_ut = TestChannelCoefficientsGenerator.H_UT
        h_bs = TestChannelCoefficientsGenerator.H_BS

        # Random orientations and velocities for every UT/BS in the batch
        rx_orientations = tf.random.uniform([batch_size, nb_ut, 3], 0.0,
                                            2*np.pi, dtype=tf.float64)
        tx_orientations = tf.random.uniform([batch_size, nb_bs, 3], 0.0,
                                            2*np.pi, dtype=tf.float64)
        ut_velocities = tf.random.uniform([batch_size, nb_ut, 3], 0.0, 5.0,
                                          dtype=tf.float64)

        scenario = sionna.channel.tr38901.RMaScenario(fc, self.rx_array,
                                                      self.tx_array,
                                                      "downlink",
                                                      dtype=tf.complex128)

        # Random UT/BS placements and indoor states for the topology
        ut_loc = generate_random_loc(batch_size, nb_ut, (100, 2000),
                                     (100, 2000), (h_ut, h_ut),
                                     dtype=tf.float64)
        bs_loc = generate_random_loc(batch_size, nb_bs, (0, 100),
                                     (0, 100), (h_bs, h_bs),
                                     dtype=tf.float64)

        in_state = generate_random_bool(batch_size, nb_ut, 0.5)
        scenario.set_topology(ut_loc, bs_loc, rx_orientations,
                              tx_orientations, ut_velocities, in_state)
        self.scenario = scenario

        topology = sionna.channel.tr38901.Topology(
            velocities=ut_velocities,
            moving_end='rx',
            los_aoa=scenario.los_aoa,
            los_aod=scenario.los_aod,
            los_zoa=scenario.los_zoa,
            los_zod=scenario.los_zod,
            los=scenario.los,
            distance_3d=scenario.distance_3d,
            tx_orientations=tx_orientations,
            rx_orientations=rx_orientations)
        self.topology = topology

        # Sample large-scale parameters and rays for the topology
        lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)
        ray_sampler = sionna.channel.tr38901.RaysGenerator(scenario)
        lsp_sampler.topology_updated_callback()
        ray_sampler.topology_updated_callback()
        lsp = lsp_sampler()
        self.rays = ray_sampler(lsp)
        self.lsp = lsp

        # Run the generator once in debug mode to capture the random phases
        # (phi) and sample times used by the reference computations.
        num_time_samples = TestChannelCoefficientsGenerator.NUM_SAMPLES
        sampling_frequency = TestChannelCoefficientsGenerator.SAMPLING_FREQUENCY
        c_ds = scenario.get_param("cDS")*1e-9
        _, _, phi, sample_times = self.ccg(num_time_samples,
            sampling_frequency, lsp.k_factor, self.rays, topology, c_ds,
            debug=True)
        self.phi = phi.numpy()
        self.sample_times = sample_times.numpy()
        self.c_ds = c_ds
def max_rel_err(self, r, x):
"""Compute the maximum relative error, ``r`` being the reference value,
``x`` an esimate of ``r``."""
err = np.abs(r-x)
rel_err = np.where(np.abs(r) > 0.0, np.divide(err,np.abs(r)+1e-6), err)
return np.max(rel_err)
def unit_sphere_vector_ref(self, theta, phi):
"""Reference implementation: Unit to sphere vector"""
uvec = np.stack([np.sin(theta)*np.cos(phi),
np.sin(theta)*np.sin(phi), np.cos(theta)],
axis=-1)
uvec = np.expand_dims(uvec, axis=-1)
return uvec
def test_unit_sphere_vector(self):
"""Test 3GPP channel coefficient calculation: Unit sphere vector"""
#
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
theta = tf.random.normal(shape=[batch_size]).numpy()
phi = tf.random.normal(shape=[batch_size]).numpy()
uvec_ref = self.unit_sphere_vector_ref(theta, phi)
uvec = self.ccg._unit_sphere_vector(theta, phi).numpy()
max_err = self.max_rel_err(uvec_ref, uvec)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def forward_rotation_matrix_ref(self, orientations):
"""Reference implementation: Forward rotation matrix"""
a, b, c = orientations[...,0], orientations[...,1], orientations[...,2]
#
R = np.zeros(list(a.shape) + [3,3])
#
R[...,0,0] = np.cos(a)*np.cos(b)
R[...,1,0] = np.sin(a)*np.cos(b)
R[...,2,0] = -np.sin(b)
#
R[...,0,1] = np.cos(a)*np.sin(b)*np.sin(c) - np.sin(a)*np.cos(c)
R[...,1,1] = np.sin(a)*np.sin(b)*np.sin(c) + np.cos(a)*np.cos(c)
R[...,2,1] = np.cos(b)*np.sin(c)
#
R[...,0,2] = np.cos(a)*np.sin(b)*np.cos(c) + np.sin(a)*np.sin(c)
R[...,1,2] = np.sin(a)*np.sin(b)*np.cos(c) - np.cos(a)*np.sin(c)
R[...,2,2] = np.cos(b)*np.cos(c)
#
return R
def test_forward_rotation_matrix(self):
"""Test 3GPP channel coefficient calculation: Forward rotation matrix"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
R_ref = self.forward_rotation_matrix_ref(orientation)
R = self.ccg._forward_rotation_matrix(orientation).numpy()
max_err = self.max_rel_err(R_ref, R)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def reverse_rotation_matrix_ref(self, orientations):
"""Reference implementation: Reverse rotation matrix"""
R = self.forward_rotation_matrix_ref(orientations)
dim_ind = np.arange(len(R.shape))
dim_ind = np.concatenate([dim_ind[:-2], [dim_ind[-1]], [dim_ind[-2]]],
axis=0)
R_inv = np.transpose(R, dim_ind)
return R_inv
def test_reverse_rotation_matrix(self):
"""Test 3GPP channel coefficient calculation: Reverse rotation matrix"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
R_ref = self.reverse_rotation_matrix_ref(orientation)
R = self.ccg._reverse_rotation_matrix(orientation).numpy()
max_err = self.max_rel_err(R_ref, R)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def gcs_to_lcs_ref(self, orientations, theta, phi):
"""Reference implementation: GCS to LCS angles"""
rho = self.unit_sphere_vector_ref(theta, phi)
Rinv = self.reverse_rotation_matrix_ref(orientations)
rho_prime = Rinv@rho
x = np.array([1,0,0])
x = np.expand_dims(x, axis=-1)
x = np.broadcast_to(x, rho_prime.shape)
y = np.array([0,1,0])
y = np.expand_dims(y, axis=-1)
y = np.broadcast_to(y, rho_prime.shape)
z = np.array([0,0,1])
z = np.expand_dims(z, axis=-1)
z = np.broadcast_to(z, rho_prime.shape)
theta_prime = np.sum(rho_prime*z, axis=-2)
theta_prime = np.clip(theta_prime, -1., 1.)
theta_prime = np.arccos(theta_prime)
phi_prime = np.angle(np.sum(rho_prime*x, axis=-2)\
+ 1j*np.sum(rho_prime*y, axis=-2))
theta_prime = np.squeeze(theta_prime, axis=-1)
phi_prime = np.squeeze(phi_prime, axis=-1)
return (theta_prime, phi_prime)
def test_gcs_to_lcs(self):
"""Test 3GPP channel coefficient calculation: GCS to LCS"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
theta = tf.random.normal(shape=[batch_size]).numpy()
phi = tf.random.normal(shape=[batch_size]).numpy()
theta_prime_ref, phi_prime_ref = self.gcs_to_lcs_ref(orientation, theta,
phi)
theta_prime, phi_prime = self.ccg._gcs_to_lcs(
tf.cast(orientation, tf.float64),
tf.cast(theta, tf.float64),
tf.cast(phi, tf.float64))
theta_prime = theta_prime.numpy()
phi_prime = phi_prime.numpy()
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
max_err = self.max_rel_err(theta_prime_ref, theta_prime)
self.assertLessEqual(max_err, err_tol)
max_err = self.max_rel_err(phi_prime_ref, phi_prime)
self.assertLessEqual(max_err, err_tol)
def compute_psi_ref(self, orientations, theta, phi):
"""Reference implementation: Compute psi angle"""
a = orientations[...,0]
b = orientations[...,1]
c = orientations[...,2]
real = np.sin(c)*np.cos(theta)*np.sin(phi-a)\
+ np.cos(c)*(np.cos(b)*np.sin(theta)\
-np.sin(b)*np.cos(theta)*np.cos(phi-a))
imag = np.sin(c)*np.cos(phi-a) + np.sin(b)*np.cos(c)*np.sin(phi-a)
return np.angle(real+1j*imag)
def l2g_response_ref(self, F_prime, orientations, theta, phi):
"""Reference implementation: L2G response"""
psi = self.compute_psi_ref(orientations, theta, phi)
mat = np.zeros(list(np.shape(psi)) + [2,2])
mat[...,0,0] = np.cos(psi)
mat[...,0,1] = -np.sin(psi)
mat[...,1,0] = np.sin(psi)
mat[...,1,1] = np.cos(psi)
F = mat@np.expand_dims(F_prime, axis=-1)
return F
def test_l2g_response(self):
"""Test 3GPP channel coefficient calculation: L2G antenna response"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientation = tf.random.normal(shape=[batch_size,3]).numpy()
theta = tf.random.normal(shape=[batch_size]).numpy()
phi = tf.random.normal(shape=[batch_size]).numpy()
F_prime = tf.random.normal(shape=[batch_size,2]).numpy()
F_ref = self.l2g_response_ref(F_prime, orientation, theta, phi)
F = self.ccg._l2g_response( tf.cast(F_prime, tf.float64),
tf.cast(orientation,tf.float64),
tf.cast(theta, tf.float64),
tf.cast(phi, tf.float64)).numpy()
max_err = self.max_rel_err(F_ref, F)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def rot_pos_ref(self, orientations, positions):
R = self.forward_rotation_matrix_ref(orientations)
pos_r = R@positions
return pos_r
def rot_pos(self, orientations, positions):
"""Reference implementation: Rotate according to an orientation"""
R = self.forward_rotation_matrix_ref(orientations)
pos_r = R@positions
return pos_r
def test_rot_pos(self):
"""Test 3GPP channel coefficient calculation: Rotate position according
to orientation"""
batch_size = TestChannelCoefficientsGenerator.BATCH_SIZE
orientations = tf.random.normal(shape=[batch_size,3]).numpy()
positions = tf.random.normal(shape=[batch_size,3, 1]).numpy()
pos_r_ref = self.rot_pos_ref(orientations, positions)
pos_r = self.ccg._rot_pos( tf.cast(orientations, tf.float64),
tf.cast(positions, tf.float64)).numpy()
max_err = self.max_rel_err(pos_r_ref, pos_r)
err_tol = TestChannelCoefficientsGenerator.MAX_ERR
self.assertLessEqual(max_err, err_tol)
def step_11_get_tx_antenna_positions_ref(self, topology):
    """Reference implementation: Positions of the TX array elements"""
    # Antenna locations in LCS, reshaped for broadcasting:
    # prepend two link axes and append a column axis for the matmul
    ant_loc_lcs = self.tx_array.ant_pos.numpy()[
        np.newaxis, np.newaxis, ..., np.newaxis]
    # Rotate into the GCS; the result is relative to the BS location
    tx_orientations = np.expand_dims(topology.tx_orientations.numpy(), axis=2)
    return np.squeeze(self.rot_pos_ref(tx_orientations, ant_loc_lcs),
                      axis=-1)
def test_step_11_get_tx_antenna_positions(self):
    """Test 3GPP channel coefficient calculation: Positions of the TX array
    elements"""
    expected = self.step_11_get_tx_antenna_positions_ref(self.topology)
    actual = self.ccg._step_11_get_tx_antenna_positions(self.topology).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_get_rx_antenna_positions_ref(self, topology):
    """Reference implementation: Positions of the RX array elements"""
    # Antenna locations in LCS, reshaped for broadcasting:
    # prepend two link axes and append a column axis for the matmul
    ant_loc_lcs = self.rx_array.ant_pos.numpy()[
        np.newaxis, np.newaxis, ..., np.newaxis]
    # Rotate into the GCS; the result is relative to the UT location
    rx_orientations = np.expand_dims(topology.rx_orientations.numpy(), axis=2)
    return np.squeeze(self.rot_pos_ref(rx_orientations, ant_loc_lcs),
                      axis=-1)
def test_step_11_get_rx_antenna_positions(self):
    """Test 3GPP channel coefficient calculation: Positions of the RX array
    elements"""
    expected = self.step_11_get_rx_antenna_positions_ref(self.topology)
    actual = self.ccg._step_11_get_rx_antenna_positions(self.topology).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_phase_matrix_ref(self, Phi, kappa):
    """Reference implementation: Phase matrix.

    Builds the 2x2 polarization phase matrix from the four random
    phases ``Phi`` (last axis) and the cross-polarization power
    ratio ``kappa``.
    """
    xpr_scaling = np.sqrt(1./kappa)
    phases = np.exp(1j*Phi)
    # Rows of the 2x2 matrix, stacked onto two trailing axes
    row_theta = np.stack([phases[...,0], xpr_scaling*phases[...,1]], axis=-1)
    row_phi = np.stack([xpr_scaling*phases[...,2], phases[...,3]], axis=-1)
    return np.stack([row_theta, row_phi], axis=-2)
def test_step_11_phase_matrix(self):
    """Test 3GPP channel coefficient calculation:
    Phase matrix calculation"""
    expected = self.step_11_phase_matrix_ref(self.phi, self.rays.xpr)
    actual = self.ccg._step_11_phase_matrix(self.phi, self.rays).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_field_matrix_ref(self, topology, aoa, aod, zoa, zod, H_phase):
    """Reference implementation: Field matrix.

    Computes the per-ray product of the RX antenna response, the phase
    matrix ``H_phase``, and the TX antenna response.  Angles are mapped
    from GCS to each array's LCS, the element patterns are evaluated
    there, and the responses are rotated back to GCS.  Both single- and
    dual-polarized arrays are handled by scattering the
    per-polarization responses onto the matching antenna indices.
    """
    tx_orientations = topology.tx_orientations.numpy()
    rx_orientations = topology.rx_orientations.numpy()
    # Convert departure angles to LCS (add ray/cluster axes first)
    tx_orientations = np.expand_dims(np.expand_dims(
        np.expand_dims(tx_orientations, axis=2), axis=2), axis=2)
    zod_prime, aod_prime = self.gcs_to_lcs_ref(tx_orientations, zod, aod)
    # Convert arrival angles to LCS
    rx_orientations = np.expand_dims(np.expand_dims(
        np.expand_dims(rx_orientations, axis=1), axis=3), axis=3)
    zoa_prime, aoa_prime = self.gcs_to_lcs_ref(rx_orientations, zoa, aoa)
    # Compute the TX antenna response in LCS and map it to GCS
    F_tx_prime_pol1_1, F_tx_prime_pol1_2 = self.tx_array.ant_pol1.field(
        tf.constant(zod_prime,tf.float64), tf.constant(aod_prime,tf.float64))
    F_tx_prime_pol1_1 = F_tx_prime_pol1_1.numpy()
    F_tx_prime_pol1_2 = F_tx_prime_pol1_2.numpy()
    # Stack the (theta, phi) components into a 2-vector on the last axis
    F_tx_prime_pol1 = np.stack([F_tx_prime_pol1_1, F_tx_prime_pol1_2],
                                axis=-1)
    F_tx_pol1 = self.l2g_response_ref(F_tx_prime_pol1, tx_orientations,
                                      zod, aod)
    # Dual polarization case for TX: second-polarization pattern
    if (self.tx_array.polarization == 'dual'):
        F_tx_prime_pol2_1, F_tx_prime_pol2_2 = self.tx_array.ant_pol2.field(
            tf.constant(zod_prime, tf.float64),
            tf.constant(aod_prime, tf.float64))
        F_tx_prime_pol2_1 = F_tx_prime_pol2_1.numpy()
        F_tx_prime_pol2_2 = F_tx_prime_pol2_2.numpy()
        F_tx_prime_pol2 = np.stack([F_tx_prime_pol2_1, F_tx_prime_pol2_2],
                                    axis=-1)
        F_tx_pol2 = self.l2g_response_ref(F_tx_prime_pol2, tx_orientations,
                                          zod, aod)
    # Compute the RX antenna response in LCS and map it to GCS
    F_rx_prime_pol1_1, F_rx_prime_pol1_2 = self.rx_array.ant_pol1.field(
        tf.constant(zoa_prime, tf.float64),
        tf.constant(aoa_prime, tf.float64))
    F_rx_prime_pol1_1 = F_rx_prime_pol1_1.numpy()
    F_rx_prime_pol1_2 = F_rx_prime_pol1_2.numpy()
    F_rx_prime_pol1 = np.stack([F_rx_prime_pol1_1, F_rx_prime_pol1_2],
                                axis=-1)
    F_rx_pol1 = self.l2g_response_ref(F_rx_prime_pol1, rx_orientations,
                                      zoa, aoa)
    # Dual polarization case for RX
    if (self.rx_array.polarization == 'dual'):
        F_rx_prime_pol2_1, F_rx_prime_pol2_2 = self.rx_array.ant_pol2.field(
            tf.constant(zoa_prime, tf.float64),
            tf.constant(aoa_prime, tf.float64))
        F_rx_prime_pol2_1 = F_rx_prime_pol2_1.numpy()
        F_rx_prime_pol2_2 = F_rx_prime_pol2_2.numpy()
        F_rx_prime_pol2 = np.stack([F_rx_prime_pol2_1, F_rx_prime_pol2_2],
                                    axis=-1)
        F_rx_pol2 = self.l2g_response_ref(F_rx_prime_pol2, rx_orientations,
                                          zoa, aoa)
    # Compute product between the phase matrix and the TX antenna field.
    F_tx_pol1 = H_phase@F_tx_pol1
    if (self.tx_array.polarization == 'dual'):
        F_tx_pol2 = H_phase@F_tx_pol2
    # TX: Scattering the antenna response
    # Single polarization case is easy, as one only needs to repeat the same
    # antenna response for all elements
    F_tx_pol1 = np.expand_dims(np.squeeze(F_tx_pol1, axis=-1), axis=-2)
    if (self.tx_array.polarization == 'single'):
        F_tx = np.tile(F_tx_pol1, [1,1,1,1,1, self.tx_array.num_ant,1])
    # Dual-polarization requires scattering the responses at the right
    # place (each element carries one of the two polarizations)
    else:
        F_tx_pol2 = np.expand_dims(np.squeeze(F_tx_pol2, axis=-1), axis=-2)
        # Complex zero-initialized buffer, then fill per polarization index
        F_tx = np.zeros(F_tx_pol1.shape) + 1j*np.zeros(F_tx_pol1.shape)
        F_tx = np.tile(F_tx, [1,1,1,1,1, self.tx_array.num_ant,1])
        F_tx[:,:,:,:,:,self.tx_array.ant_ind_pol1,:] = F_tx_pol1
        F_tx[:,:,:,:,:,self.tx_array.ant_ind_pol2,:] = F_tx_pol2
    # RX: Scattering the antenna response
    # Single polarization case is easy, as one only needs to repeat the same
    # antenna response for all elements
    F_rx_pol1 = np.expand_dims(np.squeeze(F_rx_pol1, axis=-1), axis=-2)
    if (self.rx_array.polarization == 'single'):
        F_rx = np.tile(F_rx_pol1, [1,1,1,1,1,self.rx_array.num_ant,1])
    # Dual-polarization requires scattering the responses at the right
    # place
    else:
        F_rx_pol2 = np.expand_dims(np.squeeze(F_rx_pol2, axis=-1), axis=-2)
        F_rx = np.zeros(F_rx_pol1.shape) + 1j*np.zeros(F_rx_pol1.shape)
        F_rx = np.tile(F_rx, [1,1,1,1,1,self.rx_array.num_ant,1])
        F_rx[:,:,:,:,:,self.rx_array.ant_ind_pol1,:] = F_rx_pol1
        F_rx[:,:,:,:,:,self.rx_array.ant_ind_pol2,:] = F_rx_pol2
    # Computing H_field: inner product of the RX and TX responses over
    # the 2 polarization components, broadcast over RX x TX antenna pairs
    F_tx = np.expand_dims(F_tx, axis=-3)
    F_rx = np.expand_dims(F_rx, axis=-2)
    H_field = np.sum(F_tx*F_rx, axis=-1)
    return H_field
def test_step_11_field_matrix(self):
    """Test 3GPP channel coefficient calculation:
    Field matrix calculation"""
    H_phase = self.step_11_phase_matrix_ref(self.phi, self.rays.xpr)
    expected = self.step_11_field_matrix_ref(
        self.topology, self.rays.aoa, self.rays.aod,
        self.rays.zoa, self.rays.zod, H_phase)
    actual = self.ccg._step_11_field_matrix(
        self.topology,
        tf.constant(self.rays.aoa, tf.float64),
        tf.constant(self.rays.aod, tf.float64),
        tf.constant(self.rays.zoa, tf.float64),
        tf.constant(self.rays.zod, tf.float64),
        tf.constant(H_phase, tf.complex128)).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_array_offsets_ref(self, aoa, aod, zoa, zod, topology):
    """Reference implementation: Array offset matrix"""
    lambda_0 = self.scenario.lambda_0.numpy()
    # Arrival and departure spherical unit vectors, with an antenna axis
    r_hat_rx = np.squeeze(self.unit_sphere_vector_ref(zoa, aoa),
                          axis=-1)[..., np.newaxis, :]
    r_hat_tx = np.squeeze(self.unit_sphere_vector_ref(zod, aod),
                          axis=-1)[..., np.newaxis, :]
    # TX element location vectors, broadcast over RX/cluster/ray axes
    d_bar_tx = self.step_11_get_tx_antenna_positions_ref(topology)
    d_bar_tx = d_bar_tx[:, :, np.newaxis, np.newaxis, np.newaxis]
    # RX element location vectors, broadcast over TX/cluster/ray axes
    d_bar_rx = self.step_11_get_rx_antenna_positions_ref(topology)
    d_bar_rx = d_bar_rx[:, np.newaxis, :, np.newaxis, np.newaxis]
    # Projections of the element positions onto the propagation direction
    tx_offset = np.sum(r_hat_tx*d_bar_tx, axis=-1)[..., np.newaxis, :]
    rx_offset = np.sum(r_hat_rx*d_bar_rx, axis=-1)[..., np.newaxis]
    # Per-antenna-pair phase offsets
    return np.exp(1j*2*np.pi*(tx_offset+rx_offset)/lambda_0)
def test_step_11_array_offsets(self):
    """Test 3GPP channel coefficient calculation: Array offset matrix"""
    expected = self.step_11_array_offsets_ref(
        self.rays.aoa, self.rays.aod, self.rays.zoa, self.rays.zod,
        self.topology)
    actual = self.ccg._step_11_array_offsets(
        self.topology,
        tf.constant(self.rays.aoa, tf.float64),
        tf.constant(self.rays.aod, tf.float64),
        tf.constant(self.rays.zoa, tf.float64),
        tf.constant(self.rays.zod, tf.float64)).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_doppler_matrix_ref(self, topology, aoa, zoa, t):
    """Reference implementation: Doppler matrix"""
    lambda_0 = self.scenario.lambda_0.numpy()
    velocities = topology.velocities.numpy()
    # Insert the link axis that corresponds to the non-moving end
    if topology.moving_end == "tx":
        velocities = np.expand_dims(velocities, axis=2)
    elif topology.moving_end == 'rx':
        velocities = np.expand_dims(velocities, axis=1)
    # Add cluster and ray axes for broadcasting
    velocities = velocities[..., np.newaxis, np.newaxis, :]
    # Arrival spherical unit vector
    r_hat_rx = np.squeeze(self.unit_sphere_vector_ref(zoa, aoa), axis=-1)
    # Doppler phase over the sample times t
    exponent = np.sum(r_hat_rx*velocities, axis=-1, keepdims=True)
    exponent = exponent/lambda_0
    exponent = 2*np.pi*exponent*t
    return np.exp(1j*exponent)
def test_step_11_doppler_matrix(self):
    """Test 3GPP channel coefficient calculation: Doppler matrix"""
    expected = self.step_11_doppler_matrix_ref(
        self.topology, self.rays.aoa, self.rays.zoa, self.sample_times)
    actual = self.ccg._step_11_doppler_matrix(
        self.topology,
        tf.constant(self.rays.aoa, tf.float64),
        tf.constant(self.rays.zoa, tf.float64),
        tf.constant(self.sample_times, tf.float64)).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_nlos_ref(self, phi, aoa, aod, zoa, zod, kappa, powers, t,
                     topology):
    """Reference implementation: Compute the channel matrix of the NLoS
    component"""
    # Individual factors of the per-ray coefficient
    H_phase = self.step_11_phase_matrix_ref(phi, kappa)
    H_field = self.step_11_field_matrix_ref(topology, aoa, aod, zoa, zod,
                                            H_phase)[..., np.newaxis]
    H_array = self.step_11_array_offsets_ref(aoa, aod, zoa, zod,
                                             topology)[..., np.newaxis]
    H_doppler = self.step_11_doppler_matrix_ref(topology, aoa, zoa,
                                                t)[..., np.newaxis,
                                                   np.newaxis, :]
    # Normalize the cluster power over its rays (last axis of aoa)
    power_scaling = np.sqrt(powers/aoa.shape[4])
    power_scaling = power_scaling[..., np.newaxis, np.newaxis,
                                  np.newaxis, np.newaxis]
    return (H_field*H_array*H_doppler)*power_scaling
def test_step_11_nlos_ref(self):
    """Test 3GPP channel coefficient calculation: NLoS channel matrix"""
    expected = self.step_11_nlos_ref(self.phi,
                                     self.rays.aoa,
                                     self.rays.aod,
                                     self.rays.zoa,
                                     self.rays.zod,
                                     self.rays.xpr,
                                     self.rays.powers,
                                     self.sample_times,
                                     self.topology)
    actual = self.ccg._step_11_nlos(
        tf.constant(self.phi, tf.float64),
        self.topology,
        self.rays,
        tf.constant(self.sample_times, tf.float64)).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_reduce_nlos_ref(self, H_full, powers, delays, c_DS):
    """Reference implementation: Compute the channel matrix of the NLoS
    component 2.

    Orders clusters by decreasing power, splits the two strongest
    clusters into three sub-clusters each (with fixed ray groupings
    and delay offsets scaled by ``c_DS``), sums the rays of every
    (sub-)cluster, and returns the resulting coefficients and delays
    sorted by delay.
    """
    # Sorting clusters in descending power order
    cluster_ordered = np.flip(np.argsort(powers, axis=3), axis=3)
    delays_ordered = np.take_along_axis(delays, cluster_ordered, axis=3)
    H_full_ordered = tf.gather(H_full, cluster_ordered, axis=3,
                        batch_dims=3).numpy()
    ## Weak clusters (all except first two): sum over the ray axis
    delays_weak = delays_ordered[:,:,:,2:]
    H_full_weak = np.sum(H_full_ordered[:,:,:,2:,:,:,:], axis=4)
    ## Strong clusters (first two)
    # Each strong cluster is split into 3 sub-clusters
    # Sub-cluster delays: base delay plus fixed offsets times c_DS
    strong_delays = delays_ordered[:,:,:,:2]
    strong_delays = np.expand_dims(strong_delays, -1)
    delays_expension = np.array([[[[[0.0, 1.28, 2.56]]]]])
    c_DS = np.expand_dims(np.expand_dims(c_DS.numpy(), axis=-1), axis=-1)
    strong_delays = strong_delays + delays_expension*c_DS
    # Flatten the (cluster, sub-cluster) axes into one delay axis
    strong_delays = np.reshape(strong_delays,
                        list(strong_delays.shape[:-2]) + [-1])
    # Sub-cluster coefficients: fixed ray-index groups per sub-cluster
    # (presumably the TR 38.901 ray-to-sub-cluster mapping — confirm
    # against the spec, Table 7.5-5)
    H_full_strong = H_full_ordered[:,:,:,:2,:,:,:]
    H_full_subcl_1 = np.sum(np.take(H_full_strong, [0,1,2,3,4,5,6,7,18,19],
                        axis=4), axis=4)
    H_full_subcl_2 = np.sum(np.take(H_full_strong, [8,9,10,11,16,17],
                        axis=4), axis=4)
    H_full_subcl_3 = np.sum(np.take(H_full_strong, [12,13,14,15],
                        axis=4), axis=4)
    # Interleave sub-clusters per strong cluster, then flatten
    H_full_strong_subcl = np.stack([H_full_subcl_1,H_full_subcl_2,
                        H_full_subcl_3], axis=3)
    H_full_strong_subcl = np.transpose(H_full_strong_subcl,
                        [0,1,2,4,3,5,6,7])
    H_full_strong_subcl = np.reshape(H_full_strong_subcl,
                        np.concatenate([H_full_strong_subcl.shape[:3], [-1],
                        H_full_strong_subcl.shape[5:]], axis=0))
    ## Putting together strong and weak clusters
    H_nlos = np.concatenate([H_full_strong_subcl, H_full_weak], axis=3)
    delays_nlos = np.concatenate([strong_delays, delays_weak], axis=3)
    ## Sorting everything by delay
    delays_sorted_ind = np.argsort(delays_nlos, axis=3)
    delays_nlos = np.take_along_axis(delays_nlos, delays_sorted_ind, axis=3)
    H_nlos = tf.gather(H_nlos, delays_sorted_ind,
                        axis=3, batch_dims=3).numpy()
    return (H_nlos, delays_nlos)
def test_step_11_reduce_nlos(self):
    """Test 3GPP channel coefficient calculation: NLoS channel matrix
    computation"""
    H_full_ref = self.step_11_nlos_ref(self.phi,
                                       self.rays.aoa,
                                       self.rays.aod,
                                       self.rays.zoa,
                                       self.rays.zod,
                                       self.rays.xpr,
                                       self.rays.powers,
                                       self.sample_times,
                                       self.topology)
    H_nlos_ref, delays_nlos_ref = self.step_11_reduce_nlos_ref(
        H_full_ref, self.rays.powers.numpy(), self.rays.delays.numpy(),
        self.c_ds)
    H_nlos, delays_nlos = self.ccg._step_11_reduce_nlos(
        tf.constant(H_full_ref, tf.complex128), self.rays, self.c_ds)
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(H_nlos_ref, H_nlos.numpy()),
                         err_tol)
    self.assertLessEqual(
        self.max_rel_err(delays_nlos_ref, delays_nlos.numpy()), err_tol)
def step_11_los_ref(self, t, topology):
    """Reference implementation: Compute the channel matrix of the LoS
    component"""
    def _with_ray_axes(angle):
        # Add cluster and ray axes so LoS angles broadcast like NLoS ones
        return np.expand_dims(np.expand_dims(angle, axis=3), axis=4)
    # LoS departure and arrival angles
    los_aoa = _with_ray_axes(topology.los_aoa.numpy())
    los_zoa = _with_ray_axes(topology.los_zoa.numpy())
    los_aod = _with_ray_axes(topology.los_aod.numpy())
    los_zod = _with_ray_axes(topology.los_zod.numpy())
    # Field matrix with the fixed LoS polarization coupling
    H_phase = np.reshape(np.array([[1.,0.],
                                   [0.,-1.]]), [1,1,1,1,1,2,2])
    H_field = self.step_11_field_matrix_ref(topology, los_aoa, los_aod,
                                            los_zoa, los_zod, H_phase)
    # Array offset matrix
    H_array = self.step_11_array_offsets_ref(los_aoa, los_aod, los_zoa,
                                             los_zod, topology)
    # Doppler matrix
    H_doppler = self.step_11_doppler_matrix_ref(topology, los_aoa,
                                                los_zoa, t)
    # Phase shift due to propagation delay
    d3D = topology.distance_3d.numpy()
    lambda_0 = self.scenario.lambda_0.numpy()
    H_delay = np.exp(1j*2*np.pi*d3D/lambda_0)
    # Align axes and combine all factors into the channel coefficient
    H_field = np.squeeze(H_field, axis=4)[..., np.newaxis]
    H_array = np.squeeze(H_array, axis=4)[..., np.newaxis]
    H_doppler = np.expand_dims(H_doppler, axis=4)
    H_delay = H_delay[:, :, :, np.newaxis, np.newaxis,
                      np.newaxis, np.newaxis]
    return H_field*H_array*H_doppler*H_delay
def test_step11_los(self):
    """Test 3GPP channel coefficient calculation: LoS channel matrix"""
    expected = self.step_11_los_ref(self.sample_times, self.topology)
    actual = self.ccg._step_11_los(self.topology, self.sample_times).numpy()
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(expected, actual), err_tol)
def step_11_ref(self, phi, k_factor, aoa, aod, zoa, zod, kappa, powers,
                delays, t, topology, c_ds):
    """Reference implementation: Step 11"""
    ## NLoS component
    H_full = self.step_11_nlos_ref(phi, aoa, aod, zoa, zod, kappa, powers,
                                   t, topology)
    H_nlos, delays_nlos = self.step_11_reduce_nlos_ref(H_full, powers,
                                                       delays, c_ds)
    ## LoS component
    H_los = self.step_11_los_ref(t, topology)
    k_factor = np.reshape(k_factor, list(k_factor.shape) + [1,1,1,1])
    # Power split between the LoS ray and the NLoS clusters
    H_los_nlos = np.sqrt(1./(k_factor+1.))*H_nlos
    H_los_los = np.sqrt(k_factor/(k_factor+1.))*H_los
    # The LoS ray is added to the first cluster
    H_los_los = H_los_los + H_los_nlos[:,:,:,:1,...]
    H_los = np.concatenate([H_los_los, H_los_nlos[:,:,:,1:,...]], axis=3)
    ## Pick the LoS or NLoS CIR for every link
    los_status = topology.los.numpy()
    los_status = np.reshape(los_status, list(los_status.shape) + [1,1,1,1])
    return np.where(los_status, H_los, H_nlos), delays_nlos
def test_step_11(self):
    """Test 3GPP channel coefficient calculation: Step 11"""
    H, delays_nlos = self.ccg._step_11(
        tf.constant(self.phi, tf.float64),
        self.topology,
        self.lsp.k_factor,
        self.rays,
        tf.constant(self.sample_times, tf.float64),
        self.c_ds)
    H = H.numpy()
    delays_nlos = delays_nlos.numpy()
    H_ref, delays_nlos_ref = self.step_11_ref(
        self.phi,
        self.lsp.k_factor.numpy(),
        self.rays.aoa.numpy(),
        self.rays.aod.numpy(),
        self.rays.zoa.numpy(),
        self.rays.zod.numpy(),
        self.rays.xpr.numpy(),
        self.rays.powers.numpy(),
        self.rays.delays.numpy(),
        self.sample_times,
        self.topology,
        self.c_ds)
    err_tol = TestChannelCoefficientsGenerator.MAX_ERR
    self.assertLessEqual(self.max_rel_err(H_ref, H), err_tol)
    self.assertLessEqual(self.max_rel_err(delays_nlos_ref, delays_nlos),
                         err_tol)
| [
"tensorflow.random.set_seed",
"numpy.abs",
"sionna.channel.tr38901.Topology",
"numpy.sum",
"sionna.channel.tr38901.RaysGenerator",
"numpy.angle",
"numpy.clip",
"numpy.argsort",
"numpy.shape",
"numpy.sin",
"numpy.exp",
"numpy.tile",
"sys.path.append",
"sionna.channel.tr38901.PanelArray",
... | [((267, 305), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (298, 305), True, 'import tensorflow as tf\n'), ((213, 235), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (228, 235), False, 'import sys\n'), ((426, 477), 'tensorflow.config.set_visible_devices', 'tf.config.set_visible_devices', (['gpus[gpu_num]', '"""GPU"""'], {}), "(gpus[gpu_num], 'GPU')\n", (455, 477), True, 'import tensorflow as tf\n'), ((537, 598), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[gpu_num]', '(True)'], {}), '(gpus[gpu_num], True)\n', (577, 598), True, 'import tensorflow as tf\n'), ((1415, 1437), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (1433, 1437), True, 'import tensorflow as tf\n'), ((1638, 1839), 'sionna.channel.tr38901.PanelArray', 'sionna.channel.tr38901.PanelArray', ([], {'num_rows_per_panel': '(2)', 'num_cols_per_panel': '(2)', 'polarization': '"""dual"""', 'polarization_type': '"""VH"""', 'antenna_pattern': '"""38.901"""', 'carrier_frequency': 'fc', 'dtype': 'tf.complex128'}), "(num_rows_per_panel=2, num_cols_per_panel=\n 2, polarization='dual', polarization_type='VH', antenna_pattern=\n '38.901', carrier_frequency=fc, dtype=tf.complex128)\n", (1671, 1839), False, 'import sionna\n'), ((2166, 2367), 'sionna.channel.tr38901.PanelArray', 'sionna.channel.tr38901.PanelArray', ([], {'num_rows_per_panel': '(1)', 'num_cols_per_panel': '(1)', 'polarization': '"""dual"""', 'polarization_type': '"""VH"""', 'antenna_pattern': '"""38.901"""', 'carrier_frequency': 'fc', 'dtype': 'tf.complex128'}), "(num_rows_per_panel=1, num_cols_per_panel=\n 1, polarization='dual', polarization_type='VH', antenna_pattern=\n '38.901', carrier_frequency=fc, dtype=tf.complex128)\n", (2199, 2367), False, 'import sionna\n'), ((2690, 2839), 'sionna.channel.tr38901.ChannelCoefficientsGenerator', 
'sionna.channel.tr38901.ChannelCoefficientsGenerator', (['fc'], {'tx_array': 'self.tx_array', 'rx_array': 'self.rx_array', 'subclustering': '(True)', 'dtype': 'tf.complex128'}), '(fc, tx_array=self.\n tx_array, rx_array=self.rx_array, subclustering=True, dtype=tf.complex128)\n', (2741, 2839), False, 'import sionna\n'), ((3205, 3280), 'tensorflow.random.uniform', 'tf.random.uniform', (['[batch_size, nb_ut, 3]', '(0.0)', '(2 * np.pi)'], {'dtype': 'tf.float64'}), '([batch_size, nb_ut, 3], 0.0, 2 * np.pi, dtype=tf.float64)\n', (3222, 3280), True, 'import tensorflow as tf\n'), ((3349, 3424), 'tensorflow.random.uniform', 'tf.random.uniform', (['[batch_size, nb_bs, 3]', '(0.0)', '(2 * np.pi)'], {'dtype': 'tf.float64'}), '([batch_size, nb_bs, 3], 0.0, 2 * np.pi, dtype=tf.float64)\n', (3366, 3424), True, 'import tensorflow as tf\n'), ((3491, 3560), 'tensorflow.random.uniform', 'tf.random.uniform', (['[batch_size, nb_ut, 3]', '(0.0)', '(5.0)'], {'dtype': 'tf.float64'}), '([batch_size, nb_ut, 3], 0.0, 5.0, dtype=tf.float64)\n', (3508, 3560), True, 'import tensorflow as tf\n'), ((3629, 3734), 'sionna.channel.tr38901.RMaScenario', 'sionna.channel.tr38901.RMaScenario', (['fc', 'self.rx_array', 'self.tx_array', '"""downlink"""'], {'dtype': 'tf.complex128'}), "(fc, self.rx_array, self.tx_array,\n 'downlink', dtype=tf.complex128)\n", (3663, 3734), False, 'import sionna\n'), ((4507, 4822), 'sionna.channel.tr38901.Topology', 'sionna.channel.tr38901.Topology', ([], {'velocities': 'ut_velocities', 'moving_end': '"""rx"""', 'los_aoa': 'scenario.los_aoa', 'los_aod': 'scenario.los_aod', 'los_zoa': 'scenario.los_zoa', 'los_zod': 'scenario.los_zod', 'los': 'scenario.los', 'distance_3d': 'scenario.distance_3d', 'tx_orientations': 'tx_orientations', 'rx_orientations': 'rx_orientations'}), "(velocities=ut_velocities, moving_end='rx',\n los_aoa=scenario.los_aoa, los_aod=scenario.los_aod, los_zoa=scenario.\n los_zoa, los_zod=scenario.los_zod, los=scenario.los, distance_3d=\n 
scenario.distance_3d, tx_orientations=tx_orientations, rx_orientations=\n rx_orientations)\n", (4538, 4822), False, 'import sionna\n'), ((4981, 5026), 'sionna.channel.tr38901.LSPGenerator', 'sionna.channel.tr38901.LSPGenerator', (['scenario'], {}), '(scenario)\n', (5016, 5026), False, 'import sionna\n'), ((5049, 5095), 'sionna.channel.tr38901.RaysGenerator', 'sionna.channel.tr38901.RaysGenerator', (['scenario'], {}), '(scenario)\n', (5085, 5095), False, 'import sionna\n'), ((5909, 5922), 'numpy.abs', 'np.abs', (['(r - x)'], {}), '(r - x)\n', (5915, 5922), True, 'import numpy as np\n'), ((6016, 6031), 'numpy.max', 'np.max', (['rel_err'], {}), '(rel_err)\n', (6022, 6031), True, 'import numpy as np\n'), ((6320, 6349), 'numpy.expand_dims', 'np.expand_dims', (['uvec'], {'axis': '(-1)'}), '(uvec, axis=-1)\n', (6334, 6349), True, 'import numpy as np\n'), ((8544, 8612), 'numpy.concatenate', 'np.concatenate', (['[dim_ind[:-2], [dim_ind[-1]], [dim_ind[-2]]]'], {'axis': '(0)'}), '([dim_ind[:-2], [dim_ind[-1]], [dim_ind[-2]]], axis=0)\n', (8558, 8612), True, 'import numpy as np\n'), ((8665, 8689), 'numpy.transpose', 'np.transpose', (['R', 'dim_ind'], {}), '(R, dim_ind)\n', (8677, 8689), True, 'import numpy as np\n'), ((9524, 9543), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (9532, 9543), True, 'import numpy as np\n'), ((9554, 9580), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (9568, 9580), True, 'import numpy as np\n'), ((9593, 9628), 'numpy.broadcast_to', 'np.broadcast_to', (['x', 'rho_prime.shape'], {}), '(x, rho_prime.shape)\n', (9608, 9628), True, 'import numpy as np\n'), ((9642, 9661), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (9650, 9661), True, 'import numpy as np\n'), ((9672, 9698), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (9686, 9698), True, 'import numpy as np\n'), ((9711, 9746), 'numpy.broadcast_to', 'np.broadcast_to', (['y', 
'rho_prime.shape'], {}), '(y, rho_prime.shape)\n', (9726, 9746), True, 'import numpy as np\n'), ((9760, 9779), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9768, 9779), True, 'import numpy as np\n'), ((9790, 9816), 'numpy.expand_dims', 'np.expand_dims', (['z'], {'axis': '(-1)'}), '(z, axis=-1)\n', (9804, 9816), True, 'import numpy as np\n'), ((9829, 9864), 'numpy.broadcast_to', 'np.broadcast_to', (['z', 'rho_prime.shape'], {}), '(z, rho_prime.shape)\n', (9844, 9864), True, 'import numpy as np\n'), ((9888, 9918), 'numpy.sum', 'np.sum', (['(rho_prime * z)'], {'axis': '(-2)'}), '(rho_prime * z, axis=-2)\n', (9894, 9918), True, 'import numpy as np\n'), ((9939, 9970), 'numpy.clip', 'np.clip', (['theta_prime', '(-1.0)', '(1.0)'], {}), '(theta_prime, -1.0, 1.0)\n', (9946, 9970), True, 'import numpy as np\n'), ((9991, 10013), 'numpy.arccos', 'np.arccos', (['theta_prime'], {}), '(theta_prime)\n', (10000, 10013), True, 'import numpy as np\n'), ((10143, 10175), 'numpy.squeeze', 'np.squeeze', (['theta_prime'], {'axis': '(-1)'}), '(theta_prime, axis=-1)\n', (10153, 10175), True, 'import numpy as np\n'), ((10196, 10226), 'numpy.squeeze', 'np.squeeze', (['phi_prime'], {'axis': '(-1)'}), '(phi_prime, axis=-1)\n', (10206, 10226), True, 'import numpy as np\n'), ((11771, 11799), 'numpy.angle', 'np.angle', (['(real + 1.0j * imag)'], {}), '(real + 1.0j * imag)\n', (11779, 11799), True, 'import numpy as np\n'), ((12053, 12064), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (12059, 12064), True, 'import numpy as np\n'), ((12124, 12135), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (12130, 12135), True, 'import numpy as np\n'), ((12159, 12170), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (12165, 12170), True, 'import numpy as np\n'), ((14780, 14819), 'numpy.expand_dims', 'np.expand_dims', (['tx_orientations'], {'axis': '(2)'}), '(tx_orientations, axis=2)\n', (14794, 14819), True, 'import numpy as np\n'), ((16009, 16048), 'numpy.expand_dims', 
'np.expand_dims', (['rx_orientations'], {'axis': '(2)'}), '(rx_orientations, axis=2)\n', (16023, 16048), True, 'import numpy as np\n'), ((16854, 16874), 'numpy.sqrt', 'np.sqrt', (['(1.0 / kappa)'], {}), '(1.0 / kappa)\n', (16861, 16874), True, 'import numpy as np\n'), ((17012, 17038), 'numpy.exp', 'np.exp', (['(1.0j * Phi[..., 0])'], {}), '(1.0j * Phi[..., 0])\n', (17018, 17038), True, 'import numpy as np\n'), ((17183, 17209), 'numpy.exp', 'np.exp', (['(1.0j * Phi[..., 3])'], {}), '(1.0j * Phi[..., 3])\n', (17189, 17209), True, 'import numpy as np\n'), ((18783, 18840), 'numpy.stack', 'np.stack', (['[F_tx_prime_pol1_1, F_tx_prime_pol1_2]'], {'axis': '(-1)'}), '([F_tx_prime_pol1_1, F_tx_prime_pol1_2], axis=-1)\n', (18791, 18840), True, 'import numpy as np\n'), ((19960, 20017), 'numpy.stack', 'np.stack', (['[F_rx_prime_pol1_1, F_rx_prime_pol1_2]'], {'axis': '(-1)'}), '([F_rx_prime_pol1_1, F_rx_prime_pol1_2], axis=-1)\n', (19968, 20017), True, 'import numpy as np\n'), ((22677, 22706), 'numpy.expand_dims', 'np.expand_dims', (['F_tx'], {'axis': '(-3)'}), '(F_tx, axis=-3)\n', (22691, 22706), True, 'import numpy as np\n'), ((22722, 22751), 'numpy.expand_dims', 'np.expand_dims', (['F_rx'], {'axis': '(-2)'}), '(F_rx, axis=-2)\n', (22736, 22751), True, 'import numpy as np\n'), ((22770, 22798), 'numpy.sum', 'np.sum', (['(F_tx * F_rx)'], {'axis': '(-1)'}), '(F_tx * F_rx, axis=-1)\n', (22776, 22798), True, 'import numpy as np\n'), ((24304, 24337), 'numpy.expand_dims', 'np.expand_dims', (['r_hat_rx'], {'axis': '(-2)'}), '(r_hat_rx, axis=-2)\n', (24318, 24337), True, 'import numpy as np\n'), ((24479, 24512), 'numpy.expand_dims', 'np.expand_dims', (['r_hat_tx'], {'axis': '(-2)'}), '(r_hat_tx, axis=-2)\n', (24493, 24512), True, 'import numpy as np\n'), ((25045, 25081), 'numpy.sum', 'np.sum', (['(r_hat_tx * d_bar_tx)'], {'axis': '(-1)'}), '(r_hat_tx * d_bar_tx, axis=-1)\n', (25051, 25081), True, 'import numpy as np\n'), ((25100, 25136), 'numpy.sum', 'np.sum', (['(r_hat_rx * 
d_bar_rx)'], {'axis': '(-1)'}), '(r_hat_rx * d_bar_rx, axis=-1)\n', (25106, 25136), True, 'import numpy as np\n'), ((25156, 25185), 'numpy.expand_dims', 'np.expand_dims', (['tx_offset', '(-2)'], {}), '(tx_offset, -2)\n', (25170, 25185), True, 'import numpy as np\n'), ((25206, 25235), 'numpy.expand_dims', 'np.expand_dims', (['rx_offset', '(-1)'], {}), '(rx_offset, -1)\n', (25220, 25235), True, 'import numpy as np\n'), ((25261, 25322), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * (tx_offset + rx_offset) / lambda_0)'], {}), '(1.0j * 2 * np.pi * (tx_offset + rx_offset) / lambda_0)\n', (25267, 25322), True, 'import numpy as np\n'), ((27025, 27078), 'numpy.sum', 'np.sum', (['(r_hat_rx * velocities)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(r_hat_rx * velocities, axis=-1, keepdims=True)\n', (27031, 27078), True, 'import numpy as np\n'), ((27172, 27195), 'numpy.exp', 'np.exp', (['(1.0j * exponent)'], {}), '(1.0j * exponent)\n', (27178, 27195), True, 'import numpy as np\n'), ((28636, 28668), 'numpy.expand_dims', 'np.expand_dims', (['H_field'], {'axis': '(-1)'}), '(H_field, axis=-1)\n', (28650, 28668), True, 'import numpy as np\n'), ((28687, 28719), 'numpy.expand_dims', 'np.expand_dims', (['H_array'], {'axis': '(-1)'}), '(H_array, axis=-1)\n', (28701, 28719), True, 'import numpy as np\n'), ((28869, 28899), 'numpy.sqrt', 'np.sqrt', (['(powers / aoa.shape[4])'], {}), '(powers / aoa.shape[4])\n', (28876, 28899), True, 'import numpy as np\n'), ((30466, 30517), 'numpy.take_along_axis', 'np.take_along_axis', (['delays', 'cluster_ordered'], {'axis': '(3)'}), '(delays, cluster_ordered, axis=3)\n', (30484, 30517), True, 'import numpy as np\n'), ((30782, 30834), 'numpy.sum', 'np.sum', (['H_full_ordered[:, :, :, 2:, :, :, :]'], {'axis': '(4)'}), '(H_full_ordered[:, :, :, 2:, :, :, :], axis=4)\n', (30788, 30834), True, 'import numpy as np\n'), ((31029, 31062), 'numpy.expand_dims', 'np.expand_dims', (['strong_delays', '(-1)'], {}), '(strong_delays, -1)\n', (31043, 31062), True, 
'import numpy as np\n'), ((31090, 31125), 'numpy.array', 'np.array', (['[[[[[0.0, 1.28, 2.56]]]]]'], {}), '([[[[[0.0, 1.28, 2.56]]]]])\n', (31098, 31125), True, 'import numpy as np\n'), ((31885, 31951), 'numpy.stack', 'np.stack', (['[H_full_subcl_1, H_full_subcl_2, H_full_subcl_3]'], {'axis': '(3)'}), '([H_full_subcl_1, H_full_subcl_2, H_full_subcl_3], axis=3)\n', (31893, 31951), True, 'import numpy as np\n'), ((32021, 32080), 'numpy.transpose', 'np.transpose', (['H_full_strong_subcl', '[0, 1, 2, 4, 3, 5, 6, 7]'], {}), '(H_full_strong_subcl, [0, 1, 2, 4, 3, 5, 6, 7])\n', (32033, 32080), True, 'import numpy as np\n'), ((32370, 32428), 'numpy.concatenate', 'np.concatenate', (['[H_full_strong_subcl, H_full_weak]'], {'axis': '(3)'}), '([H_full_strong_subcl, H_full_weak], axis=3)\n', (32384, 32428), True, 'import numpy as np\n'), ((32451, 32503), 'numpy.concatenate', 'np.concatenate', (['[strong_delays, delays_weak]'], {'axis': '(3)'}), '([strong_delays, delays_weak], axis=3)\n', (32465, 32503), True, 'import numpy as np\n'), ((32552, 32583), 'numpy.argsort', 'np.argsort', (['delays_nlos'], {'axis': '(3)'}), '(delays_nlos, axis=3)\n', (32562, 32583), True, 'import numpy as np\n'), ((32606, 32664), 'numpy.take_along_axis', 'np.take_along_axis', (['delays_nlos', 'delays_sorted_ind'], {'axis': '(3)'}), '(delays_nlos, delays_sorted_ind, axis=3)\n', (32624, 32664), True, 'import numpy as np\n'), ((35738, 35779), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * d3D / lambda_0)'], {}), '(1.0j * 2 * np.pi * d3D / lambda_0)\n', (35744, 35779), True, 'import numpy as np\n'), ((35988, 36021), 'numpy.expand_dims', 'np.expand_dims', (['H_doppler'], {'axis': '(4)'}), '(H_doppler, axis=4)\n', (36002, 36021), True, 'import numpy as np\n'), ((37373, 37409), 'numpy.sqrt', 'np.sqrt', (['(k_factor / (k_factor + 1.0))'], {}), '(k_factor / (k_factor + 1.0))\n', (37380, 37409), True, 'import numpy as np\n'), ((37428, 37459), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (k_factor + 1.0))'], {}), '(1.0 / 
(k_factor + 1.0))\n', (37435, 37459), True, 'import numpy as np\n'), ((37607, 37672), 'numpy.concatenate', 'np.concatenate', (['[H_los_los, H_los_nlos[:, :, :, 1:, ...]]'], {'axis': '(3)'}), '([H_los_los, H_los_nlos[:, :, :, 1:, ...]], axis=3)\n', (37621, 37672), True, 'import numpy as np\n'), ((37870, 37905), 'numpy.where', 'np.where', (['los_status', 'H_los', 'H_nlos'], {}), '(los_status, H_los, H_nlos)\n', (37878, 37905), True, 'import numpy as np\n'), ((7248, 7257), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (7254, 7257), True, 'import numpy as np\n'), ((7258, 7267), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (7264, 7267), True, 'import numpy as np\n'), ((7289, 7298), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (7295, 7298), True, 'import numpy as np\n'), ((7299, 7308), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (7305, 7308), True, 'import numpy as np\n'), ((7331, 7340), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (7337, 7340), True, 'import numpy as np\n'), ((7518, 7527), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (7524, 7527), True, 'import numpy as np\n'), ((7528, 7537), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (7534, 7537), True, 'import numpy as np\n'), ((7715, 7724), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (7721, 7724), True, 'import numpy as np\n'), ((7725, 7734), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (7731, 7734), True, 'import numpy as np\n'), ((10836, 10868), 'tensorflow.cast', 'tf.cast', (['orientation', 'tf.float64'], {}), '(orientation, tf.float64)\n', (10843, 10868), True, 'import tensorflow as tf\n'), ((10882, 10908), 'tensorflow.cast', 'tf.cast', (['theta', 'tf.float64'], {}), '(theta, tf.float64)\n', (10889, 10908), True, 'import tensorflow as tf\n'), ((10922, 10946), 'tensorflow.cast', 'tf.cast', (['phi', 'tf.float64'], {}), '(phi, tf.float64)\n', (10929, 10946), True, 'import tensorflow as tf\n'), ((12089, 12100), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (12095, 12100), True, 'import numpy as np\n'), ((12188, 
12220), 'numpy.expand_dims', 'np.expand_dims', (['F_prime'], {'axis': '(-1)'}), '(F_prime, axis=-1)\n', (12202, 12220), True, 'import numpy as np\n'), ((17073, 17099), 'numpy.exp', 'np.exp', (['(1.0j * Phi[..., 1])'], {}), '(1.0j * Phi[..., 1])\n', (17079, 17099), True, 'import numpy as np\n'), ((17134, 17160), 'numpy.exp', 'np.exp', (['(1.0j * Phi[..., 2])'], {}), '(1.0j * Phi[..., 2])\n', (17140, 17160), True, 'import numpy as np\n'), ((18579, 18613), 'tensorflow.constant', 'tf.constant', (['zod_prime', 'tf.float64'], {}), '(zod_prime, tf.float64)\n', (18590, 18613), True, 'import tensorflow as tf\n'), ((18614, 18648), 'tensorflow.constant', 'tf.constant', (['aod_prime', 'tf.float64'], {}), '(aod_prime, tf.float64)\n', (18625, 18648), True, 'import tensorflow as tf\n'), ((19406, 19463), 'numpy.stack', 'np.stack', (['[F_tx_prime_pol2_1, F_tx_prime_pol2_2]'], {'axis': '(-1)'}), '([F_tx_prime_pol2_1, F_tx_prime_pol2_2], axis=-1)\n', (19414, 19463), True, 'import numpy as np\n'), ((19742, 19776), 'tensorflow.constant', 'tf.constant', (['zoa_prime', 'tf.float64'], {}), '(zoa_prime, tf.float64)\n', (19753, 19776), True, 'import tensorflow as tf\n'), ((19790, 19824), 'tensorflow.constant', 'tf.constant', (['aoa_prime', 'tf.float64'], {}), '(aoa_prime, tf.float64)\n', (19801, 19824), True, 'import tensorflow as tf\n'), ((20551, 20608), 'numpy.stack', 'np.stack', (['[F_rx_prime_pol2_1, F_rx_prime_pol2_2]'], {'axis': '(-1)'}), '([F_rx_prime_pol2_1, F_rx_prime_pol2_2], axis=-1)\n', (20559, 20608), True, 'import numpy as np\n'), ((21147, 21177), 'numpy.squeeze', 'np.squeeze', (['F_tx_pol1'], {'axis': '(-1)'}), '(F_tx_pol1, axis=-1)\n', (21157, 21177), True, 'import numpy as np\n'), ((21260, 21321), 'numpy.tile', 'np.tile', (['F_tx_pol1', '[1, 1, 1, 1, 1, self.tx_array.num_ant, 1]'], {}), '(F_tx_pol1, [1, 1, 1, 1, 1, self.tx_array.num_ant, 1])\n', (21267, 21321), True, 'import numpy as np\n'), ((21598, 21654), 'numpy.tile', 'np.tile', (['F_tx', '[1, 1, 1, 1, 1, 
self.tx_array.num_ant, 1]'], {}), '(F_tx, [1, 1, 1, 1, 1, self.tx_array.num_ant, 1])\n', (21605, 21654), True, 'import numpy as np\n'), ((21994, 22024), 'numpy.squeeze', 'np.squeeze', (['F_rx_pol1'], {'axis': '(-1)'}), '(F_rx_pol1, axis=-1)\n', (22004, 22024), True, 'import numpy as np\n'), ((22107, 22168), 'numpy.tile', 'np.tile', (['F_rx_pol1', '[1, 1, 1, 1, 1, self.rx_array.num_ant, 1]'], {}), '(F_rx_pol1, [1, 1, 1, 1, 1, self.rx_array.num_ant, 1])\n', (22114, 22168), True, 'import numpy as np\n'), ((22444, 22500), 'numpy.tile', 'np.tile', (['F_rx', '[1, 1, 1, 1, 1, self.rx_array.num_ant, 1]'], {}), '(F_rx, [1, 1, 1, 1, 1, self.rx_array.num_ant, 1])\n', (22451, 22500), True, 'import numpy as np\n'), ((26763, 26797), 'numpy.expand_dims', 'np.expand_dims', (['velocities'], {'axis': '(2)'}), '(velocities, axis=2)\n', (26777, 26797), True, 'import numpy as np\n'), ((26936, 26970), 'numpy.expand_dims', 'np.expand_dims', (['velocities'], {'axis': '(3)'}), '(velocities, axis=3)\n', (26950, 26970), True, 'import numpy as np\n'), ((28755, 28789), 'numpy.expand_dims', 'np.expand_dims', (['H_doppler'], {'axis': '(-2)'}), '(H_doppler, axis=-2)\n', (28769, 28789), True, 'import numpy as np\n'), ((30404, 30430), 'numpy.argsort', 'np.argsort', (['powers'], {'axis': '(3)'}), '(powers, axis=3)\n', (30414, 30430), True, 'import numpy as np\n'), ((31516, 31580), 'numpy.take', 'np.take', (['H_full_strong', '[0, 1, 2, 3, 4, 5, 6, 7, 18, 19]'], {'axis': '(4)'}), '(H_full_strong, [0, 1, 2, 3, 4, 5, 6, 7, 18, 19], axis=4)\n', (31523, 31580), True, 'import numpy as np\n'), ((31645, 31699), 'numpy.take', 'np.take', (['H_full_strong', '[8, 9, 10, 11, 16, 17]'], {'axis': '(4)'}), '(H_full_strong, [8, 9, 10, 11, 16, 17], axis=4)\n', (31652, 31699), True, 'import numpy as np\n'), ((31768, 31816), 'numpy.take', 'np.take', (['H_full_strong', '[12, 13, 14, 15]'], {'axis': '(4)'}), '(H_full_strong, [12, 13, 14, 15], axis=4)\n', (31775, 31816), True, 'import numpy as np\n'), ((32192, 32289), 
'numpy.concatenate', 'np.concatenate', (['[H_full_strong_subcl.shape[:3], [-1], H_full_strong_subcl.shape[5:]]'], {'axis': '(0)'}), '([H_full_strong_subcl.shape[:3], [-1], H_full_strong_subcl.\n shape[5:]], axis=0)\n', (32206, 32289), True, 'import numpy as np\n'), ((33913, 33951), 'tensorflow.constant', 'tf.constant', (['H_full_ref', 'tf.complex128'], {}), '(H_full_ref, tf.complex128)\n', (33924, 33951), True, 'import tensorflow as tf\n'), ((34977, 35012), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, -1.0]]'], {}), '([[1.0, 0.0], [0.0, -1.0]])\n', (34985, 35012), True, 'import numpy as np\n'), ((35859, 35886), 'numpy.squeeze', 'np.squeeze', (['H_field'], {'axis': '(4)'}), '(H_field, axis=4)\n', (35869, 35886), True, 'import numpy as np\n'), ((35930, 35957), 'numpy.squeeze', 'np.squeeze', (['H_array'], {'axis': '(4)'}), '(H_array, axis=4)\n', (35940, 35957), True, 'import numpy as np\n'), ((38074, 38107), 'tensorflow.constant', 'tf.constant', (['self.phi', 'tf.float64'], {}), '(self.phi, tf.float64)\n', (38085, 38107), True, 'import tensorflow as tf\n'), ((38330, 38372), 'tensorflow.constant', 'tf.constant', (['self.sample_times', 'tf.float64'], {}), '(self.sample_times, tf.float64)\n', (38341, 38372), True, 'import tensorflow as tf\n'), ((5948, 5957), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (5954, 5957), True, 'import numpy as np\n'), ((6252, 6265), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6258, 6265), True, 'import numpy as np\n'), ((6577, 6613), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size]'}), '(shape=[batch_size])\n', (6593, 6613), True, 'import tensorflow as tf\n'), ((6636, 6672), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size]'}), '(shape=[batch_size])\n', (6652, 6672), True, 'import tensorflow as tf\n'), ((7392, 7401), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (7398, 7401), True, 'import numpy as np\n'), ((7404, 7413), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (7410, 
7413), True, 'import numpy as np\n'), ((7414, 7423), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (7420, 7423), True, 'import numpy as np\n'), ((7465, 7474), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (7471, 7474), True, 'import numpy as np\n'), ((7477, 7486), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (7483, 7486), True, 'import numpy as np\n'), ((7487, 7496), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (7493, 7496), True, 'import numpy as np\n'), ((7589, 7598), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (7595, 7598), True, 'import numpy as np\n'), ((7601, 7610), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (7607, 7610), True, 'import numpy as np\n'), ((7611, 7620), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (7617, 7620), True, 'import numpy as np\n'), ((7662, 7671), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (7668, 7671), True, 'import numpy as np\n'), ((7674, 7683), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (7680, 7683), True, 'import numpy as np\n'), ((7684, 7693), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (7690, 7693), True, 'import numpy as np\n'), ((7975, 8014), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, 3]'}), '(shape=[batch_size, 3])\n', (7991, 8014), True, 'import tensorflow as tf\n'), ((8924, 8963), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, 3]'}), '(shape=[batch_size, 3])\n', (8940, 8963), True, 'import tensorflow as tf\n'), ((10043, 10073), 'numpy.sum', 'np.sum', (['(rho_prime * x)'], {'axis': '(-2)'}), '(rho_prime * x, axis=-2)\n', (10049, 10073), True, 'import numpy as np\n'), ((10455, 10494), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, 3]'}), '(shape=[batch_size, 3])\n', (10471, 10494), True, 'import tensorflow as tf\n'), ((10518, 10554), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size]'}), '(shape=[batch_size])\n', (10534, 10554), True, 'import tensorflow as tf\n'), ((10577, 10613), 
'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size]'}), '(shape=[batch_size])\n', (10593, 10613), True, 'import tensorflow as tf\n'), ((11560, 11575), 'numpy.sin', 'np.sin', (['(phi - a)'], {}), '(phi - a)\n', (11566, 11575), True, 'import numpy as np\n'), ((11589, 11598), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (11595, 11598), True, 'import numpy as np\n'), ((11696, 11705), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (11702, 11705), True, 'import numpy as np\n'), ((11706, 11721), 'numpy.cos', 'np.cos', (['(phi - a)'], {}), '(phi - a)\n', (11712, 11721), True, 'import numpy as np\n'), ((11742, 11757), 'numpy.sin', 'np.sin', (['(phi - a)'], {}), '(phi - a)\n', (11748, 11757), True, 'import numpy as np\n'), ((12437, 12476), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, 3]'}), '(shape=[batch_size, 3])\n', (12453, 12476), True, 'import tensorflow as tf\n'), ((12500, 12536), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size]'}), '(shape=[batch_size])\n', (12516, 12536), True, 'import tensorflow as tf\n'), ((12559, 12595), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size]'}), '(shape=[batch_size])\n', (12575, 12595), True, 'import tensorflow as tf\n'), ((12622, 12661), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, 2]'}), '(shape=[batch_size, 2])\n', (12638, 12661), True, 'import tensorflow as tf\n'), ((13779, 13818), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, 3]'}), '(shape=[batch_size, 3])\n', (13795, 13818), True, 'import tensorflow as tf\n'), ((13846, 13888), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, 3, 1]'}), '(shape=[batch_size, 3, 1])\n', (13862, 13888), True, 'import tensorflow as tf\n'), ((14645, 14680), 'numpy.expand_dims', 'np.expand_dims', (['ant_loc_lcs'], {'axis': '(0)'}), '(ant_loc_lcs, axis=0)\n', (14659, 14680), True, 'import numpy as np\n'), 
((15874, 15909), 'numpy.expand_dims', 'np.expand_dims', (['ant_loc_lcs'], {'axis': '(0)'}), '(ant_loc_lcs, axis=0)\n', (15888, 15909), True, 'import numpy as np\n'), ((18042, 18081), 'numpy.expand_dims', 'np.expand_dims', (['tx_orientations'], {'axis': '(2)'}), '(tx_orientations, axis=2)\n', (18056, 18081), True, 'import numpy as np\n'), ((18288, 18327), 'numpy.expand_dims', 'np.expand_dims', (['rx_orientations'], {'axis': '(1)'}), '(rx_orientations, axis=1)\n', (18302, 18327), True, 'import numpy as np\n'), ((19172, 19206), 'tensorflow.constant', 'tf.constant', (['zod_prime', 'tf.float64'], {}), '(zod_prime, tf.float64)\n', (19183, 19206), True, 'import tensorflow as tf\n'), ((19224, 19258), 'tensorflow.constant', 'tf.constant', (['aod_prime', 'tf.float64'], {}), '(aod_prime, tf.float64)\n', (19235, 19258), True, 'import tensorflow as tf\n'), ((20317, 20351), 'tensorflow.constant', 'tf.constant', (['zoa_prime', 'tf.float64'], {}), '(zoa_prime, tf.float64)\n', (20328, 20351), True, 'import tensorflow as tf\n'), ((20369, 20403), 'tensorflow.constant', 'tf.constant', (['aoa_prime', 'tf.float64'], {}), '(aoa_prime, tf.float64)\n', (20380, 20403), True, 'import tensorflow as tf\n'), ((21462, 21492), 'numpy.squeeze', 'np.squeeze', (['F_tx_pol2'], {'axis': '(-1)'}), '(F_tx_pol2, axis=-1)\n', (21472, 21492), True, 'import numpy as np\n'), ((21522, 21547), 'numpy.zeros', 'np.zeros', (['F_tx_pol1.shape'], {}), '(F_tx_pol1.shape)\n', (21530, 21547), True, 'import numpy as np\n'), ((22308, 22338), 'numpy.squeeze', 'np.squeeze', (['F_rx_pol2'], {'axis': '(-1)'}), '(F_rx_pol2, axis=-1)\n', (22318, 22338), True, 'import numpy as np\n'), ((22368, 22393), 'numpy.zeros', 'np.zeros', (['F_rx_pol1.shape'], {}), '(F_rx_pol1.shape)\n', (22376, 22393), True, 'import numpy as np\n'), ((24676, 24708), 'numpy.expand_dims', 'np.expand_dims', (['d_bar_tx'], {'axis': '(2)'}), '(d_bar_tx, axis=2)\n', (24690, 24708), True, 'import numpy as np\n'), ((24894, 24926), 'numpy.expand_dims', 
'np.expand_dims', (['d_bar_rx'], {'axis': '(1)'}), '(d_bar_rx, axis=1)\n', (24908, 24926), True, 'import numpy as np\n'), ((26865, 26899), 'numpy.expand_dims', 'np.expand_dims', (['velocities'], {'axis': '(1)'}), '(velocities, axis=1)\n', (26879, 26899), True, 'import numpy as np\n'), ((30543, 30599), 'tensorflow.gather', 'tf.gather', (['H_full', 'cluster_ordered'], {'axis': '(3)', 'batch_dims': '(3)'}), '(H_full, cluster_ordered, axis=3, batch_dims=3)\n', (30552, 30599), True, 'import tensorflow as tf\n'), ((32682, 32740), 'tensorflow.gather', 'tf.gather', (['H_nlos', 'delays_sorted_ind'], {'axis': '(3)', 'batch_dims': '(3)'}), '(H_nlos, delays_sorted_ind, axis=3, batch_dims=3)\n', (32691, 32740), True, 'import tensorflow as tf\n'), ((5979, 5988), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (5985, 5988), True, 'import numpy as np\n'), ((6170, 6183), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6176, 6183), True, 'import numpy as np\n'), ((6184, 6195), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (6190, 6195), True, 'import numpy as np\n'), ((6225, 6238), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6231, 6238), True, 'import numpy as np\n'), ((6239, 6250), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (6245, 6250), True, 'import numpy as np\n'), ((7372, 7381), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (7378, 7381), True, 'import numpy as np\n'), ((7382, 7391), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (7388, 7391), True, 'import numpy as np\n'), ((7445, 7454), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (7451, 7454), True, 'import numpy as np\n'), ((7455, 7464), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (7461, 7464), True, 'import numpy as np\n'), ((7569, 7578), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (7575, 7578), True, 'import numpy as np\n'), ((7579, 7588), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (7585, 7588), True, 'import numpy as np\n'), ((7642, 7651), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (7648, 7651), 
True, 'import numpy as np\n'), ((7652, 7661), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (7658, 7661), True, 'import numpy as np\n'), ((10090, 10120), 'numpy.sum', 'np.sum', (['(rho_prime * y)'], {'axis': '(-2)'}), '(rho_prime * y, axis=-2)\n', (10096, 10120), True, 'import numpy as np\n'), ((11536, 11545), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (11542, 11545), True, 'import numpy as np\n'), ((11546, 11559), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (11552, 11559), True, 'import numpy as np\n'), ((11722, 11731), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (11728, 11731), True, 'import numpy as np\n'), ((11732, 11741), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (11738, 11741), True, 'import numpy as np\n'), ((12006, 12019), 'numpy.shape', 'np.shape', (['psi'], {}), '(psi)\n', (12014, 12019), True, 'import numpy as np\n'), ((12778, 12806), 'tensorflow.cast', 'tf.cast', (['F_prime', 'tf.float64'], {}), '(F_prime, tf.float64)\n', (12785, 12806), True, 'import tensorflow as tf\n'), ((12844, 12876), 'tensorflow.cast', 'tf.cast', (['orientation', 'tf.float64'], {}), '(orientation, tf.float64)\n', (12851, 12876), True, 'import tensorflow as tf\n'), ((12913, 12939), 'tensorflow.cast', 'tf.cast', (['theta', 'tf.float64'], {}), '(theta, tf.float64)\n', (12920, 12939), True, 'import tensorflow as tf\n'), ((12977, 13001), 'tensorflow.cast', 'tf.cast', (['phi', 'tf.float64'], {}), '(phi, tf.float64)\n', (12984, 13001), True, 'import tensorflow as tf\n'), ((13995, 14028), 'tensorflow.cast', 'tf.cast', (['orientations', 'tf.float64'], {}), '(orientations, tf.float64)\n', (14002, 14028), True, 'import tensorflow as tf\n'), ((14066, 14096), 'tensorflow.cast', 'tf.cast', (['positions', 'tf.float64'], {}), '(positions, tf.float64)\n', (14073, 14096), True, 'import tensorflow as tf\n'), ((21553, 21578), 'numpy.zeros', 'np.zeros', (['F_tx_pol1.shape'], {}), '(F_tx_pol1.shape)\n', (21561, 21578), True, 'import numpy as np\n'), ((22399, 22424), 'numpy.zeros', 
'np.zeros', (['F_rx_pol1.shape'], {}), '(F_rx_pol1.shape)\n', (22407, 22424), True, 'import numpy as np\n'), ((23522, 23560), 'tensorflow.constant', 'tf.constant', (['self.rays.aoa', 'tf.float64'], {}), '(self.rays.aoa, tf.float64)\n', (23533, 23560), True, 'import tensorflow as tf\n'), ((23598, 23636), 'tensorflow.constant', 'tf.constant', (['self.rays.aod', 'tf.float64'], {}), '(self.rays.aod, tf.float64)\n', (23609, 23636), True, 'import tensorflow as tf\n'), ((23674, 23712), 'tensorflow.constant', 'tf.constant', (['self.rays.zoa', 'tf.float64'], {}), '(self.rays.zoa, tf.float64)\n', (23685, 23712), True, 'import tensorflow as tf\n'), ((23750, 23788), 'tensorflow.constant', 'tf.constant', (['self.rays.zod', 'tf.float64'], {}), '(self.rays.zod, tf.float64)\n', (23761, 23788), True, 'import tensorflow as tf\n'), ((23826, 23861), 'tensorflow.constant', 'tf.constant', (['H_phase', 'tf.complex128'], {}), '(H_phase, tf.complex128)\n', (23837, 23861), True, 'import tensorflow as tf\n'), ((25900, 25938), 'tensorflow.constant', 'tf.constant', (['self.rays.aoa', 'tf.float64'], {}), '(self.rays.aoa, tf.float64)\n', (25911, 25938), True, 'import tensorflow as tf\n'), ((25972, 26010), 'tensorflow.constant', 'tf.constant', (['self.rays.aod', 'tf.float64'], {}), '(self.rays.aod, tf.float64)\n', (25983, 26010), True, 'import tensorflow as tf\n'), ((26044, 26082), 'tensorflow.constant', 'tf.constant', (['self.rays.zoa', 'tf.float64'], {}), '(self.rays.zoa, tf.float64)\n', (26055, 26082), True, 'import tensorflow as tf\n'), ((26116, 26154), 'tensorflow.constant', 'tf.constant', (['self.rays.zod', 'tf.float64'], {}), '(self.rays.zod, tf.float64)\n', (26127, 26154), True, 'import tensorflow as tf\n'), ((27719, 27757), 'tensorflow.constant', 'tf.constant', (['self.rays.aoa', 'tf.float64'], {}), '(self.rays.aoa, tf.float64)\n', (27730, 27757), True, 'import tensorflow as tf\n'), ((27787, 27825), 'tensorflow.constant', 'tf.constant', (['self.rays.zoa', 'tf.float64'], {}), 
'(self.rays.zoa, tf.float64)\n', (27798, 27825), True, 'import tensorflow as tf\n'), ((27855, 27897), 'tensorflow.constant', 'tf.constant', (['self.sample_times', 'tf.float64'], {}), '(self.sample_times, tf.float64)\n', (27866, 27897), True, 'import tensorflow as tf\n'), ((28984, 29021), 'numpy.expand_dims', 'np.expand_dims', (['power_scaling'], {'axis': '(4)'}), '(power_scaling, axis=4)\n', (28998, 29021), True, 'import numpy as np\n'), ((29795, 29828), 'tensorflow.constant', 'tf.constant', (['self.phi', 'tf.float64'], {}), '(self.phi, tf.float64)\n', (29806, 29828), True, 'import tensorflow as tf\n'), ((29940, 29982), 'tensorflow.constant', 'tf.constant', (['self.sample_times', 'tf.float64'], {}), '(self.sample_times, tf.float64)\n', (29951, 29982), True, 'import tensorflow as tf\n'), ((36102, 36133), 'numpy.expand_dims', 'np.expand_dims', (['H_delay'], {'axis': '(3)'}), '(H_delay, axis=3)\n', (36116, 36133), True, 'import numpy as np\n'), ((11600, 11609), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (11606, 11609), True, 'import numpy as np\n'), ((11610, 11623), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (11616, 11623), True, 'import numpy as np\n'), ((11666, 11681), 'numpy.cos', 'np.cos', (['(phi - a)'], {}), '(phi - a)\n', (11672, 11681), True, 'import numpy as np\n'), ((11642, 11651), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (11648, 11651), True, 'import numpy as np\n'), ((11652, 11665), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (11658, 11665), True, 'import numpy as np\n')] |
from pymatch.util import ApproximateStringMatching, deBrujin32Bit, bit_not
import numpy as np
class LEAP(ApproximateStringMatching):
    """Leaping-toad (LEAP) approximate string matching between two DNA strings.

    The edit matrix is modelled as a swimming pool: each diagonal is a
    "lane" (offset between the two strings, in [-k, k]), mismatches are
    "hurdles", and the algorithm swims forward along a lane for free and
    spends energy to cross hurdles or leap between lanes, under a total
    energy budget E.
    """
    def __init__(self, dna1, dna2, k, E, penalty=None, forward=None,
                 originLanes=None, destinationLanes=None, hurdleCost=1):
        """Set up the DP tables and per-lane hurdle bit vectors.

        Args:
            dna1, dna2: sequences to compare; swapped if needed so dna1 is
                the shorter one.
            k: maximum number of differences allowed (lanes span [-k, k]).
            E: maximum energy budget.
            penalty: optional callable (l_, l) -> energy cost of leaping
                from lane l_ to l; defaults to leapLanePenalty's rule.
            forward: optional callable (l_, l) -> columns gained by a leap;
                defaults to leapForwardColumn's geometry.
            originLanes: dict lane -> starting column; defaults to {0: 0}.
            destinationLanes: dict lane -> goal column; defaults to {0: m}.
            hurdleCost: energy cost of crossing a single hurdle.
        """
        if len(dna1) > len(dna2):
            # swap the two
            temp = dna1
            dna1 = dna2
            dna2 = temp
        # Enable de-Brujin acceleration
        self.deBrujin = True
        # self.m is presumably set by the base class from the inputs --
        # TODO confirm against ApproximateStringMatching.
        super().__init__(dna1, dna2)
        self.k = k # Maximum differences allowed
        self.E = E # Maximum energy given
        #TODO use callable hurdleCost so that the cost is changeable
        self.hurdleCost = hurdleCost # The cost of hurdles
        # Initialize LEAP
        # start[l+k][e] / end[l+k][e]: furthest start/end column reachable
        # in lane l having spent e energy; -inf marks "unreachable".
        self.start = np.zeros((2*k + 1, E + 1))
        self.start.fill(float('-inf'))
        self.end = np.zeros((2*k + 1, E + 1))
        self.end.fill(float('-inf'))
        if self.deBrujin:
            # hurdles[i] is an int bit vector for lane i-k; lookUpTable
            # maps a de-Brujin hash of an isolated low bit to its index.
            self.hurdles = []
            self.lookUpTable = deBrujin32Bit()
        else:
            self.hurdles = np.zeros((2*k + 1, self.m))
        self.initHurdleVectors()
        self.originLanes = {0: 0} if originLanes is None else originLanes
        if destinationLanes is None:
            self.destinationLanes = {0: self.m}
        else:
            self.destinationLanes = destinationLanes
        # Define essential cost functions
        self.penalty = penalty
        self.forward = forward
        # Store result of edit distance calculation
        self.finalLane = None
        self.finalEnergy = None

    def leapLanePenalty(self, l_, l):
        """
        Returns the penalty of leaping from lane l_ to l. When l and l_ are
        the same lane, then the penalty is just the energy cost of next hurdle.
        """
        if self.penalty is not None:
            return self.penalty(l_, l)
        else: # Use default penalty
            if l_ == l:
                return self.hurdleCost
            else:
                # Leap cost grows linearly with the lane distance.
                return self.hurdleCost * abs(l_ - l)

    def leapForwardColumn(self, l_, l, pos=0):
        """
        Returns the number of columns the toad moves forward when leaping from lane l_ to l.
        When l and l_ are the same lane, then the number should just be 1.
        """
        if self.forward is not None:
            return self.forward(l_, l)
        else: # Use default layout
            if l_ == l:
                # Crossing a hurdle within the same lane advances one column
                # (unless already at the end of the pool).
                return 1 if pos < self.m else 0
            elif abs(l_) > abs(l) and l * l_ >= 0:
                # Leaping toward the main diagonal on the same side gains nothing.
                return 0
            elif abs(l_) < abs(l) and l * l_ >= 0:
                return abs(l - l_)
            else:
                # Lanes on opposite sides of the main diagonal.
                return abs(l - l_) - abs(l_)

    def initHurdleVectors(self):
        """
        Detect hurdles in the swimming pool and encode it using bit vectors
        """
        for i in range(2 * self.k + 1):
            lane = i - self.k
            # self.match(x, y) is inherited from ApproximateStringMatching;
            # presumably truthy when the characters match -- TODO confirm.
            if lane <= 0:
                hurdles = [self.match(x, x - lane) for x in range(lane + 1, self.m + 1 + lane)]
            else:
                hurdles = [self.match(x, x - lane) for x in range(1, self.m + 1)]
            if self.deBrujin:
                # Bit value 1 marks a hurdle (mismatch); the list is reversed
                # so that position 0 ends up as the least significant bit.
                hurdlesBits = ['0' if x or x is None else '1' for x in reversed(hurdles)]
                hurdlesInt = int("".join(hurdlesBits), 2)
                self.hurdles.append(hurdlesInt)
            else:
                self.hurdles[i] = hurdles
        print(self.hurdles)  # NOTE(review): debug print left in

    def verticesToHurdle(self, lane, position):
        """Return how far the toad can swim in `lane` starting at `position`
        before reaching the next hurdle (0 when already at the pool's end)."""
        lane = int(lane)
        if position >= self.m - 1:
            return 0
        tempPos = int(position) if position >= 0 else 0
        if self.deBrujin:
            # Shift past the current position, isolate the lowest set bit
            # (the nearest hurdle), then recover its index with a de-Brujin
            # multiply-and-shift through the precomputed lookup table.
            shiftBitVec = int(self.hurdles[lane + self.k]) >> tempPos
            b_LSB = shiftBitVec & (~shiftBitVec + 1)
            b_LSB *= 0x6EB14F9  # 32-bit de-Brujin constant paired with lookUpTable
            b_LSB = b_LSB >> 27
            return self.lookUpTable[b_LSB]
        else:
            # Linear scan: advance while the next position is passable
            # (non-zero match value), stopping at the lane's end.
            while self.hurdles[lane + self.k][tempPos + 1] != 0:
                tempPos += 1
                if tempPos >= self.m - 1:
                    break
            return tempPos - position

    def editDistance(self):
        """Run the LEAP dynamic program.

        Fills the start/end tables energy level by energy level and returns
        True as soon as some destination lane's goal column is reached,
        recording finalLane and finalEnergy; returns False if the energy
        budget E is exhausted first.
        """
        # Initialization
        finalEnergy = float('inf')
        k = self.k
        # Energy 0: every origin lane starts at its given column and swims
        # to the first hurdle for free.
        for l in range(-k, k+1):
            if l in self.originLanes:
                self.start[l+k][0] = self.originLanes[l]
                length = self.verticesToHurdle(l, self.start[l+k][0])
                self.end[l+k][0] = self.start[l+k][0] + length
        for e in range(1, self.E+1):
            for l in range(-k, k+1):
                # Consider every source lane l_ whose leap into l is
                # affordable with the remaining energy.
                for l_ in range(-k, k+1):
                    e_ = e - self.leapLanePenalty(l_, l)
                    if e_ >= 0:
                        candidateStart = self.end[l_+k][e_] + self.leapForwardColumn(l_, l, self.start[l_+k][e_])
                        if candidateStart > self.start[l+k][e]:
                            # Clamp to the pool's end.
                            candidateStart = self.m if candidateStart > self.m else candidateStart
                            self.start[l+k][e] = candidateStart
                # Swim forward to the next hurdle from the best start.
                length = self.verticesToHurdle(l, self.start[l+k][e])
                self.end[l+k][e] = self.start[l+k][e] + length
                if l in self.destinationLanes and self.end[l+k][e] >= self.destinationLanes[l] - 1:
                    if e < finalEnergy:
                        self.finalLane = l
                        self.finalEnergy = e
                        return True
        return False

    def backtrack(self):
        """Reconstruct the leap path from the DP tables.

        Must be called after a successful editDistance(). Returns a pair
        (path, pathCount) where path is a list of {"lane", "start", "end"}
        dicts from origin to destination.
        """
        path = []
        l = self.finalLane
        e = self.finalEnergy
        k = self.k
        pathCount = 1
        path.append({"lane": l,
                     "start": self.start[l + k][e],
                     "end": self.end[l + k][e]})
        # Walk backwards until an origin lane's starting column is reached.
        while l not in self.originLanes or self.start[l+k][e] != self.originLanes[l]:
            for l_ in range(-k, k+1):
                # NOTE(review): e_ can go negative here; Python's negative
                # indexing would then read the wrong table entry -- TODO
                # confirm this cannot occur for reachable states.
                e_ = e - self.leapLanePenalty(l_, l)
                if self.end[l_+k][e_] + self.leapForwardColumn(l_, l) == self.start[l+k][e]:
                    l = l_
                    e = e_
                    break
            path.insert(0, {"lane": l,
                            "start": self.start[l + k][e],
                            "end": self.end[l + k][e]})
            pathCount += 1
        return path, pathCount
if __name__ == "__main__":
    # Demo: align two short DNA reads with at most 2 differences and an
    # energy budget of 10, then report the optimal leap path.
    matcher = LEAP("ACTAGAACTT", "ACTTAGCACT", 2, 10)
    matcher.editDistance()
    print(matcher.finalLane, matcher.finalEnergy)
    leap_path, leap_count = matcher.backtrack()
    print(leap_path, leap_count)
| [
"pymatch.util.deBrujin32Bit",
"numpy.zeros"
] | [((803, 831), 'numpy.zeros', 'np.zeros', (['(2 * k + 1, E + 1)'], {}), '((2 * k + 1, E + 1))\n', (811, 831), True, 'import numpy as np\n'), ((888, 916), 'numpy.zeros', 'np.zeros', (['(2 * k + 1, E + 1)'], {}), '((2 * k + 1, E + 1))\n', (896, 916), True, 'import numpy as np\n'), ((1039, 1054), 'pymatch.util.deBrujin32Bit', 'deBrujin32Bit', ([], {}), '()\n', (1052, 1054), False, 'from pymatch.util import ApproximateStringMatching, deBrujin32Bit, bit_not\n'), ((1096, 1125), 'numpy.zeros', 'np.zeros', (['(2 * k + 1, self.m)'], {}), '((2 * k + 1, self.m))\n', (1104, 1125), True, 'import numpy as np\n')] |
import os
import numpy as np
import tensorflow as tf
import cv2
import download
import input
import model
flags = tf.app.flags
FLAGS = flags.FLAGS
# --- Dataset download and layout ---
flags.DEFINE_boolean('download_data', False, 'whether to download, extract image data')
flags.DEFINE_string('download_dir', './downloads/', 'directory path to download data')
flags.DEFINE_string('train_dir', './images/train/', 'directory path to training set')
flags.DEFINE_string('test_dir', './images/test/', 'directory path to test set')
# --- Model input dimensions ---
flags.DEFINE_integer('input_height', 256, 'resized image height, model input')
flags.DEFINE_integer('input_width', 256, 'resized image width, model input')
# --- Run mode and checkpointing ---
flags.DEFINE_string('mode', 'train', 'train or test')
flags.DEFINE_boolean('load_ckpt', True, 'whether to try restoring model from checkpoint')
flags.DEFINE_string('ckpt_dir', './checkpoints/', 'directory path to checkpoint files')
# --- Training hyperparameters ---
flags.DEFINE_integer('epoch', 10, 'total number of epoch to train')
flags.DEFINE_integer('batch_size', 4, 'size of batch')
flags.DEFINE_integer('min_queue_examples', 1000, 'minimum number of elements in batch queue')
flags.DEFINE_float('learning_rate', 0.0001, 'learning rate')
flags.DEFINE_float('l1_weight', 100, 'weight on L1 term for generator')
flags.DEFINE_float('beta1', 0.5, 'adam optimizer beta1 parameter')
# --- Logging ---
flags.DEFINE_string('log_dir', './logs/', 'directory path to write summary')
def main(argv):
    """Entry point: build the model, dispatch on FLAGS.mode, then clean up."""
    net = model.Model(FLAGS.log_dir, FLAGS.ckpt_dir, FLAGS.load_ckpt, FLAGS.input_height, FLAGS.input_width)
    handlers = {'train': train, 'test': test}
    handler = handlers.get(FLAGS.mode)
    if handler is not None:
        handler(net)
    else:
        print('Unexpected mode: {} Choose \'train\' or \'test\''.format(FLAGS.mode))
    net.close()
def train(m):
    """Optionally fetch the CelebA dataset, build input pipelines, and train m."""
    if FLAGS.download_data:
        file_id = '0B7EVK8r0v71pZjFTYXZWM3FlRnM'
        archive_path = os.path.join(FLAGS.download_dir, 'img_align_celeba.zip')
        download.maybe_download_from_google_drive(file_id, archive_path)
        download.maybe_extract(archive_path, FLAGS.train_dir, FLAGS.test_dir)
    # Training pipeline also reports the example count for epoch sizing.
    train_batches, example_count = input.inputs(
        FLAGS.train_dir, FLAGS.batch_size, FLAGS.min_queue_examples,
        FLAGS.input_height, FLAGS.input_width)
    steps_per_epoch = int(example_count / FLAGS.batch_size)
    eval_batches, _ = input.inputs(
        FLAGS.test_dir, FLAGS.batch_size, 0, FLAGS.input_height, FLAGS.input_width)
    m.train(train_batches, eval_batches, FLAGS.epoch, steps_per_epoch,
            FLAGS.learning_rate, FLAGS.l1_weight, FLAGS.beta1, FLAGS.load_ckpt)
def test(m):
    """Interactive demo: draw on an OpenCV canvas with the mouse and show the
    model's output side-by-side, refreshing whenever the sketch changes.

    Left-drag draws white lines, right-click clears the canvas, Esc quits.
    """
    class DrawingState:
        # Tracks the previous mouse position, whether the button is held,
        # and whether the canvas changed since the last model run.
        def __init__(self):
            self.x_prev = 0
            self.y_prev = 0
            self.drawing = False
            self.update = True

    def interactive_drawing(event, x, y, flags, param):
        # OpenCV mouse callback; param carries [canvas image, DrawingState].
        image = param[0]
        state = param[1]
        if event == cv2.EVENT_LBUTTONDOWN:
            state.drawing = True
            state.x_prev, state.y_prev = x, y
        elif event == cv2.EVENT_MOUSEMOVE:
            if state.drawing:
                # Connect consecutive mouse positions with a white line.
                cv2.line(image, (state.x_prev, state.y_prev), (x, y), (1, 1, 1), 1)
                state.x_prev = x
                state.y_prev = y
                state.update = True
        elif event == cv2.EVENT_LBUTTONUP:
            state.drawing = False
        elif event == cv2.EVENT_RBUTTONDOWN:
            # Right click clears the whole canvas.
            image.fill(0)
            state.update = True

    cv2.namedWindow('Canvas')
    image_input = np.zeros((FLAGS.input_height, FLAGS.input_width, 3), np.float32)
    state = DrawingState()
    cv2.setMouseCallback('Canvas', interactive_drawing, [image_input, state])
    # Loop until the window is closed by the user.
    while cv2.getWindowProperty('Canvas', 0) >= 0:
        if state.update:
            # Run the model only when the sketch changed; batch of one.
            reshaped_image_input = np.array([image_input])
            image_output = m.test(reshaped_image_input)
            # Show input sketch and model output side by side (RGB -> BGR
            # because OpenCV displays BGR).
            concatenated = np.concatenate((image_input, image_output[0]), axis=1)
            color_converted = cv2.cvtColor(concatenated, cv2.COLOR_RGB2BGR)
            cv2.imshow('Canvas', color_converted)
            state.update = False
        k = cv2.waitKey(1) & 0xFF
        if k == 27: # esc
            break
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # tf.app.run() parses the command-line flags and then invokes main(argv).
    tf.app.run()
| [
"cv2.line",
"os.path.join",
"numpy.concatenate",
"download.maybe_extract",
"cv2.cvtColor",
"cv2.waitKey",
"model.Model",
"numpy.zeros",
"cv2.imshow",
"cv2.setMouseCallback",
"numpy.array",
"input.inputs",
"download.maybe_download_from_google_drive",
"cv2.destroyAllWindows",
"tensorflow.a... | [((1401, 1504), 'model.Model', 'model.Model', (['FLAGS.log_dir', 'FLAGS.ckpt_dir', 'FLAGS.load_ckpt', 'FLAGS.input_height', 'FLAGS.input_width'], {}), '(FLAGS.log_dir, FLAGS.ckpt_dir, FLAGS.load_ckpt, FLAGS.\n input_height, FLAGS.input_width)\n', (1412, 1504), False, 'import model\n'), ((2087, 2203), 'input.inputs', 'input.inputs', (['FLAGS.train_dir', 'FLAGS.batch_size', 'FLAGS.min_queue_examples', 'FLAGS.input_height', 'FLAGS.input_width'], {}), '(FLAGS.train_dir, FLAGS.batch_size, FLAGS.min_queue_examples,\n FLAGS.input_height, FLAGS.input_width)\n', (2099, 2203), False, 'import input\n'), ((2316, 2409), 'input.inputs', 'input.inputs', (['FLAGS.test_dir', 'FLAGS.batch_size', '(0)', 'FLAGS.input_height', 'FLAGS.input_width'], {}), '(FLAGS.test_dir, FLAGS.batch_size, 0, FLAGS.input_height, FLAGS\n .input_width)\n', (2328, 2409), False, 'import input\n'), ((3418, 3443), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Canvas"""'], {}), "('Canvas')\n", (3433, 3443), False, 'import cv2\n'), ((3462, 3526), 'numpy.zeros', 'np.zeros', (['(FLAGS.input_height, FLAGS.input_width, 3)', 'np.float32'], {}), '((FLAGS.input_height, FLAGS.input_width, 3), np.float32)\n', (3470, 3526), True, 'import numpy as np\n'), ((3558, 3631), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Canvas"""', 'interactive_drawing', '[image_input, state]'], {}), "('Canvas', interactive_drawing, [image_input, state])\n", (3578, 3631), False, 'import cv2\n'), ((4148, 4171), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4169, 4171), False, 'import cv2\n'), ((4205, 4217), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4215, 4217), True, 'import tensorflow as tf\n'), ((1834, 1890), 'os.path.join', 'os.path.join', (['FLAGS.download_dir', '"""img_align_celeba.zip"""'], {}), "(FLAGS.download_dir, 'img_align_celeba.zip')\n", (1846, 1890), False, 'import os\n'), ((1899, 1977), 'download.maybe_download_from_google_drive', 
'download.maybe_download_from_google_drive', (['google_drive_file_id', 'download_path'], {}), '(google_drive_file_id, download_path)\n', (1940, 1977), False, 'import download\n'), ((1986, 2056), 'download.maybe_extract', 'download.maybe_extract', (['download_path', 'FLAGS.train_dir', 'FLAGS.test_dir'], {}), '(download_path, FLAGS.train_dir, FLAGS.test_dir)\n', (2008, 2056), False, 'import download\n'), ((3642, 3676), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['"""Canvas"""', '(0)'], {}), "('Canvas', 0)\n", (3663, 3676), False, 'import cv2\n'), ((3743, 3766), 'numpy.array', 'np.array', (['[image_input]'], {}), '([image_input])\n', (3751, 3766), True, 'import numpy as np\n'), ((3850, 3904), 'numpy.concatenate', 'np.concatenate', (['(image_input, image_output[0])'], {'axis': '(1)'}), '((image_input, image_output[0]), axis=1)\n', (3864, 3904), True, 'import numpy as np\n'), ((3935, 3980), 'cv2.cvtColor', 'cv2.cvtColor', (['concatenated', 'cv2.COLOR_RGB2BGR'], {}), '(concatenated, cv2.COLOR_RGB2BGR)\n', (3947, 3980), False, 'import cv2\n'), ((3993, 4030), 'cv2.imshow', 'cv2.imshow', (['"""Canvas"""', 'color_converted'], {}), "('Canvas', color_converted)\n", (4003, 4030), False, 'import cv2\n'), ((4077, 4091), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4088, 4091), False, 'import cv2\n'), ((3063, 3130), 'cv2.line', 'cv2.line', (['image', '(state.x_prev, state.y_prev)', '(x, y)', '(1, 1, 1)', '(1)'], {}), '(image, (state.x_prev, state.y_prev), (x, y), (1, 1, 1), 1)\n', (3071, 3130), False, 'import cv2\n')] |
from matplotlib import pyplot
import numpy
import sys
import os
def plot_nx(length_frequencies, total_length, output_dir):
figure = pyplot.figure()
axes = pyplot.axes()
legend_names = list()
x1 = 0
y_prev = None
x_coords = list()
y_coords = list()
for length,frequency in length_frequencies:
for i in range(frequency):
y = length
width = float(length) / float(total_length)
x2 = x1 + width
if y_prev is not None:
x_coords.extend([x1, x1])
y_coords.extend([y_prev, y])
x_coords.extend([x1, x2])
y_coords.extend([y, y])
x1 = x2
y_prev = y
if y_coords[-1] != 0:
y_coords.append(0)
x_coords.append(x_coords[-1])
axes.plot(x_coords, y_coords, linewidth=0.6)
axes.axvline(0.5, linestyle="--", alpha=0.3, linewidth=0.7, zorder=-1)
axes.set_xlim([0, 1])
axes.set_title("Nx")
axes.set_xlabel("Cumulative coverage (normalized to 1)")
axes.set_ylabel("Length")
path = os.path.join(output_dir, "Nx.svg")
sys.stderr.write("SAVING FIGURE: %s\n" % path)
figure.savefig(path, dpi=200)
pyplot.close()
def plot_iterative_histogram(iterative_histogram, output_dir):
figure = pyplot.figure()
axes = pyplot.axes()
bounds = numpy.array(iterative_histogram.edges)
center = (bounds[:-1] + bounds[1:]) / 2
axes.bar(center, iterative_histogram.histogram, width=iterative_histogram.bin_size, align="center")
axes.set_xlabel("Read length (bp)")
axes.set_ylabel("Frequency")
path = os.path.join(output_dir, "histogram.svg")
sys.stderr.write("SAVING FIGURE: %s\n" % path)
figure.savefig(path, dpi=200)
pyplot.close()
| [
"matplotlib.pyplot.axes",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.array",
"sys.stderr.write",
"os.path.join"
] | [((139, 154), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (152, 154), False, 'from matplotlib import pyplot\n'), ((166, 179), 'matplotlib.pyplot.axes', 'pyplot.axes', ([], {}), '()\n', (177, 179), False, 'from matplotlib import pyplot\n'), ((1089, 1123), 'os.path.join', 'os.path.join', (['output_dir', '"""Nx.svg"""'], {}), "(output_dir, 'Nx.svg')\n", (1101, 1123), False, 'import os\n'), ((1128, 1174), 'sys.stderr.write', 'sys.stderr.write', (["('SAVING FIGURE: %s\\n' % path)"], {}), "('SAVING FIGURE: %s\\n' % path)\n", (1144, 1174), False, 'import sys\n'), ((1214, 1228), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (1226, 1228), False, 'from matplotlib import pyplot\n'), ((1307, 1322), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (1320, 1322), False, 'from matplotlib import pyplot\n'), ((1334, 1347), 'matplotlib.pyplot.axes', 'pyplot.axes', ([], {}), '()\n', (1345, 1347), False, 'from matplotlib import pyplot\n'), ((1362, 1400), 'numpy.array', 'numpy.array', (['iterative_histogram.edges'], {}), '(iterative_histogram.edges)\n', (1373, 1400), False, 'import numpy\n'), ((1637, 1678), 'os.path.join', 'os.path.join', (['output_dir', '"""histogram.svg"""'], {}), "(output_dir, 'histogram.svg')\n", (1649, 1678), False, 'import os\n'), ((1683, 1729), 'sys.stderr.write', 'sys.stderr.write', (["('SAVING FIGURE: %s\\n' % path)"], {}), "('SAVING FIGURE: %s\\n' % path)\n", (1699, 1729), False, 'import sys\n'), ((1769, 1783), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (1781, 1783), False, 'from matplotlib import pyplot\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for space_to_batch_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_space_to_batch_nd_tests(options):
"""Make a set of tests to do space_to_batch_nd."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
{
"dtype": [tf.float32],
"input_shape": [[2, 3, 7, 3]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
# Non-4D use case: 1 bath dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[1, 4, 4, 4, 1, 1]],
"block_shape": [[2, 2, 2]],
"paddings": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
# 3D case.
{
"dtype": [tf.float32],
"input_shape": [[1, 4, 4]],
"block_shape": [[2]],
"paddings": [[[0, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
]
def build_graph(parameters):
"""Build a space_to_batch graph given `parameters`."""
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.compat.v1.placeholder(
dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get paddings either as a const or as a placeholder (tensor).
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.compat.v1.placeholder(
dtype=tf.int32, name="paddings", shape=shape)
input_tensors.append(paddings)
out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
if options.use_experimental_converter:
# Remove unsupported dimension cases. Currently, kernel supports 3 and 4-D
# inputs.
test_parameters = [
test_parameters[0], test_parameters[1], test_parameters[3]
]
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=56)
| [
"tensorflow.compat.v1.space_to_batch_nd",
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"numpy.array",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function"
] | [((1115, 1144), 'tensorflow.lite.testing.zip_test_utils.register_make_test_function', 'register_make_test_function', ([], {}), '()\n', (1142, 1144), False, 'from tensorflow.lite.testing.zip_test_utils import register_make_test_function\n'), ((4290, 4389), 'tensorflow.lite.testing.zip_test_utils.make_zip_of_tests', 'make_zip_of_tests', (['options', 'test_parameters', 'build_graph', 'build_inputs'], {'expected_tf_failures': '(56)'}), '(options, test_parameters, build_graph, build_inputs,\n expected_tf_failures=56)\n', (4307, 4389), False, 'from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\n'), ((2670, 2773), 'tensorflow.compat.v1.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': "parameters['dtype']", 'name': '"""input"""', 'shape': "parameters['input_shape']"}), "(dtype=parameters['dtype'], name='input', shape=\n parameters['input_shape'])\n", (2694, 2773), True, 'import tensorflow.compat.v1 as tf\n'), ((3537, 3594), 'tensorflow.compat.v1.space_to_batch_nd', 'tf.space_to_batch_nd', (['input_tensor', 'block_shape', 'paddings'], {}), '(input_tensor, block_shape, paddings)\n', (3557, 3594), True, 'import tensorflow.compat.v1 as tf\n'), ((3066, 3133), 'tensorflow.compat.v1.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'name': '"""shape"""', 'shape': 'shape'}), "(dtype=tf.int32, name='shape', shape=shape)\n", (3090, 3133), True, 'import tensorflow.compat.v1 as tf\n'), ((3407, 3477), 'tensorflow.compat.v1.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'name': '"""paddings"""', 'shape': 'shape'}), "(dtype=tf.int32, name='paddings', shape=shape)\n", (3431, 3477), True, 'import tensorflow.compat.v1 as tf\n'), ((3706, 3772), 'tensorflow.lite.testing.zip_test_utils.create_tensor_data', 'create_tensor_data', (["parameters['dtype']", "parameters['input_shape']"], {}), "(parameters['dtype'], parameters['input_shape'])\n", (3724, 3772), False, 'from 
tensorflow.lite.testing.zip_test_utils import create_tensor_data\n'), ((3846, 3881), 'numpy.array', 'np.array', (["parameters['block_shape']"], {}), "(parameters['block_shape'])\n", (3854, 3881), True, 'import numpy as np\n'), ((3947, 3979), 'numpy.array', 'np.array', (["parameters['paddings']"], {}), "(parameters['paddings'])\n", (3955, 3979), True, 'import numpy as np\n')] |
import argparse
import os
import json
import re
import numpy as np
# python eval.py -c weights/VTN-10-liver -g YOUR_GPU_DEVICES
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint', type=str, default='weights/VTN-10-liver',
help='Specifies a previous checkpoint to load')
parser.add_argument('-r', '--rep', type=int, default=1,
help='Number of times of shared-weight cascading')
parser.add_argument('-g', '--gpu', type=str, default='-1',
help='Specifies gpu device(s)')
parser.add_argument('-d', '--dataset', type=str, default=None,
help='Specifies a data config')
parser.add_argument('-v', '--val_subset', type=str, default=None)
parser.add_argument('--batch', type=int, default=4, help='Size of minibatch')
parser.add_argument('--fast_reconstruction', action='store_true')
parser.add_argument('--paired', action='store_true')
parser.add_argument('--data_args', type=str, default=None)
parser.add_argument('--net_args', type=str, default=None)
parser.add_argument('--name', type=str, default=None)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
import tensorflow as tf
import tflearn
import network
import data_util.liver
import data_util.brain
def main():
if args.checkpoint is None:
print('Checkpoint must be specified!')
return
if ':' in args.checkpoint:
args.checkpoint, steps = args.checkpoint.split(':')
steps = int(steps)
else:
steps = None
args.checkpoint = find_checkpoint_step(args.checkpoint, steps)
print(args.checkpoint)
model_dir = os.path.dirname(args.checkpoint)
try:
with open(os.path.join(model_dir, 'args.json'), 'r') as f:
model_args = json.load(f)
print(model_args)
except Exception as e:
print(e)
model_args = {}
if args.dataset is None:
args.dataset = model_args['dataset']
if args.data_args is None:
args.data_args = model_args['data_args']
Framework = network.FrameworkUnsupervised
Framework.net_args['base_network'] = model_args['base_network']
Framework.net_args['n_cascades'] = model_args['n_cascades']
Framework.net_args['rep'] = args.rep
Framework.net_args.update(eval('dict({})'.format(model_args['net_args'])))
if args.net_args is not None:
Framework.net_args.update(eval('dict({})'.format(args.net_args)))
with open(os.path.join(args.dataset), 'r') as f:
cfg = json.load(f)
image_size = cfg.get('image_size', [128, 128, 128])
image_type = cfg.get('image_type')
gpus = 0 if args.gpu == '-1' else len(args.gpu.split(','))
framework = Framework(devices=gpus, image_size=image_size, segmentation_class_value=cfg.get(
'segmentation_class_value', None), fast_reconstruction=args.fast_reconstruction, validation=True)
print('Graph built')
Dataset = eval('data_util.{}.Dataset'.format(image_type))
ds = Dataset(args.dataset, batch_size=args.batch, paired=args.paired, **
eval('dict({})'.format(args.data_args)))
sess = tf.Session()
saver = tf.train.Saver(tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES))
checkpoint = args.checkpoint
saver.restore(sess, checkpoint)
tflearn.is_training(False, session=sess)
val_subsets = [data_util.liver.Split.VALID]
if args.val_subset is not None:
val_subsets = args.val_subset.split(',')
tflearn.is_training(False, session=sess)
if not os.path.exists('pair'):
os.mkdir('pair')
for val_subset in val_subsets:
print("Validation subset {}".format(val_subset))
gen = ds.generator(val_subset, loop=False)
results = framework.my_validate(sess, gen, keys=None, summary=False)
indexname = ['warped_moving','warped_moving0','warped_moving2','warped_moving3',\
'warped_moving4','warped_moving5','warped_moving6','warped_moving7','warped_moving8',\
'warped_moving9','warped_moving10','image_fixed']
warped_moving = results['warped_moving'][0][0][:,:,:,0]
warped_moving0 = results['warped_moving_0'][0][0][:,:,:,0]
warped_moving1 = results['warped_moving_1'][0][0][:,:,:,0]
warped_moving2 = results['warped_moving_2'][0][0][:,:,:,0]
warped_moving3 = results['warped_moving_3'][0][0][:,:,:,0]
warped_moving4 = results['warped_moving_4'][0][0][:,:,:,0]
warped_moving5 = results['warped_moving_5'][0][0][:,:,:,0]
warped_moving6 = results['warped_moving_6'][0][0][:,:,:,0]
warped_moving7 = results['warped_moving_7'][0][0][:,:,:,0]
warped_moving8 = results['warped_moving_8'][0][0][:,:,:,0]
warped_moving9 = results['warped_moving_9'][0][0][:,:,:,0]
warped_moving10 = results['warped_moving_10'][0][0][:,:,:,0]
image_fixed = results['image_fixed'][0][0][:,:,:,0]
np.savez('array_save.npz',warped_moving,warped_moving0,warped_moving1,warped_moving2,warped_moving3,\
warped_moving4,warped_moving5,warped_moving6,warped_moving7,warped_moving8,\
warped_moving9,warped_moving10,image_fixed,indexname)
def find_checkpoint_step(checkpoint_path, target_steps=None):
pattern = re.compile(r'model-(\d+).index')
checkpoints = []
for f in os.listdir(checkpoint_path):
m = pattern.match(f)
if m:
steps = int(m.group(1))
checkpoints.append((-steps if target_steps is None else abs(
target_steps - steps), os.path.join(checkpoint_path, f.replace('.index', ''))))
return min(checkpoints, key=lambda x: x[0])[1]
if __name__ == '__main__':
main()
| [
"os.mkdir",
"json.load",
"argparse.ArgumentParser",
"tensorflow.get_collection",
"os.path.dirname",
"tensorflow.Session",
"os.path.exists",
"tflearn.is_training",
"numpy.savez",
"os.path.join",
"os.listdir",
"re.compile"
] | [((137, 162), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (160, 162), False, 'import argparse\n'), ((1645, 1677), 'os.path.dirname', 'os.path.dirname', (['args.checkpoint'], {}), '(args.checkpoint)\n', (1660, 1677), False, 'import os\n'), ((3132, 3144), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3142, 3144), True, 'import tensorflow as tf\n'), ((3305, 3345), 'tflearn.is_training', 'tflearn.is_training', (['(False)'], {'session': 'sess'}), '(False, session=sess)\n', (3324, 3345), False, 'import tflearn\n'), ((3485, 3525), 'tflearn.is_training', 'tflearn.is_training', (['(False)'], {'session': 'sess'}), '(False, session=sess)\n', (3504, 3525), False, 'import tflearn\n'), ((5260, 5292), 're.compile', 're.compile', (['"""model-(\\\\d+).index"""'], {}), "('model-(\\\\d+).index')\n", (5270, 5292), False, 'import re\n'), ((5327, 5354), 'os.listdir', 'os.listdir', (['checkpoint_path'], {}), '(checkpoint_path)\n', (5337, 5354), False, 'import os\n'), ((2515, 2527), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2524, 2527), False, 'import json\n'), ((3173, 3221), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (3190, 3221), True, 'import tensorflow as tf\n'), ((3537, 3559), 'os.path.exists', 'os.path.exists', (['"""pair"""'], {}), "('pair')\n", (3551, 3559), False, 'import os\n'), ((3569, 3585), 'os.mkdir', 'os.mkdir', (['"""pair"""'], {}), "('pair')\n", (3577, 3585), False, 'import os\n'), ((4925, 5179), 'numpy.savez', 'np.savez', (['"""array_save.npz"""', 'warped_moving', 'warped_moving0', 'warped_moving1', 'warped_moving2', 'warped_moving3', 'warped_moving4', 'warped_moving5', 'warped_moving6', 'warped_moving7', 'warped_moving8', 'warped_moving9', 'warped_moving10', 'image_fixed', 'indexname'], {}), "('array_save.npz', warped_moving, warped_moving0, warped_moving1,\n warped_moving2, warped_moving3, warped_moving4, warped_moving5,\n warped_moving6, 
warped_moving7, warped_moving8, warped_moving9,\n warped_moving10, image_fixed, indexname)\n", (4933, 5179), True, 'import numpy as np\n'), ((1779, 1791), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1788, 1791), False, 'import json\n'), ((2462, 2488), 'os.path.join', 'os.path.join', (['args.dataset'], {}), '(args.dataset)\n', (2474, 2488), False, 'import os\n'), ((1705, 1741), 'os.path.join', 'os.path.join', (['model_dir', '"""args.json"""'], {}), "(model_dir, 'args.json')\n", (1717, 1741), False, 'import os\n')] |
import matplotlib.pyplot as plt
import math
import numpy as np
class PrimeNumberAnalyzer():
"""
Analyzes prime numbers
"""
def __init__(self, lower, higher):
self.run_analysis(lower, higher)
def is_prime(self, number):
"""
Determines whether the number "number" is in fact prime, if so, it
returns true. Otherwise, it returns false.
"""
if number % 2 == 0 and number > 2:
return False
return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
def analyze_distribution(self, starting_number, ending_number):
"""
Analyzes the prime numbers and returns 4 arrays:
1) All prime numbers that end in 1
2) All prime numbers that end in 3
3) All prime numbers that end in 5
4) All prime numbers that end in 7
"""
# Creating prime number arrays
end_in_1 = []
end_in_3 = []
end_in_5 = []
end_in_7 = []
# Identifying all prime numbers from "starting_number" and
# "ending_number"
for number in range(starting_number, ending_number):
print("Determining if " + str(number) + " is a prime number")
if self.is_prime(number):
remainder = number % 10
if remainder == 1:
end_in_1.append(number)
elif remainder == 3:
end_in_3.append(number)
elif remainder == 5:
end_in_5.append(number)
elif remainder == 7:
end_in_7.append(number)
# Returning final prime number arrays
return [end_in_1, end_in_3, end_in_5, end_in_7]
def display_prime_number_analysis(self, prime_number_arrays, starting_number, ending_number):
"""
Displays (using matplotlib) the distribution of prime numbers
"""
# Creating temporary arrays
prime_number_1 = []
prime_number_3 = []
prime_number_5 = []
prime_number_7 = []
# Array totals
total_1 = 0
total_3 = 0
total_5 = 0
total_7 = 0
overall_total = 1.0
for number_index in range(starting_number, ending_number):
print("Totaling number: " + str(number_index))
if self.is_prime(number_index):
if number_index in prime_number_arrays[0]:
total_1 += 1
elif number_index in prime_number_arrays[1]:
total_3 += 1
elif number_index in prime_number_arrays[2]:
total_5 += 1
elif number_index in prime_number_arrays[3]:
total_7 += 1
overall_total = total_1 + total_3 + total_5 + total_7
overall_total /= 100.0
if overall_total == 0.0:
overall_total = 1.0
prime_number_1.append(total_1)
prime_number_3.append(total_3)
prime_number_5.append(total_5)
prime_number_7.append(total_7)
prime_number_1[-1] /= overall_total
prime_number_3[-1] /= overall_total
prime_number_5[-1] /= overall_total
prime_number_7[-1] /= overall_total
print("Plotting graph")
line_1, = plt.plot(np.asarray(prime_number_1), 'r', label='Ending in 1')
line_3, = plt.plot(np.asarray(prime_number_3), 'g', label='Ending in 3')
line_5, = plt.plot(np.asarray(prime_number_5), 'b', label='Ending in 5')
line_7, = plt.plot(np.asarray(prime_number_7), 'y', label='Ending in 7')
plt.ylabel('percentage')
plt.xlabel('number')
print("Total prime numbers: " + str(overall_total * 100))
plt.legend()
plt.show()
def run_analysis(self, starting_number, ending_number):
"""
Run the analysis
"""
prime_arrays = self.analyze_distribution(starting_number, ending_number)
self.display_prime_number_analysis(prime_arrays, starting_number, ending_number)
PrimeNumberAnalyzer(0, 100000)
| [
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.legend",
"numpy.asarray",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((3676, 3700), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""percentage"""'], {}), "('percentage')\n", (3686, 3700), True, 'import matplotlib.pyplot as plt\n'), ((3709, 3729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number"""'], {}), "('number')\n", (3719, 3729), True, 'import matplotlib.pyplot as plt\n'), ((3806, 3818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3816, 3818), True, 'import matplotlib.pyplot as plt\n'), ((3827, 3837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3835, 3837), True, 'import matplotlib.pyplot as plt\n'), ((3370, 3396), 'numpy.asarray', 'np.asarray', (['prime_number_1'], {}), '(prime_number_1)\n', (3380, 3396), True, 'import numpy as np\n'), ((3451, 3477), 'numpy.asarray', 'np.asarray', (['prime_number_3'], {}), '(prime_number_3)\n', (3461, 3477), True, 'import numpy as np\n'), ((3532, 3558), 'numpy.asarray', 'np.asarray', (['prime_number_5'], {}), '(prime_number_5)\n', (3542, 3558), True, 'import numpy as np\n'), ((3613, 3639), 'numpy.asarray', 'np.asarray', (['prime_number_7'], {}), '(prime_number_7)\n', (3623, 3639), True, 'import numpy as np\n'), ((522, 539), 'math.sqrt', 'math.sqrt', (['number'], {}), '(number)\n', (531, 539), False, 'import math\n')] |
import numpy as np
import pandas as pd
from .comparisontools import (do_count_event, make_match_count_matrix, make_agreement_scores_from_count)
class BaseComparison:
"""
Base class for all comparison classes:
* GroundTruthComparison
* MultiSortingComparison
* SymmetricSortingComparison
Mainly deals with:
* sampling_frequency
* sorting names
* delta_time to delta_frames
"""
def __init__(self, sorting_list, name_list=None, delta_time=0.4, sampling_frequency=None,
match_score=0.5, chance_score=0.1, n_jobs=-1, verbose=False):
self.sorting_list = sorting_list
if name_list is None:
name_list = ['sorting{}'.format(i + 1) for i in range(len(sorting_list))]
self.name_list = name_list
if np.any(['_' in name for name in name_list]):
raise ValueError("Sorter names in 'name_list' cannot contain '_'")
if sampling_frequency is None:
# take sampling frequency from sorting list and test that they are equivalent.
sampling_freqs_not_none = np.array([s.get_sampling_frequency() for s in self.sorting_list
if s.get_sampling_frequency() is not None], dtype='float64')
assert len(sampling_freqs_not_none) > 0, ("Sampling frequency information "
"not found in sorting. Pass it with the 'sampling_frequency' "
"argument")
# Some sorter round the sampling freq lets emit a warning
sf0 = sampling_freqs_not_none[0]
if not np.all([sf == sf0 for sf in sampling_freqs_not_none]):
delta_freq_ratio = np.abs(sampling_freqs_not_none - sf0) / sf0
# tolerence of 0.1%
assert np.all(delta_freq_ratio < 0.001), "Inconsintent sampling frequency among sorting list"
sampling_frequency = sampling_freqs_not_none[0]
self.sampling_frequency = sampling_frequency
self.delta_time = delta_time
self.delta_frames = int(self.delta_time / 1000 * self.sampling_frequency)
self.match_score = match_score
self.chance_score = chance_score
self._n_jobs = n_jobs
self._verbose = verbose
class BaseTwoSorterComparison(BaseComparison):
"""
Base class shared by SortingComparison and GroundTruthComparison
"""
def __init__(self, sorting1, sorting2, sorting1_name=None, sorting2_name=None,
delta_time=0.4, sampling_frequency=None, match_score=0.5,
chance_score=0.1, n_jobs=1, verbose=False):
sorting_list = [sorting1, sorting2]
if sorting1_name is None:
sorting1_name = 'sorting1'
if sorting2_name is None:
sorting2_name = 'sorting2'
name_list = [sorting1_name, sorting2_name]
BaseComparison.__init__(self, sorting_list, name_list=name_list, delta_time=delta_time,
sampling_frequency=sampling_frequency, match_score=match_score,
chance_score=chance_score, verbose=verbose, n_jobs=n_jobs)
self.unit1_ids = self.sorting1.get_unit_ids()
self.unit2_ids = self.sorting2.get_unit_ids()
self._do_agreement()
self._do_matching()
@property
def sorting1(self):
return self.sorting_list[0]
@property
def sorting2(self):
return self.sorting_list[1]
@property
def sorting1_name(self):
return self.name_list[0]
@property
def sorting2_name(self):
return self.name_list[1]
def _do_agreement(self):
if self._verbose:
print('Agreement scores...')
# common to GroundTruthComparison and SymmetricSortingComparison
# spike count for each spike train
self.event_counts1 = do_count_event(self.sorting1)
self.event_counts2 = do_count_event(self.sorting2)
# matrix of event match count for each pair
self.match_event_count = make_match_count_matrix(self.sorting1, self.sorting2, self.delta_frames,
n_jobs=self._n_jobs)
# agreement matrix score for each pair
self.agreement_scores = make_agreement_scores_from_count(self.match_event_count, self.event_counts1,
self.event_counts2)
def _do_matching(self):
# must be implemented in subclass
raise NotImplementedError
def get_ordered_agreement_scores(self):
# order rows
order0 = self.agreement_scores.max(axis=1).argsort()
scores = self.agreement_scores.iloc[order0.values[::-1], :]
# order columns
indexes = np.arange(scores.shape[1])
order1 = []
for r in range(scores.shape[0]):
possible = indexes[~np.in1d(indexes, order1)]
if possible.size > 0:
ind = np.argmax(scores.iloc[r, possible].values)
order1.append(possible[ind])
remain = indexes[~np.in1d(indexes, order1)]
order1.extend(remain)
scores = scores.iloc[:, order1]
return scores
| [
"numpy.abs",
"numpy.argmax",
"numpy.any",
"numpy.arange",
"numpy.all",
"numpy.in1d"
] | [((816, 861), 'numpy.any', 'np.any', (["[('_' in name) for name in name_list]"], {}), "([('_' in name) for name in name_list])\n", (822, 861), True, 'import numpy as np\n'), ((4850, 4876), 'numpy.arange', 'np.arange', (['scores.shape[1]'], {}), '(scores.shape[1])\n', (4859, 4876), True, 'import numpy as np\n'), ((1688, 1743), 'numpy.all', 'np.all', (['[(sf == sf0) for sf in sampling_freqs_not_none]'], {}), '([(sf == sf0) for sf in sampling_freqs_not_none])\n', (1694, 1743), True, 'import numpy as np\n'), ((1881, 1913), 'numpy.all', 'np.all', (['(delta_freq_ratio < 0.001)'], {}), '(delta_freq_ratio < 0.001)\n', (1887, 1913), True, 'import numpy as np\n'), ((5052, 5094), 'numpy.argmax', 'np.argmax', (['scores.iloc[r, possible].values'], {}), '(scores.iloc[r, possible].values)\n', (5061, 5094), True, 'import numpy as np\n'), ((5166, 5190), 'numpy.in1d', 'np.in1d', (['indexes', 'order1'], {}), '(indexes, order1)\n', (5173, 5190), True, 'import numpy as np\n'), ((1778, 1815), 'numpy.abs', 'np.abs', (['(sampling_freqs_not_none - sf0)'], {}), '(sampling_freqs_not_none - sf0)\n', (1784, 1815), True, 'import numpy as np\n'), ((4970, 4994), 'numpy.in1d', 'np.in1d', (['indexes', 'order1'], {}), '(indexes, order1)\n', (4977, 4994), True, 'import numpy as np\n')] |
# TrueSkill is a rating system based on Bayesian inference, estimating each players skill as a gaussian like Elo rating.
# See trueskill.org for more.
import pandas as pd, numpy as np
from trueskill import TrueSkill, Rating, rate_1vs1
ts = TrueSkill(draw_probability=0.01) # 0.01 is arbitary small number
beta = 25 / 6 # default value
def win_probability(p1, p2):
delta_mu = p1.mu - p2.mu
sum_sigma = p1.sigma * p1.sigma + p2.sigma * p2.sigma
denom = np.sqrt(2 * (beta * beta) + sum_sigma)
return ts.cdf(delta_mu / denom)
submit = pd.read_csv('../input/SampleSubmissionStage1.csv')
submit[['Season', 'Team1', 'Team2']] = submit.apply(lambda r:pd.Series([int(t) for t in r.ID.split('_')]), axis=1)
df_tour = pd.read_csv('../input/RegularSeasonCompactResults.csv')
teamIds = np.unique(np.concatenate([df_tour.WTeamID.values, df_tour.LTeamID.values]))
ratings = { tid:ts.Rating() for tid in teamIds }
def feed_season_results(season):
print("season = {}".format(season))
df1 = df_tour[df_tour.Season == season]
for r in df1.itertuples():
ratings[r.WTeamID], ratings[r.LTeamID] = rate_1vs1(ratings[r.WTeamID], ratings[r.LTeamID])
def update_pred(season):
beta = np.std([r.mu for r in ratings.values()])
print("beta = {}".format(beta))
submit.loc[submit.Season==season, 'Pred'] = submit[submit.Season==season].apply(lambda r:win_probability(ratings[r.Team1], ratings[r.Team2]), axis=1)
for season in sorted(df_tour.Season.unique())[:-4]: # exclude last 4 years
feed_season_results(season)
update_pred(2014)
feed_season_results(2014)
update_pred(2015)
feed_season_results(2015)
update_pred(2016)
feed_season_results(2016)
update_pred(2017)
submit.drop(['Season', 'Team1', 'Team2'], axis=1, inplace=True)
submit.to_csv('trueskill_estimation.csv', index=None)
| [
"pandas.read_csv",
"trueskill.TrueSkill",
"trueskill.rate_1vs1",
"numpy.concatenate",
"numpy.sqrt"
] | [((242, 274), 'trueskill.TrueSkill', 'TrueSkill', ([], {'draw_probability': '(0.01)'}), '(draw_probability=0.01)\n', (251, 274), False, 'from trueskill import TrueSkill, Rating, rate_1vs1\n'), ((556, 606), 'pandas.read_csv', 'pd.read_csv', (['"""../input/SampleSubmissionStage1.csv"""'], {}), "('../input/SampleSubmissionStage1.csv')\n", (567, 606), True, 'import pandas as pd, numpy as np\n'), ((733, 788), 'pandas.read_csv', 'pd.read_csv', (['"""../input/RegularSeasonCompactResults.csv"""'], {}), "('../input/RegularSeasonCompactResults.csv')\n", (744, 788), True, 'import pandas as pd, numpy as np\n'), ((467, 505), 'numpy.sqrt', 'np.sqrt', (['(2 * (beta * beta) + sum_sigma)'], {}), '(2 * (beta * beta) + sum_sigma)\n', (474, 505), True, 'import pandas as pd, numpy as np\n'), ((809, 873), 'numpy.concatenate', 'np.concatenate', (['[df_tour.WTeamID.values, df_tour.LTeamID.values]'], {}), '([df_tour.WTeamID.values, df_tour.LTeamID.values])\n', (823, 873), True, 'import pandas as pd, numpy as np\n'), ((1122, 1171), 'trueskill.rate_1vs1', 'rate_1vs1', (['ratings[r.WTeamID]', 'ratings[r.LTeamID]'], {}), '(ratings[r.WTeamID], ratings[r.LTeamID])\n', (1131, 1171), False, 'from trueskill import TrueSkill, Rating, rate_1vs1\n')] |
from constants import *
import math
import numpy as np
import time
from util import getPolicies, UpperP, LowerP, indexOfPolicy
from util import itConvergencePolicy, getRewards, getProb, allOneNeighbours
from util import CalculateDelDelV, prob_step, delW
from evaluatePolicy import evaluatePolicy
verbose = 0
## policyMethod = 0 : brute force method, = 1 : nearest neighbour approach
policyMethod = 0
plot_vstar = True
def policyIt(mdp, start_state=0, epsilon=4, randomseed=None, delta=0.1,
             bounds="MBAE", use_ddv=False, mc=True):
    """PAC-style identification of a near-optimal policy by enumeration.

    Enumerates every deterministic policy of `mdp`, maintains optimistic,
    pessimistic and empirical Q estimates per policy (interval bounds via
    UpperP/LowerP plus an MBAE-style confidence bonus), and keeps sampling
    transitions until the optimistic value of the runner-up policy falls
    below the pessimistic value of the leader by the epsilon margin.

    Args:
        mdp: environment exposing numStates, numActions, Vmax, discountFactor,
            filename and simulate(state, action) -> (next_state, reward).
        start_state: state whose value is used for all policy comparisons.
        epsilon: PAC accuracy parameter.
        randomseed: optional numpy RNG seed (also embedded in the log name).
        delta: PAC confidence parameter forwarded to the bound helpers.
        bounds: kept for interface compatibility; not consulted here.
        use_ddv: if True, sample the (state, action) maximising delta-delta-V.
        mc: otherwise, if True, sample where transition uncertainty weighted
            by discounted occupancy is largest; if False, run H-step rollouts
            of both candidate policies.

    Returns:
        The action array of the policy believed to be near-optimal.
    """
    if randomseed is not None:
        np.random.seed(randomseed)
    policies = np.array(getPolicies(mdp.numStates, mdp.numActions))
    numPolicies = len(policies)
    counts = np.zeros(numPolicies)
    print(numPolicies)
    # Horizon H: number of steps after which the discounted tail is below
    # epsilon / (2 * Vmax).
    H = int(math.log(epsilon / (2 * mdp.Vmax * (1 - mdp.discountFactor)))
            / math.log(mdp.discountFactor))
    print("Chosen value of H is : ", H)

    it = 0
    samples = 0
    initial_iterations = 1 * mdp.numStates * mdp.numActions
    # Empirical model: mean rewards, visit counts, transition frequencies.
    # (dtype=int: the np.int alias was removed in NumPy 1.24.)
    R_s_a = np.zeros((mdp.numStates, mdp.numActions))
    N_s_a_sprime = np.zeros((mdp.numStates, mdp.numActions, mdp.numStates), dtype=int)
    N_s_a = np.zeros((mdp.numStates, mdp.numActions), dtype=int)
    P_s_a_sprime = np.zeros((mdp.numStates, mdp.numActions, mdp.numStates))
    # Per-policy value estimates: optimistic / pessimistic / point estimates.
    Qupper = mdp.Vmax * np.ones((numPolicies, mdp.numStates))
    QupperMBAE = mdp.Vmax * np.ones((numPolicies, mdp.numStates))
    Qlower = np.zeros((numPolicies, mdp.numStates))
    Qstar = (mdp.Vmax / 2) * np.ones((numPolicies, mdp.numStates))
    QstarMBAE = (mdp.Vmax / 2) * np.ones((numPolicies, mdp.numStates))
    QlowerMBAE = np.zeros((numPolicies, mdp.numStates))
    P_tilda = np.zeros((numPolicies, mdp.numStates, mdp.numStates))
    P_lower_tilda = np.zeros((numPolicies, mdp.numStates, mdp.numStates))
    VlowerMBAE = np.zeros((numPolicies, mdp.numStates))
    VupperMBAE = mdp.Vmax * np.ones((numPolicies, mdp.numStates))
    Vstar = (mdp.Vmax / 2) * np.ones((numPolicies, mdp.numStates))
    discovered_states = set([start_state])
    deltadeltaV = np.zeros(mdp.numStates)

    def _record(s, a, nxt, reward):
        """Fold one observed transition (s, a) -> nxt with `reward` into the
        running reward mean, visit counts and empirical transition probs."""
        R_s_a[s][a] = (reward + R_s_a[s][a] * N_s_a[s][a]) / (N_s_a[s][a] + 1)
        N_s_a[s][a] += 1
        N_s_a_sprime[s][a][nxt] += 1
        for s2 in range(mdp.numStates):
            P_s_a_sprime[s][a][s2] = float(N_s_a_sprime[s][a][s2]) / N_s_a[s][a]

    # Seed the model by sampling every (state, action) pair once.
    while it < initial_iterations:
        for state in range(mdp.numStates):
            for act in range(mdp.numActions):
                it += 1
                ss, rr = mdp.simulate(state, act)
                _record(state, act, ss, rr)
    samples += initial_iterations

    # Text mode ('w'), not 'wb': everything written below is str, and
    # writing str to a binary-mode file raises TypeError on Python 3.
    suffix = '-policyddv' if use_ddv else '-policy'
    ff = open(mdp.filename + suffix + str(randomseed) + '.txt', 'w')

    while samples < MAX_ITERATION_LIMIT:
        if policyMethod == 0:
            # Brute force: refresh the bounds of every enumerated policy.
            for p in range(numPolicies):
                current_policy = policies[p]
                for i in range(mdp.numStates):
                    if N_s_a[i][current_policy[i]] > 0:
                        P_tilda[p][i] = UpperP(
                            i, current_policy[i], delta,
                            N_s_a_sprime[i][current_policy[i]],
                            mdp.numStates, Qupper[p], False)
                        P_lower_tilda[p][i] = LowerP(
                            i, current_policy[i], delta,
                            N_s_a_sprime[i][current_policy[i]],
                            mdp.numStates, Qlower[p], False)
                # Policy evaluation under the optimistic, pessimistic and
                # empirical transition models respectively.
                Qupper[p] = itConvergencePolicy(
                    Qupper[p], getRewards(R_s_a, current_policy), P_tilda[p],
                    mdp.discountFactor, epsilon, converge_iterations,
                    epsilon_convergence)
                Qlower[p] = itConvergencePolicy(
                    Qlower[p], getRewards(R_s_a, current_policy),
                    P_lower_tilda[p], mdp.discountFactor, epsilon,
                    converge_iterations, epsilon_convergence)
                Qstar[p] = itConvergencePolicy(
                    Qstar[p], getRewards(R_s_a, current_policy),
                    getProb(P_s_a_sprime, current_policy), mdp.discountFactor,
                    epsilon, converge_iterations, epsilon_convergence)
                # MBAE-style fixed point with a Vmax confidence bonus.
                for internal in range(converge_iterations):
                    oldQlowerMBAE = np.copy(QlowerMBAE[p][start_state])
                    for state in range(mdp.numStates):
                        act = current_policy[state]
                        firstterm = R_s_a[state][act]
                        secondterm = mdp.discountFactor * np.sum(
                            VupperMBAE[p] * P_s_a_sprime[state][act])
                        lower_secondterm = mdp.discountFactor * np.sum(
                            VlowerMBAE[p] * P_s_a_sprime[state][act])
                        star_secondterm = mdp.discountFactor * np.sum(
                            Vstar[p] * P_s_a_sprime[state][act])
                        thirdterm = mdp.Vmax * math.sqrt(
                            (math.log(c * (samples ** 2) * mdp.numStates * 1)
                             - math.log(delta)) / N_s_a[state][act])
                        QupperMBAE[p][state] = firstterm + secondterm + thirdterm
                        QlowerMBAE[p][state] = firstterm + lower_secondterm - thirdterm
                        QstarMBAE[p][state] = firstterm + star_secondterm
                        VupperMBAE[p][state] = QupperMBAE[p][state]
                        VlowerMBAE[p][state] = QlowerMBAE[p][state]
                        Vstar[p][state] = QstarMBAE[p][state]
                    if np.linalg.norm(oldQlowerMBAE - QlowerMBAE[p][start_state]) <= epsilon_convergence:
                        break
            # Leader by point estimate; runner-up by optimistic bound.
            policy1Index = np.argmax(QstarMBAE[:, start_state])
            policy2choices = QupperMBAE[:, start_state].argsort()[::-1]
            if policy2choices[0] == policy1Index:
                policy2Index = policy2choices[1]
            else:
                policy2Index = policy2choices[0]

        policy1 = policies[policy1Index]
        policy2 = policies[policy2Index]
        if (samples % 1000) < 100:
            if verbose == 0:
                ff.write(str(samples))
                ff.write('\t')
                if plot_vstar:
                    ff.write(str(evaluatePolicy(mdp, policy1, start_state)))
                    print(evaluatePolicy(mdp, policy1, start_state))
                    print(policy1, policy2)
                else:
                    ff.write(str(QupperMBAE[policy2Index][start_state]
                                 - QlowerMBAE[policy1Index][start_state]))
                    print(samples, QupperMBAE[policy2Index][start_state]
                          - QlowerMBAE[policy1Index][start_state])
                ff.write('\n')
            else:
                print(samples)
                print(QupperMBAE[:, start_state], QlowerMBAE[:, start_state])
        counts[policy1Index] += 1
        counts[policy2Index] += 1
        polList = [policy1Index, policy2Index]

        if use_ddv:
            # Sample the (state, action) with the largest delta-delta-V.
            for pnum in polList:
                policiesfddv = policies[pnum]
                for st in list(discovered_states):
                    ac = policiesfddv[st]
                    deltadeltaV[st] = CalculateDelDelV(
                        st, ac, mdp, N_s_a_sprime, QupperMBAE[pnum],
                        QlowerMBAE[pnum], None, None, start_state,
                        P_s_a_sprime, P_tilda[pnum], P_lower_tilda[pnum],
                        R_s_a, epsilon, delta, converge_iterations,
                        epsilon_convergence, policiesfddv)
                cs = np.argmax(deltadeltaV)
                ca = policiesfddv[cs]
                ss, rr = mdp.simulate(cs, ca)
                print("Policy is ", policiesfddv)
                print("Sampling ", cs, ca)
                time.sleep(0.1)
                samples += 1
                discovered_states.add(ss)
                _record(cs, ca, ss, rr)
        elif mc:
            # For each candidate policy, sample where transition uncertainty
            # weighted by discounted state occupancy D is largest.
            for pol in (policy1, policy2):
                deltaW = np.zeros(mdp.numStates)
                mu = np.zeros(mdp.numStates)
                D = np.zeros(mdp.numStates)
                mu[start_state] = 1
                for t in range(H):
                    D = D + (mdp.discountFactor ** t) * mu
                    mu = prob_step(mu, P_s_a_sprime, pol)
                for st in range(mdp.numStates):
                    # Transition uncertainty for (st, pol[st]).
                    deltaW[st] = delW(st, pol[st], delta,
                                      N_s_a_sprime[st][pol[st]],
                                      mdp.numStates, False)
                st = np.argmax(deltaW * D)
                ac = pol[st]
                ss, rr = mdp.simulate(st, ac)
                samples += 1
                # Bug fix: the original credited this sample to the stale
                # (state, act) pair left over from the bound loop; the
                # transition actually simulated is (st, ac).
                _record(st, ac, ss, rr)
        else:
            # Plain H-step rollouts of both candidate policies.
            for pol in (policy1, policy2):
                state = start_state
                for h in range(H):
                    act = pol[state]
                    ss, rr = mdp.simulate(state, act)
                    samples += 1
                    _record(state, act, ss, rr)
                    state = ss

        # Stop once the runner-up's optimistic value can no longer beat the
        # leader's pessimistic value by the epsilon margin.  (The original
        # guarded this with `(samples % 1000) < 1000`, which is always true.)
        if (QupperMBAE[policy2Index][start_state]
                - QlowerMBAE[policy1Index][start_state]
                - epsilon * (1 - mdp.discountFactor) / 2 < 0):
            print(Qupper[policy2Index][start_state],
                  Qstar[policy1Index][start_state],
                  epsilon * (1 - mdp.discountFactor) / 2)
            print("Epsilon condition reached at ", samples, " samples")
            print(policy1)
            ff.close()  # was leaked on early return in the original
            return policy1
    ff.close()
    return policy1
| [
"numpy.random.seed",
"util.UpperP",
"numpy.copy",
"numpy.argmax",
"util.LowerP",
"util.delW",
"evaluatePolicy.evaluatePolicy",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"util.CalculateDelDelV",
"time.sleep",
"util.prob_step",
"util.getRewards",
"numpy.linalg.norm",
"util.getProb",
"m... | [((698, 719), 'numpy.zeros', 'np.zeros', (['numPolicies'], {}), '(numPolicies)\n', (706, 719), True, 'import numpy as np\n'), ((1059, 1100), 'numpy.zeros', 'np.zeros', (['(mdp.numStates, mdp.numActions)'], {}), '((mdp.numStates, mdp.numActions))\n', (1067, 1100), True, 'import numpy as np\n'), ((1116, 1186), 'numpy.zeros', 'np.zeros', (['(mdp.numStates, mdp.numActions, mdp.numStates)'], {'dtype': 'np.int'}), '((mdp.numStates, mdp.numActions, mdp.numStates), dtype=np.int)\n', (1124, 1186), True, 'import numpy as np\n'), ((1194, 1249), 'numpy.zeros', 'np.zeros', (['(mdp.numStates, mdp.numActions)'], {'dtype': 'np.int'}), '((mdp.numStates, mdp.numActions), dtype=np.int)\n', (1202, 1249), True, 'import numpy as np\n'), ((1265, 1321), 'numpy.zeros', 'np.zeros', (['(mdp.numStates, mdp.numActions, mdp.numStates)'], {}), '((mdp.numStates, mdp.numActions, mdp.numStates))\n', (1273, 1321), True, 'import numpy as np\n'), ((1448, 1486), 'numpy.zeros', 'np.zeros', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1456, 1486), True, 'import numpy as np\n'), ((1625, 1663), 'numpy.zeros', 'np.zeros', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1633, 1663), True, 'import numpy as np\n'), ((1675, 1728), 'numpy.zeros', 'np.zeros', (['(numPolicies, mdp.numStates, mdp.numStates)'], {}), '((numPolicies, mdp.numStates, mdp.numStates))\n', (1683, 1728), True, 'import numpy as np\n'), ((1745, 1798), 'numpy.zeros', 'np.zeros', (['(numPolicies, mdp.numStates, mdp.numStates)'], {}), '((numPolicies, mdp.numStates, mdp.numStates))\n', (1753, 1798), True, 'import numpy as np\n'), ((1812, 1850), 'numpy.zeros', 'np.zeros', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1820, 1850), True, 'import numpy as np\n'), ((2027, 2050), 'numpy.zeros', 'np.zeros', (['mdp.numStates'], {}), '(mdp.numStates)\n', (2035, 2050), True, 'import numpy as np\n'), ((567, 593), 'numpy.random.seed', 'np.random.seed', 
(['randomseed'], {}), '(randomseed)\n', (581, 593), True, 'import numpy as np\n'), ((615, 657), 'util.getPolicies', 'getPolicies', (['mdp.numStates', 'mdp.numActions'], {}), '(mdp.numStates, mdp.numActions)\n', (626, 657), False, 'from util import getPolicies, UpperP, LowerP, indexOfPolicy\n'), ((1339, 1376), 'numpy.ones', 'np.ones', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1346, 1376), True, 'import numpy as np\n'), ((1400, 1437), 'numpy.ones', 'np.ones', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1407, 1437), True, 'import numpy as np\n'), ((1509, 1546), 'numpy.ones', 'np.ones', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1516, 1546), True, 'import numpy as np\n'), ((1573, 1610), 'numpy.ones', 'np.ones', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1580, 1610), True, 'import numpy as np\n'), ((1874, 1911), 'numpy.ones', 'np.ones', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1881, 1911), True, 'import numpy as np\n'), ((1934, 1971), 'numpy.ones', 'np.ones', (['(numPolicies, mdp.numStates)'], {}), '((numPolicies, mdp.numStates))\n', (1941, 1971), True, 'import numpy as np\n'), ((830, 891), 'math.log', 'math.log', (['(epsilon / (2 * mdp.Vmax * (1 - mdp.discountFactor)))'], {}), '(epsilon / (2 * mdp.Vmax * (1 - mdp.discountFactor)))\n', (838, 891), False, 'import math\n'), ((886, 914), 'math.log', 'math.log', (['mdp.discountFactor'], {}), '(mdp.discountFactor)\n', (894, 914), False, 'import math\n'), ((5557, 5593), 'numpy.argmax', 'np.argmax', (['QstarMBAE[:, start_state]'], {}), '(QstarMBAE[:, start_state])\n', (5566, 5593), True, 'import numpy as np\n'), ((14952, 14974), 'numpy.argmax', 'np.argmax', (['deltadeltaV'], {}), '(deltadeltaV)\n', (14961, 14974), True, 'import numpy as np\n'), ((15251, 15266), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (15261, 15266), False, 'import time\n'), 
((15640, 15663), 'numpy.zeros', 'np.zeros', (['mdp.numStates'], {}), '(mdp.numStates)\n', (15648, 15663), True, 'import numpy as np\n'), ((15672, 15695), 'numpy.zeros', 'np.zeros', (['mdp.numStates'], {}), '(mdp.numStates)\n', (15680, 15695), True, 'import numpy as np\n'), ((15703, 15726), 'numpy.zeros', 'np.zeros', (['mdp.numStates'], {}), '(mdp.numStates)\n', (15711, 15726), True, 'import numpy as np\n'), ((16052, 16073), 'numpy.argmax', 'np.argmax', (['(deltaW * D)'], {}), '(deltaW * D)\n', (16061, 16073), True, 'import numpy as np\n'), ((16478, 16501), 'numpy.zeros', 'np.zeros', (['mdp.numStates'], {}), '(mdp.numStates)\n', (16486, 16501), True, 'import numpy as np\n'), ((16510, 16533), 'numpy.zeros', 'np.zeros', (['mdp.numStates'], {}), '(mdp.numStates)\n', (16518, 16533), True, 'import numpy as np\n'), ((16541, 16564), 'numpy.zeros', 'np.zeros', (['mdp.numStates'], {}), '(mdp.numStates)\n', (16549, 16564), True, 'import numpy as np\n'), ((16890, 16911), 'numpy.argmax', 'np.argmax', (['(deltaW * D)'], {}), '(deltaW * D)\n', (16899, 16911), True, 'import numpy as np\n'), ((3648, 3681), 'util.getRewards', 'getRewards', (['R_s_a', 'current_policy'], {}), '(R_s_a, current_policy)\n', (3658, 3681), False, 'from util import itConvergencePolicy, getRewards, getProb, allOneNeighbours\n'), ((3855, 3888), 'util.getRewards', 'getRewards', (['R_s_a', 'current_policy'], {}), '(R_s_a, current_policy)\n', (3865, 3888), False, 'from util import itConvergencePolicy, getRewards, getProb, allOneNeighbours\n'), ((4067, 4100), 'util.getRewards', 'getRewards', (['R_s_a', 'current_policy'], {}), '(R_s_a, current_policy)\n', (4077, 4100), False, 'from util import itConvergencePolicy, getRewards, getProb, allOneNeighbours\n'), ((4107, 4144), 'util.getProb', 'getProb', (['P_s_a_sprime', 'current_policy'], {}), '(P_s_a_sprime, current_policy)\n', (4114, 4144), False, 'from util import itConvergencePolicy, getRewards, getProb, allOneNeighbours\n'), ((4392, 4427), 'numpy.copy', 'np.copy', 
(['QlowerMBAE[p][start_state]'], {}), '(QlowerMBAE[p][start_state])\n', (4399, 4427), True, 'import numpy as np\n'), ((14568, 14814), 'util.CalculateDelDelV', 'CalculateDelDelV', (['st', 'ac', 'mdp', 'N_s_a_sprime', 'QupperMBAE[pnum]', 'QlowerMBAE[pnum]', 'None', 'None', 'start_state', 'P_s_a_sprime', 'P_tilda[pnum]', 'P_lower_tilda[pnum]', 'R_s_a', 'epsilon', 'delta', 'converge_iterations', 'epsilon_convergence', 'policiesfddv'], {}), '(st, ac, mdp, N_s_a_sprime, QupperMBAE[pnum], QlowerMBAE[\n pnum], None, None, start_state, P_s_a_sprime, P_tilda[pnum],\n P_lower_tilda[pnum], R_s_a, epsilon, delta, converge_iterations,\n epsilon_convergence, policiesfddv)\n', (14584, 14814), False, 'from util import CalculateDelDelV, prob_step, delW\n'), ((15824, 15860), 'util.prob_step', 'prob_step', (['mu', 'P_s_a_sprime', 'policy1'], {}), '(mu, P_s_a_sprime, policy1)\n', (15833, 15860), False, 'from util import CalculateDelDelV, prob_step, delW\n'), ((15961, 16047), 'util.delW', 'delW', (['st', 'policy1[st]', 'delta', 'N_s_a_sprime[st][policy1[st]]', 'mdp.numStates', '(False)'], {}), '(st, policy1[st], delta, N_s_a_sprime[st][policy1[st]], mdp.numStates, \n False)\n', (15965, 16047), False, 'from util import CalculateDelDelV, prob_step, delW\n'), ((16662, 16698), 'util.prob_step', 'prob_step', (['mu', 'P_s_a_sprime', 'policy2'], {}), '(mu, P_s_a_sprime, policy2)\n', (16671, 16698), False, 'from util import CalculateDelDelV, prob_step, delW\n'), ((16799, 16885), 'util.delW', 'delW', (['st', 'policy2[st]', 'delta', 'N_s_a_sprime[st][policy2[st]]', 'mdp.numStates', '(False)'], {}), '(st, policy2[st], delta, N_s_a_sprime[st][policy2[st]], mdp.numStates, \n False)\n', (16803, 16885), False, 'from util import CalculateDelDelV, prob_step, delW\n'), ((3138, 3247), 'util.UpperP', 'UpperP', (['i', 'current_policy[i]', 'delta', 'N_s_a_sprime[i][current_policy[i]]', 'mdp.numStates', 'Qupper[p]', '(False)'], {}), '(i, current_policy[i], delta, N_s_a_sprime[i][current_policy[i]], mdp\n 
.numStates, Qupper[p], False)\n', (3144, 3247), False, 'from util import getPolicies, UpperP, LowerP, indexOfPolicy\n'), ((3329, 3438), 'util.LowerP', 'LowerP', (['i', 'current_policy[i]', 'delta', 'N_s_a_sprime[i][current_policy[i]]', 'mdp.numStates', 'Qlower[p]', '(False)'], {}), '(i, current_policy[i], delta, N_s_a_sprime[i][current_policy[i]], mdp\n .numStates, Qlower[p], False)\n', (3335, 3438), False, 'from util import getPolicies, UpperP, LowerP, indexOfPolicy\n'), ((5384, 5442), 'numpy.linalg.norm', 'np.linalg.norm', (['(oldQlowerMBAE - QlowerMBAE[p][start_state])'], {}), '(oldQlowerMBAE - QlowerMBAE[p][start_state])\n', (5398, 5442), True, 'import numpy as np\n'), ((13735, 13776), 'evaluatePolicy.evaluatePolicy', 'evaluatePolicy', (['mdp', 'policy1', 'start_state'], {}), '(mdp, policy1, start_state)\n', (13749, 13776), False, 'from evaluatePolicy import evaluatePolicy\n'), ((4698, 4746), 'numpy.sum', 'np.sum', (['(VupperMBAE[p] * P_s_a_sprime[state][act])'], {}), '(VupperMBAE[p] * P_s_a_sprime[state][act])\n', (4704, 4746), True, 'import numpy as np\n'), ((4791, 4839), 'numpy.sum', 'np.sum', (['(VlowerMBAE[p] * P_s_a_sprime[state][act])'], {}), '(VlowerMBAE[p] * P_s_a_sprime[state][act])\n', (4797, 4839), True, 'import numpy as np\n'), ((4883, 4926), 'numpy.sum', 'np.sum', (['(Vstar[p] * P_s_a_sprime[state][act])'], {}), '(Vstar[p] * P_s_a_sprime[state][act])\n', (4889, 4926), True, 'import numpy as np\n'), ((13680, 13721), 'evaluatePolicy.evaluatePolicy', 'evaluatePolicy', (['mdp', 'policy1', 'start_state'], {}), '(mdp, policy1, start_state)\n', (13694, 13721), False, 'from evaluatePolicy import evaluatePolicy\n'), ((4965, 5011), 'math.log', 'math.log', (['(c * samples ** 2 * mdp.numStates * 1)'], {}), '(c * samples ** 2 * mdp.numStates * 1)\n', (4973, 5011), False, 'import math\n'), ((5006, 5021), 'math.log', 'math.log', (['delta'], {}), '(delta)\n', (5014, 5021), False, 'import math\n')] |
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import numpy as np
import pytest
import GPy
from emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper
import gp.model_wrapper as mw # GPyTorch model wrapper
@pytest.fixture
def train_x() -> np.ndarray:
    """Fixed (seeded) 10x2 array of training inputs in [0, 1)."""
    np.random.seed(31415926)
    inputs = np.random.rand(10, 2)
    return inputs
@pytest.fixture
def train_y(train_x: np.ndarray) -> np.ndarray:
    """Training targets: sum of cos over input dims, as a column vector."""
    targets = np.cos(train_x).sum(axis=1)
    return targets.reshape(-1, 1)
@pytest.fixture
def test_x() -> np.ndarray:
    """Fixed (seeded) 7x2 array of test inputs."""
    np.random.seed(10)
    points = np.random.rand(7, 2)
    return points
@pytest.fixture
def test_x2() -> np.ndarray:
    """Second fixed (seeded) set of test inputs, 15x2."""
    np.random.seed(100)
    points = np.random.rand(15, 2)
    return points
@pytest.fixture
def wrapper(train_x: np.ndarray, train_y: np.ndarray) -> mw.GPyTorchModelWrapper:
    """GPyTorch model wrapper fit to the training fixtures."""
    x_tensor = mw._map_to_tensor(train_x)
    y_tensor = mw._outputs_remove_dim(train_y)
    model = mw.GPModel(x_tensor, y_tensor)
    # Pin hyperparameters so predictions are reproducible across tests.
    model.covar_module.base_kernel.lengthscale = 1.0
    model.covar_module.outputscale = 1.0
    model.likelihood.noise = 1e-3
    return mw.GPyTorchModelWrapper(model)
@pytest.fixture
def gpy_wrapper(train_x: np.ndarray, train_y: np.ndarray) -> GPyModelWrapper:
    """Reference GPy regression model with matching hyperparameters."""
    gpy_model = GPy.models.GPRegression(train_x, train_y)
    gpy_model.kern.variance = 1.0
    gpy_model.kern.lengthscale = 1.0
    gpy_model.Gaussian_noise.variance.constrain_fixed(1e-3)
    return GPyModelWrapper(gpy_model)
def test_init(wrapper: mw.GPyTorchModelWrapper, train_x: np.ndarray, train_y: np.ndarray) -> None:
    """The wrapper exposes the training data it was built from."""
    assert isinstance(wrapper, mw.GPyTorchModelWrapper)
    for actual, expected in ((wrapper.X, train_x), (wrapper.Y, train_y)):
        np.testing.assert_allclose(actual, expected)
def test_set_data(wrapper: mw.GPyTorchModelWrapper) -> None:
    """set_data replaces the stored training inputs and targets."""
    np.random.seed(10)
    new_x = np.random.rand(20, 2)
    new_y = np.sin(new_x)[:, 0].reshape(-1, 1)
    wrapper.set_data(new_x, new_y)
    np.testing.assert_allclose(wrapper.X, new_x)
    np.testing.assert_allclose(wrapper.Y, new_y)
def test_optimize_smoke(wrapper: mw.GPyTorchModelWrapper) -> None:
    """Smoke test: optimize() runs without raising."""
    wrapper.optimize()
class TestShapes:
    """Shape and type checks for the wrapper's prediction API."""

    def test_predict(self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray) -> None:
        num_points = test_x.shape[0]
        means, var = wrapper.predict(test_x)
        assert isinstance(means, np.ndarray)
        assert isinstance(var, np.ndarray)
        # Both outputs are column vectors: one row per test point.
        assert means.shape == (num_points, 1)
        assert var.shape == (num_points, 1)

    def test_predict_with_full_covariance(self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray) -> None:
        num_points = test_x.shape[0]
        means, cov = wrapper.predict_with_full_covariance(test_x)
        assert isinstance(means, np.ndarray)
        assert isinstance(cov, np.ndarray)
        assert cov.shape == (num_points, num_points)

    def test_get_prediction_gradients(self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray) -> None:
        num_points, input_dim = test_x.shape
        mean_grad, var_grad = wrapper.get_prediction_gradients(test_x)
        assert mean_grad.shape == (num_points, input_dim)
        assert var_grad.shape == (num_points, input_dim)

    def test_get_joint_prediction_gradients(self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray) -> None:
        num_points, input_dim = test_x.shape
        mean_grad, cov_grad = wrapper.get_joint_prediction_gradients(test_x)
        assert mean_grad.shape == (num_points, num_points, input_dim)
        assert cov_grad.shape == (num_points, num_points, num_points, input_dim)

    def test_get_covariance_between_points(
        self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray, test_x2: np.ndarray
    ) -> None:
        cross_cov = wrapper.get_covariance_between_points(test_x, test_x2)
        assert isinstance(cross_cov, np.ndarray)
        assert cross_cov.shape == (test_x.shape[0], test_x2.shape[0])

    def test_get_covariance_between_points_gradients(
        self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray, test_x2: np.ndarray
    ) -> None:
        num_rows, input_dim = test_x.shape
        num_cols = test_x2.shape[0]
        grad = wrapper.get_covariance_between_points_gradients(test_x, test_x2)
        assert isinstance(grad, np.ndarray)
        assert grad.shape == (num_rows, num_cols, input_dim)
class TestInternalConsistency:
    """Several API entry points compute the same quantity; they must agree."""

    def test_predict_with_full_covariance(self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray) -> None:
        means, var = wrapper.predict(test_x)
        means_full, full_cov = wrapper.predict_with_full_covariance(test_x)
        np.testing.assert_allclose(means, means_full)
        # Marginal variances are the diagonal of the full covariance matrix.
        np.testing.assert_allclose(np.diag(full_cov), var.ravel())
        cov_only = wrapper.predict_covariance(test_x)
        np.testing.assert_allclose(cov_only, full_cov)

    def test_get_covariance_between_points__same_as_full_covariance(
        self,
        wrapper: mw.GPyTorchModelWrapper,
        test_x: np.ndarray,
        test_x2: np.ndarray,
    ):
        cross_cov = wrapper.get_covariance_between_points(test_x, test_x2)
        joint = np.concatenate((test_x, test_x2), axis=-2)
        _, joint_cov = wrapper.predict_with_full_covariance(joint)
        n1 = test_x.shape[-2]  # num. points in test_x
        # The [:n1, n1:] block of the joint covariance is the cross-covariance.
        assert pytest.approx(cross_cov, abs=1e-12, rel=1e-10) == joint_cov[:n1, n1:]

    def test_get_covariance_between_points_gradients__same_as_full_covariance(
        self,
        wrapper: mw.GPyTorchModelWrapper,
        test_x: np.ndarray,
        test_x2: np.ndarray,
    ):
        cross_cov_grad = wrapper.get_covariance_between_points_gradients(test_x, test_x2)
        joint = np.concatenate((test_x, test_x2), axis=-2)
        _, joint_cov_grad = wrapper.get_joint_prediction_gradients(joint)
        n1 = test_x.shape[-2]  # num. points in test_x
        # Gradient of the upper-right block of the joint covariance w.r.t. the
        # first n1 input points must equal the cross-covariance gradient.
        block = joint_cov_grad[:n1, n1:, :n1, :]
        expected = np.diagonal(block, axis1=0, axis2=2).transpose((2, 0, 1))
        assert pytest.approx(cross_cov_grad, abs=1e-12, rel=1e-10) == expected

    def test_get_covariance_between_points__invariant_to_set_swap(
        self,
        wrapper: mw.GPyTorchModelWrapper,
        test_x: np.ndarray,
        test_x2: np.ndarray,
    ):
        # cov(X1, X2) must be the transpose of cov(X2, X1).
        forward = wrapper.get_covariance_between_points(test_x, test_x2)
        backward = wrapper.get_covariance_between_points(test_x2, test_x)
        assert pytest.approx(forward, rel=1e-12, abs=1e-12) == backward.T
class TestBatching:
    """Batched inputs (leading batch dimension) must match per-element calls."""

    @pytest.mark.parametrize("batch_size", (2, 5, 10))
    def test_predict_covariance(self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray, batch_size: int) -> None:
        np.random.seed(20)
        n_points, input_dim = test_x.shape
        test_x_batched = np.random.rand(batch_size, n_points, input_dim)
        cov_batched = wrapper.predict_covariance(test_x_batched)
        assert isinstance(cov_batched, np.ndarray)
        assert cov_batched.shape == (batch_size, n_points, n_points)
        # Reference: apply the unbatched call to each batch element.
        cov_naive = np.asarray([wrapper.predict_covariance(arr) for arr in test_x_batched])
        np.testing.assert_allclose(cov_batched, cov_naive)

    @pytest.mark.parametrize("batch_size", (5, 10))
    @pytest.mark.parametrize("n_points2", (3, 20))
    def test_get_covariance_between_points(
        self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray, batch_size: int, n_points2: int
    ) -> None:
        np.random.seed(20)
        n_points1, input_dim = test_x.shape
        test_x1_batched = np.random.rand(batch_size, n_points1, input_dim)
        test_x2_batched = np.random.rand(batch_size, n_points2, input_dim)
        cross_cov = wrapper.get_covariance_between_points(test_x1_batched, test_x2_batched)
        assert isinstance(cross_cov, np.ndarray)
        assert cross_cov.shape == (batch_size, n_points1, n_points2)
        # Reference: apply the unbatched call element-wise over the batch.
        cross_cov_naive = np.asarray(
            [wrapper.get_covariance_between_points(x1, x2) for x1, x2 in zip(test_x1_batched, test_x2_batched)]
        )
        np.testing.assert_allclose(cross_cov, cross_cov_naive)

    @pytest.mark.parametrize("batch_size", (5, 10))
    @pytest.mark.parametrize("n_points2", (3, 20))
    def test_get_covariance_between_points_gradients(
        self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray, batch_size: int, n_points2: int
    ) -> None:
        np.random.seed(20)
        n_points1, input_dim = test_x.shape
        test_x1_batched = np.random.rand(batch_size, n_points1, input_dim)
        test_x2_batched = np.random.rand(batch_size, n_points2, input_dim)
        # (Removed a leftover debug print of the batch/shape parameters here;
        # test output should stay clean.)
        cross_cov_grad = wrapper.get_covariance_between_points_gradients(test_x1_batched, test_x2_batched)
        assert isinstance(cross_cov_grad, np.ndarray)
        assert cross_cov_grad.shape == (batch_size, n_points1, n_points2, input_dim)
        cross_cov_grad_naive = np.asarray(
            [
                wrapper.get_covariance_between_points_gradients(x1, x2)
                for x1, x2 in zip(test_x1_batched, test_x2_batched)
            ]
        )
        np.testing.assert_allclose(cross_cov_grad, cross_cov_grad_naive)
class TestEmukitPorted:
    """Tests ported from Emukit's GPyModelWrapper."""

    # Finite-difference step for the numerical gradient checks below.
    epsilon = 1e-5

    def test_joint_prediction_gradients(self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray):
        """Analytic joint mean/covariance gradients must agree with forward finite differences."""
        wrapper.optimize()
        mean, cov = wrapper.predict_with_full_covariance(test_x)
        # Get the gradients
        mean_dx, cov_dx = wrapper.get_joint_prediction_gradients(test_x)
        for i in range(test_x.shape[0]):  # Iterate over each test point
            for j in range(test_x.shape[1]):  # Iterate over each dimension
                # Approximate the gradient numerically
                perturbed_input = test_x.copy()
                perturbed_input[i, j] += self.epsilon
                mean_perturbed, cov_perturbed = wrapper.predict_with_full_covariance(perturbed_input)
                mean_dx_numerical = (mean_perturbed - mean) / self.epsilon
                cov_dx_numerical = (cov_perturbed - cov) / self.epsilon
                # Check that numerical approx. similar to true gradient
                assert pytest.approx(mean_dx_numerical.ravel(), abs=1e-8, rel=1e-2) == mean_dx[:, i, j]
                assert pytest.approx(cov_dx_numerical, abs=1e-8, rel=1e-2) == cov_dx[:, :, i, j]

    def test_get_covariance_between_points_gradients(
        self, wrapper: mw.GPyTorchModelWrapper, test_x: np.ndarray, test_x2: np.ndarray
    ):
        """Analytic cross-covariance gradients must agree with forward finite differences."""
        wrapper.optimize()
        cov = wrapper.get_covariance_between_points(test_x, test_x2)
        # Get the gradients
        cov_dx = wrapper.get_covariance_between_points_gradients(test_x, test_x2)
        for i in range(test_x.shape[0]):  # Iterate over each test point
            for j in range(test_x.shape[1]):  # Iterate over each dimension
                # Approximate the gradient numerically
                perturbed_input = test_x.copy()
                perturbed_input[i, j] += self.epsilon
                cov_perturbed = wrapper.get_covariance_between_points(perturbed_input, test_x2)
                # Only row i of the cross-covariance depends on point i.
                cov_dx_numerical = (cov_perturbed[i] - cov[i]) / self.epsilon
                # Check that numerical approx. similar to true gradient
                assert pytest.approx(cov_dx_numerical, abs=1e-8, rel=1e-2) == cov_dx[i, :, j]
class TestAgainstGPy:
    """Consistency checks against the better-trusted GPy-backed wrapper.

    Each test runs the same query through both backends and asserts the
    results agree (within a relative tolerance where appropriate).
    """

    def test_predict(self, wrapper: mw.GPyTorchModelWrapper, gpy_wrapper: GPyModelWrapper, test_x: np.ndarray) -> None:
        """Marginal mean and variance agree between backends."""
        torch_mean, torch_var = wrapper.predict(test_x)
        gpy_mean, gpy_var = gpy_wrapper.predict(test_x)
        np.testing.assert_allclose(torch_mean, gpy_mean)
        np.testing.assert_allclose(torch_var, gpy_var, rtol=1e-3)

    def test_predict_with_full_covariance(
        self, wrapper: mw.GPyTorchModelWrapper, gpy_wrapper: GPyModelWrapper, test_x: np.ndarray
    ) -> None:
        """Joint mean and full covariance agree between backends."""
        torch_mean, torch_cov = wrapper.predict_with_full_covariance(test_x)
        gpy_mean, gpy_cov = gpy_wrapper.predict_with_full_covariance(test_x)
        np.testing.assert_allclose(torch_mean, gpy_mean, rtol=1e-3)
        np.testing.assert_allclose(torch_cov, gpy_cov, rtol=1e-3)

    def test_get_prediction_gradients(
        self, wrapper: mw.GPyTorchModelWrapper, gpy_wrapper: GPyModelWrapper, test_x: np.ndarray
    ) -> None:
        """Mean and variance gradients agree between backends."""
        torch_mean_grad, torch_var_grad = wrapper.get_prediction_gradients(test_x)
        gpy_mean_grad, gpy_var_grad = gpy_wrapper.get_prediction_gradients(test_x)
        np.testing.assert_allclose(torch_mean_grad, gpy_mean_grad, rtol=1e-3)
        np.testing.assert_allclose(torch_var_grad, gpy_var_grad, rtol=1e-3)

    def test_get_covariance_between_points_gradients(
        self,
        wrapper: mw.GPyTorchModelWrapper,
        gpy_wrapper: GPyModelWrapper,
        test_x: np.ndarray,
        test_x2: np.ndarray,
    ) -> None:
        """Cross-covariance gradients agree between backends."""
        torch_xcov_grad = wrapper.get_covariance_between_points_gradients(test_x, test_x2)
        gpy_xcov_grad = gpy_wrapper.get_covariance_between_points_gradients(test_x, test_x2)
        np.testing.assert_allclose(torch_xcov_grad, gpy_xcov_grad, rtol=1e-3)
| [
"numpy.diag",
"numpy.random.seed",
"gp.model_wrapper.GPyTorchModelWrapper",
"GPy.models.GPRegression",
"numpy.concatenate",
"gp.model_wrapper._map_to_tensor",
"pytest.approx",
"emukit.model_wrappers.gpy_model_wrappers.GPyModelWrapper",
"numpy.sin",
"pytest.mark.parametrize",
"numpy.cos",
"nump... | [((564, 588), 'numpy.random.seed', 'np.random.seed', (['(31415926)'], {}), '(31415926)\n', (578, 588), True, 'import numpy as np\n'), ((600, 621), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (614, 621), True, 'import numpy as np\n'), ((798, 816), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (812, 816), True, 'import numpy as np\n'), ((828, 848), 'numpy.random.rand', 'np.random.rand', (['(7)', '(2)'], {}), '(7, 2)\n', (842, 848), True, 'import numpy as np\n'), ((900, 919), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (914, 919), True, 'import numpy as np\n'), ((931, 952), 'numpy.random.rand', 'np.random.rand', (['(15)', '(2)'], {}), '(15, 2)\n', (945, 952), True, 'import numpy as np\n'), ((1067, 1093), 'gp.model_wrapper._map_to_tensor', 'mw._map_to_tensor', (['train_x'], {}), '(train_x)\n', (1084, 1093), True, 'import gp.model_wrapper as mw\n'), ((1108, 1139), 'gp.model_wrapper._outputs_remove_dim', 'mw._outputs_remove_dim', (['train_y'], {}), '(train_y)\n', (1130, 1139), True, 'import gp.model_wrapper as mw\n'), ((1153, 1181), 'gp.model_wrapper.GPModel', 'mw.GPModel', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (1163, 1181), True, 'import gp.model_wrapper as mw\n'), ((1321, 1351), 'gp.model_wrapper.GPyTorchModelWrapper', 'mw.GPyTorchModelWrapper', (['model'], {}), '(model)\n', (1344, 1351), True, 'import gp.model_wrapper as mw\n'), ((1460, 1501), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (1483, 1501), False, 'import GPy\n'), ((1636, 1658), 'emukit.model_wrappers.gpy_model_wrappers.GPyModelWrapper', 'GPyModelWrapper', (['model'], {}), '(model)\n', (1651, 1658), False, 'from emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper\n'), ((1839, 1885), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['wrapper.X', 'train_x'], {}), '(wrapper.X, train_x)\n', (1865, 1885), True, 'import numpy 
as np\n'), ((1890, 1936), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['wrapper.Y', 'train_y'], {}), '(wrapper.Y, train_y)\n', (1916, 1936), True, 'import numpy as np\n'), ((2004, 2022), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (2018, 2022), True, 'import numpy as np\n'), ((2031, 2052), 'numpy.random.rand', 'np.random.rand', (['(20)', '(2)'], {}), '(20, 2)\n', (2045, 2052), True, 'import numpy as np\n'), ((2125, 2165), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['wrapper.X', 'x'], {}), '(wrapper.X, x)\n', (2151, 2165), True, 'import numpy as np\n'), ((2170, 2210), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['wrapper.Y', 'y'], {}), '(wrapper.Y, y)\n', (2196, 2210), True, 'import numpy as np\n'), ((7200, 7249), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '(2, 5, 10)'], {}), "('batch_size', (2, 5, 10))\n", (7223, 7249), False, 'import pytest\n'), ((7857, 7903), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '(5, 10)'], {}), "('batch_size', (5, 10))\n", (7880, 7903), False, 'import pytest\n'), ((7909, 7954), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_points2"""', '(3, 20)'], {}), "('n_points2', (3, 20))\n", (7932, 7954), False, 'import pytest\n'), ((8777, 8823), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '(5, 10)'], {}), "('batch_size', (5, 10))\n", (8800, 8823), False, 'import pytest\n'), ((8829, 8874), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_points2"""', '(3, 20)'], {}), "('n_points2', (3, 20))\n", (8852, 8874), False, 'import pytest\n'), ((4735, 4778), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['means', 'means_fc'], {}), '(means, means_fc)\n', (4761, 4778), True, 'import numpy as np\n'), ((4901, 4939), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cov', 'covar'], {}), '(cov, covar)\n', (4927, 4939), True, 
'import numpy as np\n'), ((6305, 6369), 'numpy.diagonal', 'np.diagonal', (['cross_cov_grad_from_full_cov_grad'], {'axis1': '(0)', 'axis2': '(2)'}), '(cross_cov_grad_from_full_cov_grad, axis1=0, axis2=2)\n', (6316, 6369), True, 'import numpy as np\n'), ((7376, 7394), 'numpy.random.seed', 'np.random.seed', (['(20)'], {}), '(20)\n', (7390, 7394), True, 'import numpy as np\n'), ((7464, 7511), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_points', 'input_dim'], {}), '(batch_size, n_points, input_dim)\n', (7478, 7511), True, 'import numpy as np\n'), ((7800, 7850), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cov_batched', 'cov_naive'], {}), '(cov_batched, cov_naive)\n', (7826, 7850), True, 'import numpy as np\n'), ((8122, 8140), 'numpy.random.seed', 'np.random.seed', (['(20)'], {}), '(20)\n', (8136, 8140), True, 'import numpy as np\n'), ((8212, 8260), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_points1', 'input_dim'], {}), '(batch_size, n_points1, input_dim)\n', (8226, 8260), True, 'import numpy as np\n'), ((8287, 8335), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_points2', 'input_dim'], {}), '(batch_size, n_points2, input_dim)\n', (8301, 8335), True, 'import numpy as np\n'), ((8716, 8770), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cross_cov', 'cross_cov_naive'], {}), '(cross_cov, cross_cov_naive)\n', (8742, 8770), True, 'import numpy as np\n'), ((9052, 9070), 'numpy.random.seed', 'np.random.seed', (['(20)'], {}), '(20)\n', (9066, 9070), True, 'import numpy as np\n'), ((9142, 9190), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_points1', 'input_dim'], {}), '(batch_size, n_points1, input_dim)\n', (9156, 9190), True, 'import numpy as np\n'), ((9217, 9265), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_points2', 'input_dim'], {}), '(batch_size, n_points2, input_dim)\n', (9231, 9265), True, 'import numpy as np\n'), ((9832, 9896), 'numpy.testing.assert_allclose', 
'np.testing.assert_allclose', (['cross_cov_grad', 'cross_cov_grad_naive'], {}), '(cross_cov_grad, cross_cov_grad_naive)\n', (9858, 9896), True, 'import numpy as np\n'), ((12514, 12554), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean1', 'mean2'], {}), '(mean1, mean2)\n', (12540, 12554), True, 'import numpy as np\n'), ((12563, 12613), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['var1', 'var2'], {'rtol': '(0.001)'}), '(var1, var2, rtol=0.001)\n', (12589, 12613), True, 'import numpy as np\n'), ((12916, 12968), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean1', 'mean2'], {'rtol': '(0.001)'}), '(mean1, mean2, rtol=0.001)\n', (12942, 12968), True, 'import numpy as np\n'), ((12976, 13026), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cov1', 'cov2'], {'rtol': '(0.001)'}), '(cov1, cov2, rtol=0.001)\n', (13002, 13026), True, 'import numpy as np\n'), ((13337, 13399), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean_grad1', 'mean_grad2'], {'rtol': '(0.001)'}), '(mean_grad1, mean_grad2, rtol=0.001)\n', (13363, 13399), True, 'import numpy as np\n'), ((13407, 13467), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['var_grad1', 'var_grad2'], {'rtol': '(0.001)'}), '(var_grad1, var_grad2, rtol=0.001)\n', (13433, 13467), True, 'import numpy as np\n'), ((13873, 13935), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['xcov_grad1', 'xcov_grad2'], {'rtol': '(0.001)'}), '(xcov_grad1, xcov_grad2, rtol=0.001)\n', (13899, 13935), True, 'import numpy as np\n'), ((4814, 4828), 'numpy.diag', 'np.diag', (['covar'], {}), '(covar)\n', (4821, 4828), True, 'import numpy as np\n'), ((5268, 5310), 'numpy.concatenate', 'np.concatenate', (['(test_x, test_x2)'], {'axis': '(-2)'}), '((test_x, test_x2), axis=-2)\n', (5282, 5310), True, 'import numpy as np\n'), ((5477, 5525), 'pytest.approx', 'pytest.approx', (['cross_covar'], {'abs': '(1e-12)', 'rel': '(1e-10)'}), 
'(cross_covar, abs=1e-12, rel=1e-10)\n', (5490, 5525), False, 'import pytest\n'), ((5908, 5950), 'numpy.concatenate', 'np.concatenate', (['(test_x, test_x2)'], {'axis': '(-2)'}), '((test_x, test_x2), axis=-2)\n', (5922, 5950), True, 'import numpy as np\n'), ((6484, 6537), 'pytest.approx', 'pytest.approx', (['cross_covar_grad'], {'abs': '(1e-12)', 'rel': '(1e-10)'}), '(cross_covar_grad, abs=1e-12, rel=1e-10)\n', (6497, 6537), False, 'import pytest\n'), ((7041, 7090), 'pytest.approx', 'pytest.approx', (['cross_covar1'], {'rel': '(1e-12)', 'abs': '(1e-12)'}), '(cross_covar1, rel=1e-12, abs=1e-12)\n', (7054, 7090), False, 'import pytest\n'), ((706, 721), 'numpy.cos', 'np.cos', (['train_x'], {}), '(train_x)\n', (712, 721), True, 'import numpy as np\n'), ((2061, 2070), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2067, 2070), True, 'import numpy as np\n'), ((11048, 11100), 'pytest.approx', 'pytest.approx', (['cov_dx_numerical'], {'abs': '(1e-08)', 'rel': '(0.01)'}), '(cov_dx_numerical, abs=1e-08, rel=0.01)\n', (11061, 11100), False, 'import pytest\n'), ((12055, 12107), 'pytest.approx', 'pytest.approx', (['cov_dx_numerical'], {'abs': '(1e-08)', 'rel': '(0.01)'}), '(cov_dx_numerical, abs=1e-08, rel=0.01)\n', (12068, 12107), False, 'import pytest\n')] |
'''
Program usage: first set the image folder path, the label-result path and the
number of parking spots to label (less than 6).
After starting the script, select the parking-spot marker positions with the mouse.
Press Up/Down to show the previous/next image.
Press Left/Right to jump between labelled state-change images.
'''
import cv2
import numpy as np
import os
from detect_utils import parking_line
from detect_utils import show_parking_line
path_img = r'W:\dataset\inroad_parking_videos\pics\2020_01_10\DDT2G1907ZMY00124SY'  # image folder path
path_txt_for_check = os.path.split(path_img)[0] + '\\' + os.path.split(path_img)[-1] + '_label.txt'
parking_num = 7  # number of parking spots
bias = 60  # pixel offset of the spot/state markers drawn on the image
h_l, h_h = 10, 2000  # top/bottom crop bounds of the displayed image
ip_name = path_img.split('\\')[-1].split('_')[-1]
if ip_name == '1010':  # TODO: improve; special case for DDT2G1907ZMY00009SY_1010
    ip_name = path_img.split('\\')[-1]
parking_list_np = np.array(parking_line[ip_name]).astype(int)
# mapping from camera name suffix to its number of parking spots
if ip_name == '252':
    parking_num = 3
elif ip_name == '261':
    parking_num = 4
elif ip_name == '262':
    parking_num = 4
elif ip_name == '177':
    parking_num = 3
elif ip_name == '175':
    parking_num = 3
font = cv2.FONT_HERSHEY_SIMPLEX  # use the default font
pos_show = []       # clicked marker position per spot
pos_show_bias = []  # offset position where the state digit is drawn
state_loc = []      # current labelled state per spot
act = 0             # 1 while auto-advancing towards idx_togo
idx_togo = 0        # target image index for auto-advance
# #0. read the file list
# name_all_list = os.listdir(path_img)
# 1. read one txt label file
data_raw = []
with open(path_txt_for_check, 'r', encoding='UTF-8') as file_to_read:
    lines = file_to_read.readlines()  # read all lines
    for line in lines:
        if line != '\n':
            data_raw.append(line)
# 2. convert the parsed label fields to an np.array
data_raw_np = []
for i in data_raw:
    for idx, j in enumerate(i.split(' ')):
        if idx:
            # fields after the first look like 'key:value'; keep the value
            data_raw_np.append(float(j.split(':')[1]))
        else:
            pass
            # data_raw_np.append(int(j))
data_raw_np = np.array(data_raw_np)
# now we get the data
data_raw_np = data_raw_np.reshape(-1, len(data_raw[0].split(' ')) - 1)
data_raw_np = data_raw_np.astype(int)
row_fin = len(data_raw)
# set the display image size
global h  # NOTE(review): `global` at module level is a no-op; kept as-is
h = h_h - h_l
list_img = os.listdir(path_img)
list_img = [i for i in list_img if i.endswith('jpg')]
list_img.sort()  # sort (file names begin with HH_MM_SS; see parsing below)
os.chdir(path_img)
im_data = cv2.imread(list_img[0])
h, w = im_data.shape[0:2]
x_panel = int(w * 0.85)  # x position of the on-screen text panel
y_panel = int(h * 0.15)
y_panel_bias = y_panel - int(0.05 * h)
y_panel_bias2 = y_panel - 2 * int(0.05 * h)
font_size = w // 1000
bias = int(w * 0.025)  # offset of the spot/state markers on the image
bias_y = int(h * -0.05)  # offset of the spot index label
font_width = int((h + w) / 1000)
if h == 0:
    w_w, w_h = (w // 8) * 3, (h // 8) * 3
    h_l = 0
    h_h = h
else:
    w_w, w_h = (w // 8) * 3, (h // 8) * 3
def check_format(r):
    """Normalize an [hour, minute, second] triple in place and return it.

    Carries overflow from seconds into minutes and from minutes into hours
    using divmod, so overflows of 120 seconds / 2 minutes or more are fully
    normalized (the previous single-subtraction version left e.g. 130s as
    70s).  Hours are deliberately not wrapped at 24, matching the original
    commented-out behaviour.

    Args:
        r: mutable sequence of three non-negative ints [h, m, s].

    Returns:
        The same sequence, normalized.
    """
    carry, r[2] = divmod(r[2], 60)
    r[1] += carry
    carry, r[1] = divmod(r[1], 60)
    r[0] += carry
    # hours intentionally left unwrapped (no modulo 24)
    return r
def sec2img_name(data_np, pic_one_name_np):
    """Convert second offsets into 'HH_MM_SS.jpg' image file names.

    Args:
        data_np: rows whose first column is an offset in seconds.
        pic_one_name_np: [h, m, s] array parsed from the first image name.

    Returns:
        List of 'HH_MM_SS.jpg' names, one per row of data_np.
    """
    names = []
    for row in data_np:
        seconds = row[0]
        offset = np.array([seconds // 3600, seconds % 3600 // 60, seconds % 3600 % 60])
        hms = check_format(offset + pic_one_name_np)
        names.append('{:02d}_{:02d}_{:02d}.jpg'.format(hms[0], hms[1], hms[2]))
    return names
# initialise one marker/state slot per parking spot
for i in range(parking_num):
    pos_show.append([0, 0])
    pos_show_bias.append([0, 0])
    state_loc.append(0)
# list_img = os.listdir(path_img)
# list_img.sort() # sort
# keep only the leading HH_MM_SS portion of each image name
list_img_time_only = ['_'.join(i.split('.')[0].split('_')[:3]) + '.jpg' for i in list_img]
h_0, m_0, s_0 = os.path.splitext(list_img[0])[0].split('_')[:3]
pic_one_name_np = np.array([int(h_0), int(m_0), int(s_0)])
name = sec2img_name(data_raw_np, pic_one_name_np)
os.chdir(path_img)
img_len = len(list_img)
idx = 0
h_0, m_0, s_0 = os.path.splitext(list_img[0])[0].split('_')[:3]
h_1, m_1, s_1 = os.path.splitext(list_img[1])[0].split('_')[:3]
# time gap (seconds) between two consecutive images
gap_sec = (int(h_1) - int(h_0)) * 3600 + \
    (int(m_1) - int(m_0)) * 60 + (int(s_1) - int(s_0))
one_min = 60 // gap_sec    # images per minute
five_mins = 300 // gap_sec # images per five minutes
if one_min == 0:
    one_min = 2
if five_mins == 0:
    five_mins = 10
# total time span of all images, in seconds
h_f, m_f, s_f = os.path.splitext(list_img[img_len - 1])[0].split('_')[:3]
h_sum = int(h_f) - int(h_0)
m_sum = int(m_f) - int(m_0)
s_sum = int(s_f) - int(s_0)
sec_sum = h_sum * 3600 + m_sum * 60 + s_sum
img_data = cv2.imread(list_img[0])
record_list = []
click_time = 0  # number of spot markers placed with the mouse so far
def get_p(event, x, y, flags, param):
    """OpenCV mouse callback: record a clicked parking-spot marker position.

    Each left click stores the click point as the marker position for the
    next unassigned spot (up to `parking_num` spots), plus an offset point
    where the state digit is drawn.  Mutates module-level state.
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        global click_time
        global pos_show
        global pos_show_bias
        if click_time < parking_num:
            pos_show[click_time] = [x, y]
            # pos_show_bias[click_time] = [x+bias,y]
            pos_show_bias[click_time] = [x - bias // 4, y - bias]
            click_time += 1
            print(x, y)
global idx_txt  # NOTE(review): `global` at module level is a no-op; kept as-is
idx_txt = 0  # index of the current row in the label file
cv2.namedWindow('img', 0)
cv2.startWindowThread()
cv2.resizeWindow('img', w_w, w_h)  # width, height
# Main display/keyboard loop: shows each frame with overlays and reacts to keys.
while idx < img_len:
    img_data = cv2.imread(list_img[idx])
    if img_data is None:
        idx += 1
        continue
    img_data = cv2.imread(list_img[idx])
    img_data = show_parking_line(img_data, parking_list_np, 4)  # draw the parking lines
    img_data = img_data[h_l:h_h, :, :]
    h, m, s = os.path.splitext(list_img[idx])[0].split('_')[:3]
    h_p = int(h) - int(h_0)
    m_p = int(m) - int(m_0)
    s_p = int(s) - int(s_0)
    sec_pass = h_p * 3600 + m_p * 60 + s_p
    # overlay: timestamp of the current label row
    img_data = cv2.putText(img_data, data_raw[idx_txt].split(' ')[0], (x_panel, y_panel),
                font, font_size, (0, 255, 255), font_width)  # text: size, origin, colour, thickness
    # overlay: timestamp of the current image
    img_data = cv2.putText(img_data, '_'.join(os.path.splitext(list_img[idx])[0].split('_')[:3]), (x_panel, y_panel_bias),
                font, font_size, (0, 0, 255), font_width)
    # overlay: remaining time until the last image
    img_data = cv2.putText(img_data, str(int(sec_sum - sec_pass) // 60) + 'm' + str(int(sec_sum - sec_pass) % 60) + 's', (x_panel, y_panel_bias2),
                font, font_size, (0, 255, 0), font_width)
    for i in range(parking_num):
        # parking spot index
        img_data = cv2.putText(img_data, str(i),
                    (pos_show[i][0], pos_show[i][1]), font, font_size, (255, 0, 255), font_width)
        # state marker
        img_data = cv2.putText(img_data, str(int(state_loc[i])),
                    (pos_show_bias[i][0], pos_show_bias[i][1]), font, font_size + 1, (0, 0, 255), font_width)
    cv2.setMouseCallback('img', get_p, 0)  # mouse callback
    cv2.imshow('img', img_data)
    key = cv2.waitKeyEx(30)  # wait for a key press
    if act == 1 and idx < idx_togo:
        # auto-advance mode: step towards idx_togo one frame per iteration
        idx += 1
    else:
        act = 0
    if key == 27:  # ESC
        break
    elif key == ord('s') or key == ord('S'):  # s: forward 1 minute
        act = 0
        idx += one_min
    elif key == ord('d') or key == ord('D'):  # d: forward to the next state-change point
        act = 0
        idx_txt += 1
        if idx_txt < row_fin:
            if data_raw[idx_txt].split(' ')[0] + '.jpg' not in list_img_time_only:
                print('==>', end=' ')
                print(data_raw[idx_txt])
                continue
            idx = list_img_time_only.index(data_raw[idx_txt].split(' ')[0] + '.jpg')
            for i in range(parking_num):
                state_loc[i] = data_raw_np[idx_txt][i]
        else:
            idx_txt = row_fin - 1
            idx = len(list_img) - 1
    elif key == ord('a') or key == ord('A'):  # a: back one state-change point
        act = 0
        idx_txt -= 1
        if idx_txt >= 0:
            if data_raw[idx_txt].split(' ')[0] + '.jpg' not in list_img_time_only:
                print('==>', end=' ')
                print(data_raw[idx_txt])
                continue
            idx = list_img_time_only.index(data_raw[idx_txt].split(' ')[0] + '.jpg')
            for i in range(parking_num):
                state_loc[i] = data_raw_np[idx_txt][i]
        else:
            idx_txt = 0
            idx = list_img_time_only.index(data_raw[idx_txt].split(' ')[0] + '.jpg')
    elif key == ord('w') or key == ord('W'):  # w: back 1 minute
        act = 0
        if idx > 0:
            idx -= one_min
        else:
            idx = 0
    elif key == 2490368:  # Up arrow: previous image
        act = 0
        if idx > 0:
            idx -= 1
        else:
            idx = 0
    elif key == 2621440:  # Down arrow: next image
        act = 0
        idx += 1
    elif key == 2424832:  # Left arrow: previous labelled row
        act = 0
        idx_txt -= 1
        if idx_txt >= 0:
            if data_raw[idx_txt].split(' ')[0] + '.jpg' not in list_img_time_only:  # not in the image list
                print('==>', end=' ')
                print(data_raw[idx_txt])
                continue
            idx = list_img_time_only.index(
                data_raw[idx_txt].split(' ')[0] + '.jpg')  # present in the list
            for i in range(parking_num):
                state_loc[i] = data_raw_np[idx_txt][i]
        else:
            idx_txt = 0
            idx = list_img_time_only.index(data_raw[idx_txt].split(' ')[0] + '.jpg')
    elif key == 2555904:  # Right arrow: auto-advance to the next labelled row
        idx_txt += 1
        if idx_txt < row_fin:
            act = 1
            if data_raw[idx_txt].split(' ')[0] + '.jpg' not in list_img_time_only:
                idx_togo = 0
                print('==>', end=' ')
                print(data_raw[idx_txt])
                # fall back: count images whose names sort before the row timestamp
                tmp = np.array(list_img)
                tmp = tmp < (data_raw[idx_txt].split(' ')[0] + '.jpg')
                idx_togo += tmp.sum() - 1
                continue
            idx += 1
            for i in range(parking_num):
                state_loc[i] = data_raw_np[idx_txt - 1][i]
            idx_togo = list_img_time_only.index(data_raw[idx_txt].split(' ')[0] + '.jpg')
        else:
            idx_txt = row_fin - 1
            idx = len(list_img) - 1
    elif key == ord(' '):  # space: refresh spot states from the current label row
        for i in range(parking_num):
            state_loc[i] = data_raw_np[idx_txt][i]
if key != 27:
    # loop ran off the end of the image list rather than being quit with ESC
    print('===========================')
print('fin')
cv2.destroyAllWindows()
| [
"cv2.imshow",
"cv2.waitKeyEx",
"cv2.imread",
"detect_utils.show_parking_line",
"numpy.array",
"cv2.setMouseCallback",
"os.chdir",
"cv2.startWindowThread",
"os.path.splitext",
"cv2.resizeWindow",
"cv2.destroyAllWindows",
"os.listdir",
"cv2.namedWindow",
"os.path.split"
] | [((1562, 1583), 'numpy.array', 'np.array', (['data_raw_np'], {}), '(data_raw_np)\n', (1570, 1583), True, 'import numpy as np\n'), ((1783, 1803), 'os.listdir', 'os.listdir', (['path_img'], {}), '(path_img)\n', (1793, 1803), False, 'import os\n'), ((1880, 1898), 'os.chdir', 'os.chdir', (['path_img'], {}), '(path_img)\n', (1888, 1898), False, 'import os\n'), ((1909, 1932), 'cv2.imread', 'cv2.imread', (['list_img[0]'], {}), '(list_img[0])\n', (1919, 1932), False, 'import cv2\n'), ((3437, 3455), 'os.chdir', 'os.chdir', (['path_img'], {}), '(path_img)\n', (3445, 3455), False, 'import os\n'), ((4074, 4097), 'cv2.imread', 'cv2.imread', (['list_img[0]'], {}), '(list_img[0])\n', (4084, 4097), False, 'import cv2\n'), ((4569, 4594), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', '(0)'], {}), "('img', 0)\n", (4584, 4594), False, 'import cv2\n'), ((4595, 4618), 'cv2.startWindowThread', 'cv2.startWindowThread', ([], {}), '()\n', (4616, 4618), False, 'import cv2\n'), ((4619, 4652), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""img"""', 'w_w', 'w_h'], {}), "('img', w_w, w_h)\n", (4635, 4652), False, 'import cv2\n'), ((9718, 9741), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9739, 9741), False, 'import cv2\n'), ((4697, 4722), 'cv2.imread', 'cv2.imread', (['list_img[idx]'], {}), '(list_img[idx])\n', (4707, 4722), False, 'import cv2\n'), ((4798, 4823), 'cv2.imread', 'cv2.imread', (['list_img[idx]'], {}), '(list_img[idx])\n', (4808, 4823), False, 'import cv2\n'), ((4839, 4886), 'detect_utils.show_parking_line', 'show_parking_line', (['img_data', 'parking_list_np', '(4)'], {}), '(img_data, parking_list_np, 4)\n', (4856, 4886), False, 'from detect_utils import show_parking_line\n'), ((6274, 6311), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""img"""', 'get_p', '(0)'], {}), "('img', get_p, 0)\n", (6294, 6311), False, 'import cv2\n'), ((6328, 6355), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img_data'], {}), "('img', img_data)\n", (6338, 6355), 
False, 'import cv2\n'), ((6366, 6383), 'cv2.waitKeyEx', 'cv2.waitKeyEx', (['(30)'], {}), '(30)\n', (6379, 6383), False, 'import cv2\n'), ((643, 674), 'numpy.array', 'np.array', (['parking_line[ip_name]'], {}), '(parking_line[ip_name])\n', (651, 674), True, 'import numpy as np\n'), ((2708, 2721), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (2716, 2721), True, 'import numpy as np\n'), ((348, 371), 'os.path.split', 'os.path.split', (['path_img'], {}), '(path_img)\n', (361, 371), False, 'import os\n'), ((312, 335), 'os.path.split', 'os.path.split', (['path_img'], {}), '(path_img)\n', (325, 335), False, 'import os\n'), ((3279, 3308), 'os.path.splitext', 'os.path.splitext', (['list_img[0]'], {}), '(list_img[0])\n', (3295, 3308), False, 'import os\n'), ((3504, 3533), 'os.path.splitext', 'os.path.splitext', (['list_img[0]'], {}), '(list_img[0])\n', (3520, 3533), False, 'import os\n'), ((3568, 3597), 'os.path.splitext', 'os.path.splitext', (['list_img[1]'], {}), '(list_img[1])\n', (3584, 3597), False, 'import os\n'), ((3876, 3915), 'os.path.splitext', 'os.path.splitext', (['list_img[img_len - 1]'], {}), '(list_img[img_len - 1])\n', (3892, 3915), False, 'import os\n'), ((4948, 4979), 'os.path.splitext', 'os.path.splitext', (['list_img[idx]'], {}), '(list_img[idx])\n', (4964, 4979), False, 'import os\n'), ((5389, 5420), 'os.path.splitext', 'os.path.splitext', (['list_img[idx]'], {}), '(list_img[idx])\n', (5405, 5420), False, 'import os\n'), ((9070, 9088), 'numpy.array', 'np.array', (['list_img'], {}), '(list_img)\n', (9078, 9088), True, 'import numpy as np\n')] |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# from tensorflow.python.summary import event_accumulator
from tensorboard.backend.event_processing import event_accumulator
def get_min(event_file, scalar_str):
    """Return (step, value) of the minimum of scalar `scalar_str` in `event_file`.

    Args:
        event_file: path to a TensorBoard event file.
        scalar_str: scalar tag to scan.

    Returns:
        (step, value) of the smallest recorded value; (0, inf) if the tag
        has no entries.
    """
    ea = event_accumulator.EventAccumulator(event_file)
    ea.Reload()
    # float('inf') instead of the old magic 999999 sentinel, which silently
    # produced a wrong answer when every value was >= 999999.
    minval = float('inf')
    minstep = 0
    for scalar in ea.Scalars(scalar_str):
        if scalar.value < minval:
            minstep = scalar.step
            minval = scalar.value
    return minstep, minval
def get_max(event_file, scalar_str):
    """Return (step, value) of the maximum of scalar `scalar_str` in `event_file`.

    Args:
        event_file: path to a TensorBoard event file.
        scalar_str: scalar tag to scan.

    Returns:
        (step, value) of the largest recorded value; (0, -inf) if the tag
        has no entries.
    """
    ea = event_accumulator.EventAccumulator(event_file)
    ea.Reload()
    # float('-inf') instead of the old magic -999999 sentinel, which silently
    # produced a wrong answer when every value was <= -999999.
    maxval = float('-inf')
    optstep = 0
    for scalar in ea.Scalars(scalar_str):
        if scalar.value > maxval:
            optstep = scalar.step
            maxval = scalar.value
    return optstep, maxval
def get_opt_step_using_window(event_file, window_size):
    """Early-stopping style search over the 'val_lik' curve.

    Averages 'val_lik' over consecutive windows of `window_size` scalars.
    As soon as a window's average drops (by more than `epsilon`) below the
    previous window's average, the maximum inside the *previous* window is
    taken as the optimum.  If the curve never drops (optstep stays 0), the
    maximum over the final window is used instead.

    Returns:
        (optstep, opt_lik): step and value of the chosen optimum.
    """
    ea = event_accumulator.EventAccumulator(event_file)
    ea.Reload()
    epsilon = 0.0001
    opt_lik=-99999.0
    optstep=0
    scalars = ea.Scalars('val_lik')
    prev_avg = -99999.0
    for i in range(0,len(scalars),window_size):
        cur_avg=0.0
        for scalar in scalars[i:i+window_size]:
            cur_avg += scalar.value
        cur_avg /= window_size
        if cur_avg<(prev_avg-epsilon):
            #find max in prev window to set as optimal
            maxval = -999999.0
            maxstep=0
            for s in scalars[i-window_size:i]:
                if s.value> maxval:
                    maxstep = s.step
                    maxval = s.value
            optstep = maxstep
            opt_lik = maxval
            break
        prev_avg = cur_avg
    #compare max in last window with optimal if optstep is 0 i.e. increasing optimisation curve , peak not found
    if optstep==0:
        for s in scalars[len(scalars)-window_size:]: #[-1:-1-window_size:-1]:
            if s.value> opt_lik:
                optstep = s.step
                opt_lik = s.value
    return optstep, opt_lik
def get_opt_step(event_file):
    """Return the step with the best validation likelihood whose RMSE also improves.

    Scans 'val_lik'; a step is accepted only when its likelihood is strictly
    higher AND its 'val_rmse' is strictly lower than the best accepted so far.

    Returns:
        The chosen step (0 if no scalar satisfies both conditions).
    """
    ea = event_accumulator.EventAccumulator(event_file)
    ea.Reload()
    # inf sentinels instead of the old magic +/-999999 constants
    optrmse = float('inf')
    optlik = float('-inf')
    optstep = 0
    for scalar in ea.Scalars('val_lik'):
        rmse = get_val_at_step(event_file, scalar.step, 'val_rmse')
        if scalar.value > optlik and rmse < optrmse:
            optstep = scalar.step
            optrmse = rmse
            optlik = scalar.value
    return optstep
    # NOTE: removed unreachable leftover code that followed this return and
    # referenced an undefined `scalar_str` (a remnant of another function).
def get_val_at_step(event_file, step, scalar_str):
    """Return the value of scalar `scalar_str` recorded at `step`.

    Returns None when no scalar entry with that exact step exists.
    """
    ea = event_accumulator.EventAccumulator(event_file)
    ea.Reload()
    matches = (s.value for s in ea.Scalars(scalar_str) if s.step == step)
    return next(matches, None)
def get_last_step(event_file):
    """Return the step of the final 'val_lik' entry in `event_file`."""
    accumulator = event_accumulator.EventAccumulator(event_file)
    accumulator.Reload()
    scalars = accumulator.Scalars('val_lik')
    return scalars[-1].step
import argparse
import os, glob
def main():
    """Aggregate metrics and kernel hyper-parameters from all
    'model-tensorboard*' event files in a folder and write a summary file
    Test_Result<window_size>.txt.

    window_size semantics:
        0  -> pick the step with maximum 'val_lik'
        <0 -> report the last recorded step
        >0 -> windowed early-stopping search over 'val_lik'
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-folder', '-dir', nargs=1, required=True, help='Event files folder name')
    # default must be a one-element list: nargs=1 stores parsed values as a
    # list, and the old scalar default 0 crashed on args.window_size[0]
    # whenever the flag was omitted.
    parser.add_argument('-window_size', '-ws', nargs=1, type=int, default=[0],
                        help='window size:(0 for get max)(-1 to report last iteration value)(>0 for window over val_lik, usually >=5)')
    args = parser.parse_args()
    folder = args.folder[0]
    window_size = args.window_size[0]
    os.chdir('./' + folder)
    files = glob.glob('model-tensorboard*')
    rmse_list = []
    lik_list = []
    val_lik_list = []
    val_rmse_list = []
    folder = folder.split('/')[-1]  # now folder = model folder
    layer = int(folder.split('_')[1])  # layer count encoded in the folder name
    n = len(files)  # replaces the old explicit counting loop
    kv = np.zeros((layer, n))   # kernel variance per (layer, file)
    kls = np.zeros((layer, n))  # kernel lengthscale per (layer, file)
    i = 0
    for f in files:
        try:
            if window_size == 0:
                step, opt_val_lik = get_max(f, 'val_lik')
                opt_val_rmse = get_val_at_step(f, step, 'val_rmse')
            elif window_size < 0:
                step = get_last_step(f)
                opt_val_lik = get_val_at_step(f, step, 'val_lik')
                opt_val_rmse = get_val_at_step(f, step, 'val_rmse')
            else:
                step, opt_val_lik = get_opt_step_using_window(f, window_size)
                opt_val_rmse = get_val_at_step(f, step, 'val_rmse')
            print(f)
        except Exception:
            # was a bare `except:`, which also swallowed KeyboardInterrupt
            print(f)
            continue
        rmse = get_val_at_step(f, step, 'test_rmse')
        lik = get_val_at_step(f, step, 'test_lik')
        val_rmse_list.append(opt_val_rmse)
        val_lik_list.append(opt_val_lik)
        rmse_list.append(rmse)
        lik_list.append(lik)
        print(str(step) + '\n')
        for l in range(layer):
            kls[l][i] = get_val_at_step(f, step, folder + '/layers/' + str(l) + '/kern/lengthscales_1')
            kv[l][i] = get_val_at_step(f, step, folder + '/layers/' + str(l) + '/kern/variance_1')
        i = i + 1
    # context manager guarantees the result file is closed even on error
    with open('Test_Result' + str(window_size) + '.txt', 'w') as file:
        file.write('Validation RMSE: mean:{}, dev:{}'.format(np.mean(val_rmse_list), np.std(val_rmse_list)))
        file.write('\nValidation Likelihood: mean:{}, dev:{}'.format(np.mean(val_lik_list), np.std(val_lik_list)))
        file.write('\nTest rmse: mean:{}, dev:{}'.format(np.mean(rmse_list), np.std(rmse_list)))
        file.write('\nTest log likelihood: mean:{}, dev:{}'.format(np.mean(lik_list), np.std(lik_list)))
        for l in range(layer):
            file.write('\nLayer:{}'.format(l))
            file.write('\nkernel lengthscale:')
            file.write('mean:{}, dev:{}\n'.format(np.mean(kls[l]), np.std(kls[l])))
            for j in range(n):
                file.write('{},'.format(kls[l][j]))
            file.write('\nkernel variance:')
            file.write('mean:{}, dev:{}\n'.format(np.mean(kv[l]), np.std(kv[l])))
            for j in range(n):
                file.write('{},'.format(kv[l][j]))
# Script entry point; main() is defined earlier in this file (outside this view).
if __name__ == "__main__":
    main()
"argparse.ArgumentParser",
"numpy.std",
"numpy.asarray",
"numpy.zeros",
"numpy.mean",
"tensorboard.backend.event_processing.event_accumulator.EventAccumulator",
"glob.glob",
"os.chdir"
] | [((758, 804), 'tensorboard.backend.event_processing.event_accumulator.EventAccumulator', 'event_accumulator.EventAccumulator', (['event_file'], {}), '(event_file)\n', (792, 804), False, 'from tensorboard.backend.event_processing import event_accumulator\n'), ((1072, 1118), 'tensorboard.backend.event_processing.event_accumulator.EventAccumulator', 'event_accumulator.EventAccumulator', (['event_file'], {}), '(event_file)\n', (1106, 1118), False, 'from tensorboard.backend.event_processing import event_accumulator\n'), ((1406, 1452), 'tensorboard.backend.event_processing.event_accumulator.EventAccumulator', 'event_accumulator.EventAccumulator', (['event_file'], {}), '(event_file)\n', (1440, 1452), False, 'from tensorboard.backend.event_processing import event_accumulator\n'), ((2560, 2606), 'tensorboard.backend.event_processing.event_accumulator.EventAccumulator', 'event_accumulator.EventAccumulator', (['event_file'], {}), '(event_file)\n', (2594, 2606), False, 'from tensorboard.backend.event_processing import event_accumulator\n'), ((3140, 3186), 'tensorboard.backend.event_processing.event_accumulator.EventAccumulator', 'event_accumulator.EventAccumulator', (['event_file'], {}), '(event_file)\n', (3174, 3186), False, 'from tensorboard.backend.event_processing import event_accumulator\n'), ((3349, 3395), 'tensorboard.backend.event_processing.event_accumulator.EventAccumulator', 'event_accumulator.EventAccumulator', (['event_file'], {}), '(event_file)\n', (3383, 3395), False, 'from tensorboard.backend.event_processing import event_accumulator\n'), ((3513, 3538), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3536, 3538), False, 'import argparse\n'), ((3942, 3965), 'os.chdir', 'os.chdir', (["('./' + folder)"], {}), "('./' + folder)\n", (3950, 3965), False, 'import os, glob\n'), ((3976, 4007), 'glob.glob', 'glob.glob', (['"""model-tensorboard*"""'], {}), "('model-tensorboard*')\n", (3985, 4007), False, 'import os, glob\n'), ((4246, 4266), 
'numpy.zeros', 'np.zeros', (['(layer, n)'], {}), '((layer, n))\n', (4254, 4266), True, 'import numpy as np\n'), ((4276, 4296), 'numpy.zeros', 'np.zeros', (['(layer, n)'], {}), '((layer, n))\n', (4284, 4296), True, 'import numpy as np\n'), ((2979, 3018), 'numpy.asarray', 'np.asarray', (['[scalar.step, scalar.value]'], {}), '([scalar.step, scalar.value])\n', (2989, 3018), True, 'import numpy as np\n'), ((5661, 5683), 'numpy.mean', 'np.mean', (['val_rmse_list'], {}), '(val_rmse_list)\n', (5668, 5683), True, 'import numpy as np\n'), ((5684, 5705), 'numpy.std', 'np.std', (['val_rmse_list'], {}), '(val_rmse_list)\n', (5690, 5705), True, 'import numpy as np\n'), ((5773, 5794), 'numpy.mean', 'np.mean', (['val_lik_list'], {}), '(val_lik_list)\n', (5780, 5794), True, 'import numpy as np\n'), ((5795, 5815), 'numpy.std', 'np.std', (['val_lik_list'], {}), '(val_lik_list)\n', (5801, 5815), True, 'import numpy as np\n'), ((5871, 5889), 'numpy.mean', 'np.mean', (['rmse_list'], {}), '(rmse_list)\n', (5878, 5889), True, 'import numpy as np\n'), ((5890, 5907), 'numpy.std', 'np.std', (['rmse_list'], {}), '(rmse_list)\n', (5896, 5907), True, 'import numpy as np\n'), ((5973, 5990), 'numpy.mean', 'np.mean', (['lik_list'], {}), '(lik_list)\n', (5980, 5990), True, 'import numpy as np\n'), ((5991, 6007), 'numpy.std', 'np.std', (['lik_list'], {}), '(lik_list)\n', (5997, 6007), True, 'import numpy as np\n'), ((6170, 6185), 'numpy.mean', 'np.mean', (['kls[l]'], {}), '(kls[l])\n', (6177, 6185), True, 'import numpy as np\n'), ((6186, 6200), 'numpy.std', 'np.std', (['kls[l]'], {}), '(kls[l])\n', (6192, 6200), True, 'import numpy as np\n'), ((6365, 6379), 'numpy.mean', 'np.mean', (['kv[l]'], {}), '(kv[l])\n', (6372, 6379), True, 'import numpy as np\n'), ((6380, 6393), 'numpy.std', 'np.std', (['kv[l]'], {}), '(kv[l])\n', (6386, 6393), True, 'import numpy as np\n')] |
import pytest
import os
import numpy as np
import pyscal.core as pc
import pyscal.crystal_structures as pcs
def test_sro():
    """Check short-range order (sro) values for an L1_2 crystal."""
    # Build a 2x2x2 'l12' supercell and wire it into a System.
    structure, simbox = pcs.make_crystal('l12', lattice_constant=4.00,
                                      repetitions=[2, 2, 2])
    system = pc.System()
    system.box = simbox
    system.atoms = structure

    # Averaged sro over all atoms.
    system.find_neighbors(method='cutoff', cutoff=4.5)
    averaged = system.calculate_sro(reference_type=1, average=True)
    assert np.round(averaged[0], decimals=2) == -0.33
    assert averaged[1] == 1.0

    # The per-atom value should match the averaged result.
    per_atom = system.atoms[4].sro
    assert np.round(per_atom[0], decimals=2) == -0.33
    assert per_atom[1] == 1.0

    # Restricting to the first shell gives a different parameter.
    system.find_neighbors(method='cutoff', cutoff=4.5)
    first_shell = system.calculate_sro(reference_type=1, average=True, shells=1)
    assert np.round(first_shell[0], decimals=2) == 0.11
| [
"pyscal.crystal_structures.make_crystal",
"numpy.round",
"pyscal.core.System"
] | [((142, 210), 'pyscal.crystal_structures.make_crystal', 'pcs.make_crystal', (['"""l12"""'], {'lattice_constant': '(4.0)', 'repetitions': '[2, 2, 2]'}), "('l12', lattice_constant=4.0, repetitions=[2, 2, 2])\n", (158, 210), True, 'import pyscal.crystal_structures as pcs\n'), ((220, 231), 'pyscal.core.System', 'pc.System', ([], {}), '()\n', (229, 231), True, 'import pyscal.core as pc\n'), ((396, 424), 'numpy.round', 'np.round', (['sro[0]'], {'decimals': '(2)'}), '(sro[0], decimals=2)\n', (404, 424), True, 'import numpy as np\n'), ((516, 544), 'numpy.round', 'np.round', (['sro[0]'], {'decimals': '(2)'}), '(sro[0], decimals=2)\n', (524, 544), True, 'import numpy as np\n'), ((713, 741), 'numpy.round', 'np.round', (['sro[0]'], {'decimals': '(2)'}), '(sro[0], decimals=2)\n', (721, 741), True, 'import numpy as np\n')] |
import primes
import math
import numpy as np
from PIL import Image
def run(params, v=False):
    """Embed the text at params['txt_path'] into the image at
    params['img_path'] (at pixel indices supplied by primes.getpixels)
    and save the result to params['exp_path'].

    v: when True, print progress messages.
    """
    txt_path, end_with = params['txt_path'], params['end_with']
    img_path, exp_path = params['img_path'], params['exp_path']

    if v: print(f"Opening text file: {txt_path}...")
    message = gettext(txt_path, endwith=end_with)

    pixel_positions = primes.getpixels(len(message), v)
    # The image must hold at least as many pixels as the largest index used.
    needed_pixels = pixel_positions[-1]

    if v: print(f"Opening image file: {img_path}...")
    canvas = getimage(img_path, minpixels=needed_pixels)

    if v: print("Ready: Embeding text into image...")
    canvas = encode(canvas, pixel_positions, message)

    if v: print(f"DONE: Saving image as {exp_path}...")
    saveimage(canvas, exp_path)
def gettext(path, endwith=False):
    """Read the file at *path* and return its contents.

    When *endwith* is truthy it is appended as an end marker; if the
    marker already occurs somewhere in the text, False is returned
    instead (the marker would not be unique).
    """
    with open(path, 'r') as handle:
        content = handle.read()
    if not endwith:
        return content
    return content + endwith if endwith not in content else False
def getimage(path, minpixels=False):
    """Load *path* as an 8-bit grayscale numpy array.

    When *minpixels* is truthy, the image is resized (preserving aspect
    ratio) so it contains at least that many pixels.
    """
    def scaled_size(size, minpixels):
        width, height = size
        aspect = width / height
        # minpixels = aspect * h * h, so solve for the new height h.
        h = math.sqrt(minpixels / aspect)
        return math.ceil(aspect * h), math.ceil(h)

    with Image.open(path) as img:
        img = img.convert('L')
        if minpixels:
            img = img.resize(scaled_size(img.size, minpixels))
        return np.array(img)
def encode(image, pixels, text):
    """Write each character's ordinal into *image* at the given flat pixel
    indices and return the array as uint8.

    Note: the input array is modified in place before the uint8 copy is made.
    """
    width = image.shape[1]
    for position, character in zip(pixels, text):
        row, col = divmod(position, width)
        image[row, col] = ord(character)
    return image.astype(np.uint8)
def saveimage(image, path):
    """Save *image* (a numpy array) to *path* as an unoptimized PNG."""
    Image.fromarray(image).save(path, format='PNG', optimize=False)
if __name__ == '__main__':
    # Default job configuration for a direct script run.
    config = dict(
        txt_path='files/usa-constitution.txt',
        end_with='1787',
        img_path='images/lincoln-cracked.png',
        exp_path='exports/lincoln-encoded.png',
    )
    run(config, v=True)
"math.sqrt",
"math.ceil",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray"
] | [((1635, 1657), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1650, 1657), False, 'from PIL import Image\n'), ((1074, 1102), 'math.sqrt', 'math.sqrt', (['(minpixels / ratio)'], {}), '(minpixels / ratio)\n', (1083, 1102), False, 'import math\n'), ((1150, 1170), 'math.ceil', 'math.ceil', (['(ratio * x)'], {}), '(ratio * x)\n', (1159, 1170), False, 'import math\n'), ((1181, 1193), 'math.ceil', 'math.ceil', (['x'], {}), '(x)\n', (1190, 1193), False, 'import math\n'), ((1223, 1239), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1233, 1239), False, 'from PIL import Image\n'), ((1354, 1367), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1362, 1367), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import sys
import simdna
import simdna.util as util
import simdna.synthetic as synthetic
import argparse
def do(options):
    """Generate simulated sequences with motifs embedded at a controlled
    density and write them (with embedding annotations) to disk.

    *options* is the argparse namespace built in the __main__ block.
    """
    if options.seed is not None:
        import numpy as np
        np.random.seed(options.seed)
        import random
        random.seed(options.seed)

    # Encode the run parameters into the output file name.
    out_core = util.addArguments("DensityEmbedding",
        [util.ArgumentToAdd(options.prefix, "prefix"),
         util.BooleanArgument(options.bestHit, "bestHit"),
         util.ArrArgument(options.motifNames, "motifs"),
         util.ArgumentToAdd(options.min_motifs, "min"),
         util.ArgumentToAdd(options.max_motifs, "max"),
         util.ArgumentToAdd(options.mean_motifs, "mean"),
         util.ArgumentToAdd(options.zero_prob, "zeroProb"),
         util.ArgumentToAdd(options.seqLength, "seqLength"),
         util.ArgumentToAdd(options.numSeqs, "numSeqs")])

    motifs = synthetic.LoadedEncodeMotifs(options.pathToMotifs,
                                          pseudocountProb=0.001)
    # Either take the single best-scoring match for each PWM or sample from it.
    substring_cls = (synthetic.BestHitPwmFromLoadedMotifs if options.bestHit
                     else synthetic.PwmSamplerFromLoadedMotifs)

    def motif_embedder(motif_name):
        # One embedder per motif: a zero-inflated, min/max-clipped Poisson
        # count of occurrences, each placed uniformly at random.
        quantity = synthetic.ZeroInflater(
            synthetic.MinMaxWrapper(
                synthetic.PoissonQuantityGenerator(options.mean_motifs),
                theMax=options.max_motifs, theMin=options.min_motifs),
            zeroProb=options.zero_prob)
        return synthetic.RepeatedEmbedder(
            synthetic.SubstringEmbedder(
                substringGenerator=substring_cls(loadedMotifs=motifs,
                                                 motifName=motif_name),
                positionGenerator=synthetic.UniformPositionGenerator()),
            quantityGenerator=quantity)

    embed_in_background = synthetic.EmbedInABackground(
        backgroundGenerator=synthetic.ZeroOrderBackgroundGenerator(
            seqLength=options.seqLength),
        embedders=[motif_embedder(name) for name in options.motifNames])

    sequence_set = synthetic.GenerateSequenceNTimes(embed_in_background,
                                                  options.numSeqs)
    synthetic.printSequences(out_core + ".simdata", sequence_set,
                             includeFasta=True, includeEmbeddings=True,
                             prefix=options.prefix)
if __name__ == "__main__":
    # Command-line interface; all motif/quantity knobs map onto do()'s options.
    ap = argparse.ArgumentParser()
    ap.add_argument("--prefix")
    ap.add_argument("--pathToMotifs", default=simdna.ENCODE_MOTIFS_PATH)
    ap.add_argument("--bestHit", action="store_true")
    ap.add_argument("--motifNames", type=str, nargs='+', required=True)
    ap.add_argument("--max-motifs", type=int, required=True)
    ap.add_argument("--min-motifs", type=int, default=0)
    ap.add_argument("--mean-motifs", type=int, required=True)
    ap.add_argument("--zero-prob", type=float, required=False, default=0)
    ap.add_argument("--seqLength", type=int, required=True)
    ap.add_argument("--numSeqs", type=int, required=True)
    ap.add_argument("--seed", type=int, default=None)
    do(ap.parse_args())
| [
"simdna.util.ArrArgument",
"numpy.random.seed",
"argparse.ArgumentParser",
"simdna.synthetic.UniformPositionGenerator",
"simdna.synthetic.GenerateSequenceNTimes",
"random.seed",
"simdna.synthetic.printSequences",
"simdna.synthetic.LoadedEncodeMotifs",
"simdna.util.ArgumentToAdd",
"simdna.util.Bool... | [((1072, 1145), 'simdna.synthetic.LoadedEncodeMotifs', 'synthetic.LoadedEncodeMotifs', (['options.pathToMotifs'], {'pseudocountProb': '(0.001)'}), '(options.pathToMotifs, pseudocountProb=0.001)\n', (1100, 1145), True, 'import simdna.synthetic as synthetic\n'), ((2128, 2196), 'simdna.synthetic.GenerateSequenceNTimes', 'synthetic.GenerateSequenceNTimes', (['embedInBackground', 'options.numSeqs'], {}), '(embedInBackground, options.numSeqs)\n', (2160, 2196), True, 'import simdna.synthetic as synthetic\n'), ((2201, 2342), 'simdna.synthetic.printSequences', 'synthetic.printSequences', (["(outputFileName_core + '.simdata')", 'sequenceSet'], {'includeFasta': '(True)', 'includeEmbeddings': '(True)', 'prefix': 'options.prefix'}), "(outputFileName_core + '.simdata', sequenceSet,\n includeFasta=True, includeEmbeddings=True, prefix=options.prefix)\n", (2225, 2342), True, 'import simdna.synthetic as synthetic\n'), ((2437, 2462), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2460, 2462), False, 'import argparse\n'), ((225, 253), 'numpy.random.seed', 'np.random.seed', (['options.seed'], {}), '(options.seed)\n', (239, 253), True, 'import numpy as np\n'), ((285, 310), 'random.seed', 'random.seed', (['options.seed'], {}), '(options.seed)\n', (296, 310), False, 'import random\n'), ((409, 453), 'simdna.util.ArgumentToAdd', 'util.ArgumentToAdd', (['options.prefix', '"""prefix"""'], {}), "(options.prefix, 'prefix')\n", (427, 453), True, 'import simdna.util as util\n'), ((480, 528), 'simdna.util.BooleanArgument', 'util.BooleanArgument', (['options.bestHit', '"""bestHit"""'], {}), "(options.bestHit, 'bestHit')\n", (500, 528), True, 'import simdna.util as util\n'), ((555, 601), 'simdna.util.ArrArgument', 'util.ArrArgument', (['options.motifNames', '"""motifs"""'], {}), "(options.motifNames, 'motifs')\n", (571, 601), True, 'import simdna.util as util\n'), ((628, 673), 'simdna.util.ArgumentToAdd', 'util.ArgumentToAdd', (['options.min_motifs', 
'"""min"""'], {}), "(options.min_motifs, 'min')\n", (646, 673), True, 'import simdna.util as util\n'), ((700, 745), 'simdna.util.ArgumentToAdd', 'util.ArgumentToAdd', (['options.max_motifs', '"""max"""'], {}), "(options.max_motifs, 'max')\n", (718, 745), True, 'import simdna.util as util\n'), ((772, 819), 'simdna.util.ArgumentToAdd', 'util.ArgumentToAdd', (['options.mean_motifs', '"""mean"""'], {}), "(options.mean_motifs, 'mean')\n", (790, 819), True, 'import simdna.util as util\n'), ((846, 895), 'simdna.util.ArgumentToAdd', 'util.ArgumentToAdd', (['options.zero_prob', '"""zeroProb"""'], {}), "(options.zero_prob, 'zeroProb')\n", (864, 895), True, 'import simdna.util as util\n'), ((922, 972), 'simdna.util.ArgumentToAdd', 'util.ArgumentToAdd', (['options.seqLength', '"""seqLength"""'], {}), "(options.seqLength, 'seqLength')\n", (940, 972), True, 'import simdna.util as util\n'), ((999, 1045), 'simdna.util.ArgumentToAdd', 'util.ArgumentToAdd', (['options.numSeqs', '"""numSeqs"""'], {}), "(options.numSeqs, 'numSeqs')\n", (1017, 1045), True, 'import simdna.util as util\n'), ((1347, 1414), 'simdna.synthetic.ZeroOrderBackgroundGenerator', 'synthetic.ZeroOrderBackgroundGenerator', ([], {'seqLength': 'options.seqLength'}), '(seqLength=options.seqLength)\n', (1385, 1414), True, 'import simdna.synthetic as synthetic\n'), ((1742, 1778), 'simdna.synthetic.UniformPositionGenerator', 'synthetic.UniformPositionGenerator', ([], {}), '()\n', (1776, 1778), True, 'import simdna.synthetic as synthetic\n'), ((1875, 1930), 'simdna.synthetic.PoissonQuantityGenerator', 'synthetic.PoissonQuantityGenerator', (['options.mean_motifs'], {}), '(options.mean_motifs)\n', (1909, 1930), True, 'import simdna.synthetic as synthetic\n')] |
# Copyright 2020 Regents of the University of Colorado. All Rights Reserved.
# Released under the MIT license.
# This software was developed at the University of Colorado's Laboratory for
# Atmospheric and Space Physics.
# Verify current version before use at: https://github.com/MAVENSDC/PyTplot
import cdflib
# If the user has astropy installed, use the cdflib's CDFAstropy class for time conversion
# (Converting to unix time is much, much faster this way)
try:
from cdflib.epochs_astropy import CDFAstropy as cdfepoch
except:
from cdflib.epochs import CDFepoch as cdfepoch
import re
import numpy as np
import xarray as xr
from pytplot.store_data import store_data
from pytplot.tplot import tplot
from pytplot.options import options
import pytplot
import copy
def cdf_to_tplot(filenames, varformat=None, get_support_data=False,
                 get_ignore_data=False, string_encoding='ascii',
                 prefix='', suffix='', plot=False, merge=False,
                 center_measurement=False, notplot=False, varnames=None):
    """
    This function will automatically create tplot variables from CDF files. In general, the files should be
    ISTP compliant for this importer to work.  Each variable is read into a new tplot variable (a.k.a an xarray DataArray),
    and all associated file/variable metadata is read into the attrs dictionary.

    .. note::
        Variables must have an attribute named "VAR_TYPE". If the attribute entry
        is "data" (or "support_data"), then they will be added as tplot variables.
        Additionally, data variables should have attributes named "DEPEND_TIME" or
        "DEPEND_0" that describes which variable is x axis.  If the data is 2D,
        then an attribute "DEPEND_1" must describe which variable contains the
        secondary axis.

    Parameters:
        filenames : str/list of str
            The file names and full paths of CDF files.
        varformat : str
            The file variable formats to load into tplot.  Wildcard character
            "*" is accepted.  By default, all variables are loaded in.
        get_support_data: bool
            Data with an attribute "VAR_TYPE" with a value of "support_data"
            will be loaded into tplot.  By default, only loads in data with a
            "VAR_TYPE" attribute of "data".
        prefix: str
            The tplot variable names will be given this prefix.  By default,
            no prefix is added.
        suffix: str
            The tplot variable names will be given this suffix.  By default,
            no suffix is added.
        plot: bool
            The data is plotted immediately after being generated.  All tplot
            variables generated from this function will be on the same plot.
        merge: bool
            If True, then data from different cdf files will be merged into
            a single pytplot variable.
        get_ignore_data: bool
            Data with an attribute "VAR_TYPE" with a value of "ignore_data"
            will be loaded into tplot.  By default, only loads in data with a
            "VAR_TYPE" attribute of "data".
        center_measurement: bool
            If True, the CDF epoch variables are time-shifted to the middle
            of the accumulation interval by their DELTA_PLUS_VAR and
            DELTA_MINUS_VAR variable attributes
        notplot: bool
            If True, then data are returned in a hash table instead of
            being stored in tplot variables (useful for debugging, and
            access to multi-dimensional data products)
        varnames: str or list of str
            Load these variables only. If None, [] or ['*'], then load everything.

    Returns:
        List of tplot variables created (unless notplot keyword is used).
    """
    stored_variables = []
    epoch_cache = {}
    output_table = {}
    metadata = {}

    # Use None as the default for varnames instead of a mutable [] default.
    if varnames is None:
        varnames = []
    if not isinstance(varnames, list):
        varnames = [varnames]
    if len(varnames) > 0:
        if '*' in varnames:
            varnames = []

    # pytplot.data_quants = {}
    if isinstance(filenames, str):
        filenames = [filenames]
    elif isinstance(filenames, list):
        filenames = filenames
    else:
        print("Invalid filenames input.")
        return stored_variables

    var_type = ['data']
    if varformat is None:
        varformat = ".*"
    if get_support_data:
        var_type.append('support_data')
    if get_ignore_data:
        var_type.append('ignore_data')

    varformat = varformat.replace("*", ".*")
    var_regex = re.compile(varformat)
    filenames.sort()

    for filename in filenames:
        cdf_file = cdflib.CDF(filename)
        cdf_file.string_encoding = string_encoding
        cdf_info = cdf_file.cdf_info()
        all_cdf_variables = cdf_info['rVariables'] + cdf_info['zVariables']

        # User defined variables.
        if len(varnames) > 0:
            load_cdf_variables = [value for value in varnames if value in all_cdf_variables]
        else:
            load_cdf_variables = all_cdf_variables

        # Global attributes are optional; fall back to an empty dict.
        try:
            gatt = cdf_file.globalattsget()
        except Exception:
            gatt = {}

        for var in load_cdf_variables:
            if not re.match(var_regex, var):
                continue
            var_atts = cdf_file.varattsget(var)

            if 'VAR_TYPE' in var_atts:
                this_var_type = var_atts['VAR_TYPE'].lower()
            elif 'PARAMETER_TYPE' in var_atts:
                this_var_type = var_atts['PARAMETER_TYPE'].lower()
            else:
                # 'VAR_TYPE' and 'PARAMETER_TYPE' not found in the variable attributes
                continue

            if this_var_type in var_type:
                var_atts = cdf_file.varattsget(var)
                var_properties = cdf_file.varinq(var)

                # Find data name and if it is already in stored variables
                if 'TPLOT_NAME' in var_atts:
                    var_name = prefix + var_atts['TPLOT_NAME'] + suffix
                else:
                    var_name = prefix + var + suffix

                if "DEPEND_TIME" in var_atts:
                    x_axis_var = var_atts["DEPEND_TIME"]
                elif "DEPEND_0" in var_atts:
                    x_axis_var = var_atts["DEPEND_0"]
                else:
                    # non-record varying variables (NRVs)
                    # added by egrimes, 13Jan2021
                    # here we assume if there isn't a DEPEND_TIME or DEPEND_0, there are no other depends
                    try:
                        ydata = cdf_file.varget(var)
                    except Exception:
                        continue
                    if ydata is None:
                        continue
                    # since NRVs don't vary with time, they shouldn't vary across files
                    output_table[var_name] = {'y': ydata}
                    continue

                data_type_description \
                    = cdf_file.varinq(x_axis_var)['Data_Type_Description']

                if epoch_cache.get(filename+x_axis_var) is None:
                    delta_plus_var = 0.0
                    delta_minus_var = 0.0
                    delta_time = 0.0

                    # Skip variables with ValueErrors.
                    try:
                        xdata = cdf_file.varget(x_axis_var)
                        epoch_var_atts = cdf_file.varattsget(x_axis_var)
                    except ValueError:
                        continue

                    # check for DELTA_PLUS_VAR/DELTA_MINUS_VAR attributes
                    if center_measurement:
                        if 'DELTA_PLUS_VAR' in epoch_var_atts:
                            delta_plus_var = cdf_file.varget(epoch_var_atts['DELTA_PLUS_VAR'])
                            delta_plus_var_att = cdf_file.varattsget(epoch_var_atts['DELTA_PLUS_VAR'])

                            # check if a conversion to seconds is required
                            # note: use the builtin float here; np.float was
                            # removed in NumPy 1.24
                            if 'SI_CONVERSION' in delta_plus_var_att:
                                si_conv = delta_plus_var_att['SI_CONVERSION']
                                delta_plus_var = delta_plus_var.astype(float)*float(si_conv.split('>')[0])
                            elif 'SI_CONV' in delta_plus_var_att:
                                si_conv = delta_plus_var_att['SI_CONV']
                                delta_plus_var = delta_plus_var.astype(float)*float(si_conv.split('>')[0])

                        if 'DELTA_MINUS_VAR' in epoch_var_atts:
                            delta_minus_var = cdf_file.varget(epoch_var_atts['DELTA_MINUS_VAR'])
                            delta_minus_var_att = cdf_file.varattsget(epoch_var_atts['DELTA_MINUS_VAR'])

                            # check if a conversion to seconds is required
                            if 'SI_CONVERSION' in delta_minus_var_att:
                                si_conv = delta_minus_var_att['SI_CONVERSION']
                                delta_minus_var = delta_minus_var.astype(float)*float(si_conv.split('>')[0])
                            elif 'SI_CONV' in delta_minus_var_att:
                                si_conv = delta_minus_var_att['SI_CONV']
                                delta_minus_var = delta_minus_var.astype(float)*float(si_conv.split('>')[0])

                        # sometimes these are specified as arrays
                        if isinstance(delta_plus_var, np.ndarray) and isinstance(delta_minus_var, np.ndarray):
                            delta_time = (delta_plus_var-delta_minus_var)/2.0
                        else:  # and sometimes constants
                            if delta_plus_var != 0.0 or delta_minus_var != 0.0:
                                delta_time = (delta_plus_var-delta_minus_var)/2.0

                if epoch_cache.get(filename + x_axis_var) is None:
                    if ('CDF_TIME' in data_type_description) or \
                            ('CDF_EPOCH' in data_type_description):
                        xdata = cdfepoch.unixtime(xdata)
                    epoch_cache[filename+x_axis_var] = np.array(xdata)+delta_time
                else:
                    xdata = epoch_cache[filename + x_axis_var]

                try:
                    ydata = cdf_file.varget(var)
                except Exception:
                    continue

                if ydata is None:
                    continue
                if "FILLVAL" in var_atts:
                    if (var_properties['Data_Type_Description'] ==
                            'CDF_FLOAT' or
                            var_properties['Data_Type_Description'] ==
                            'CDF_REAL4' or
                            var_properties['Data_Type_Description'] ==
                            'CDF_DOUBLE' or
                            var_properties['Data_Type_Description'] ==
                            'CDF_REAL8'):
                        if ydata[ydata == var_atts["FILLVAL"]].size != 0:
                            ydata[ydata == var_atts["FILLVAL"]] = np.nan
                    elif var_properties['Data_Type_Description'][:7] == 'CDF_INT':
                        # NaN is only valid for floating point data
                        # but we still need to handle FILLVAL's for
                        # integer data, so we'll just set those to 0
                        ydata[ydata == var_atts["FILLVAL"]] = 0

                tplot_data = {'x': xdata, 'y': ydata}

                # Data may depend on other data in the CDF.
                depend_1 = None
                depend_2 = None
                depend_3 = None
                if "DEPEND_1" in var_atts:
                    if var_atts["DEPEND_1"] in all_cdf_variables:
                        depend_1 = np.array(cdf_file.varget(var_atts["DEPEND_1"]))
                        # Ignore the depend types if they are strings
                        if depend_1.dtype.type is np.str_:
                            depend_1 = None
                if "DEPEND_2" in var_atts:
                    if var_atts["DEPEND_2"] in all_cdf_variables:
                        depend_2 = np.array(cdf_file.varget(var_atts["DEPEND_2"]))
                        # Ignore the depend types if they are strings
                        if depend_2.dtype.type is np.str_:
                            depend_2 = None
                if "DEPEND_3" in var_atts:
                    if var_atts["DEPEND_3"] in all_cdf_variables:
                        depend_3 = np.array(cdf_file.varget(var_atts["DEPEND_3"]))
                        # Ignore the depend types if they are strings
                        if depend_3.dtype.type is np.str_:
                            depend_3 = None

                nontime_varying_depends = []

                if depend_1 is not None and depend_2 is not None and depend_3 is not None:
                    tplot_data['v1'] = depend_1
                    tplot_data['v2'] = depend_2
                    tplot_data['v3'] = depend_3

                    if len(depend_1.shape) == 1:
                        nontime_varying_depends.append('v1')
                    if len(depend_2.shape) == 1:
                        nontime_varying_depends.append('v2')
                    if len(depend_3.shape) == 1:
                        nontime_varying_depends.append('v3')

                elif depend_1 is not None and depend_2 is not None:
                    tplot_data['v1'] = depend_1
                    tplot_data['v2'] = depend_2
                    if len(depend_1.shape) == 1:
                        nontime_varying_depends.append('v1')
                    if len(depend_2.shape) == 1:
                        nontime_varying_depends.append('v2')
                elif depend_1 is not None:
                    tplot_data['v'] = depend_1
                    if len(depend_1.shape) == 1:
                        nontime_varying_depends.append('v')
                elif depend_2 is not None:
                    tplot_data['v'] = depend_2
                    if len(depend_2.shape) == 1:
                        nontime_varying_depends.append('v')

                metadata[var_name] = {'display_type': var_atts.get("DISPLAY_TYPE", "time_series"),
                                      'scale_type': var_atts.get("SCALE_TYP", "linear"),
                                      'var_attrs': var_atts, 'file_name': filename, 'global_attrs': gatt}

                # Check if the variable already exists in the for loop output
                if var_name not in output_table:
                    output_table[var_name] = tplot_data
                else:
                    # If it does, loop though the existing variable's x,y,v,v2,v3,etc
                    var_data = output_table[var_name]
                    for output_var in var_data:
                        if output_var not in nontime_varying_depends:
                            if np.asarray(tplot_data[output_var]).ndim == 0 and np.equal(tplot_data[output_var], None):
                                # If there is nothing in the new variable, then pass
                                pass
                            elif np.asarray(var_data[output_var]).ndim == 0 and np.equal(var_data[output_var], None):
                                # If there is nothing in the old variable, then replace
                                var_data[output_var] = tplot_data[output_var]
                            else:  # If they both have something, then concatenate
                                var_data[output_var] = np.concatenate((var_data[output_var], tplot_data[output_var]))

    if notplot:
        return output_table

    for var_name in output_table.keys():
        to_merge = False
        if var_name in pytplot.data_quants.keys() and merge:
            prev_data_quant = pytplot.data_quants[var_name]
            to_merge = True

        try:
            attr_dict = {}
            if metadata.get(var_name) is not None:
                attr_dict["CDF"] = {}
                attr_dict["CDF"]["VATT"] = metadata[var_name]['var_attrs']
                attr_dict["CDF"]["GATT"] = metadata[var_name]['global_attrs']
                attr_dict["CDF"]["FILENAME"] = metadata[var_name]['file_name']

                # extract the coordinate system, if available
                vatt_keys = list(attr_dict["CDF"]["VATT"].keys())
                vatt_lower = [k.lower() for k in vatt_keys]
                if 'coordinate_system' in vatt_lower:
                    attr_dict['data_att'] = {'coord_sys': attr_dict["CDF"]["VATT"][vatt_keys[vatt_lower.index('coordinate_system')]]}
            store_data(var_name, data=output_table[var_name], attr_dict=attr_dict)
        except ValueError:
            continue

        if var_name not in stored_variables:
            stored_variables.append(var_name)

        if metadata.get(var_name) is not None:
            if metadata[var_name]['display_type'] == "spectrogram":
                options(var_name, 'spec', 1)
            if metadata[var_name]['scale_type'] == 'log':
                options(var_name, 'ylog', 1)
            if metadata[var_name].get('var_attrs') is not None:
                if metadata[var_name]['var_attrs'].get('LABLAXIS') is not None:
                    options(var_name, 'ytitle', metadata[var_name]['var_attrs']['LABLAXIS'])
                if metadata[var_name]['var_attrs'].get('UNITS') is not None:
                    if metadata[var_name]['display_type'] == 'spectrogram':
                        options(var_name, 'ztitle', '[' + metadata[var_name]['var_attrs']['UNITS'] + ']')
                    else:
                        options(var_name, 'ysubtitle', '[' + metadata[var_name]['var_attrs']['UNITS'] + ']')

            # Gather up all options in the variable attribute section, toss them into options and see what sticks
            options(var_name, opt_dict=metadata[var_name]['var_attrs'])

        if to_merge is True:
            cur_data_quant = pytplot.data_quants[var_name]
            plot_options = copy.deepcopy(pytplot.data_quants[var_name].attrs)
            pytplot.data_quants[var_name] = xr.concat([prev_data_quant, cur_data_quant], dim='time').sortby('time')
            pytplot.data_quants[var_name].attrs = plot_options

    if notplot:
        return output_table

    if plot:
        tplot(stored_variables)

    return stored_variables
| [
"copy.deepcopy",
"pytplot.store_data.store_data",
"cdflib.epochs.CDFepoch.unixtime",
"numpy.asarray",
"pytplot.data_quants.keys",
"re.match",
"xarray.concat",
"numpy.equal",
"cdflib.CDF",
"numpy.array",
"pytplot.tplot.tplot",
"pytplot.options.options",
"numpy.concatenate",
"re.compile"
] | [((4544, 4565), 're.compile', 're.compile', (['varformat'], {}), '(varformat)\n', (4554, 4565), False, 'import re\n'), ((4637, 4657), 'cdflib.CDF', 'cdflib.CDF', (['filename'], {}), '(filename)\n', (4647, 4657), False, 'import cdflib\n'), ((18287, 18310), 'pytplot.tplot.tplot', 'tplot', (['stored_variables'], {}), '(stored_variables)\n', (18292, 18310), False, 'from pytplot.tplot import tplot\n'), ((16581, 16651), 'pytplot.store_data.store_data', 'store_data', (['var_name'], {'data': 'output_table[var_name]', 'attr_dict': 'attr_dict'}), '(var_name, data=output_table[var_name], attr_dict=attr_dict)\n', (16591, 16651), False, 'from pytplot.store_data import store_data\n'), ((17814, 17873), 'pytplot.options.options', 'options', (['var_name'], {'opt_dict': "metadata[var_name]['var_attrs']"}), "(var_name, opt_dict=metadata[var_name]['var_attrs'])\n", (17821, 17873), False, 'from pytplot.options import options\n'), ((17990, 18040), 'copy.deepcopy', 'copy.deepcopy', (['pytplot.data_quants[var_name].attrs'], {}), '(pytplot.data_quants[var_name].attrs)\n', (18003, 18040), False, 'import copy\n'), ((5199, 5223), 're.match', 're.match', (['var_regex', 'var'], {}), '(var_regex, var)\n', (5207, 5223), False, 'import re\n'), ((15704, 15730), 'pytplot.data_quants.keys', 'pytplot.data_quants.keys', ([], {}), '()\n', (15728, 15730), False, 'import pytplot\n'), ((16924, 16952), 'pytplot.options.options', 'options', (['var_name', '"""spec"""', '(1)'], {}), "(var_name, 'spec', 1)\n", (16931, 16952), False, 'from pytplot.options import options\n'), ((17027, 17055), 'pytplot.options.options', 'options', (['var_name', '"""ylog"""', '(1)'], {}), "(var_name, 'ylog', 1)\n", (17034, 17055), False, 'from pytplot.options import options\n'), ((17220, 17292), 'pytplot.options.options', 'options', (['var_name', '"""ytitle"""', "metadata[var_name]['var_attrs']['LABLAXIS']"], {}), "(var_name, 'ytitle', metadata[var_name]['var_attrs']['LABLAXIS'])\n", (17227, 17292), False, 'from pytplot.options 
import options\n'), ((18085, 18141), 'xarray.concat', 'xr.concat', (['[prev_data_quant, cur_data_quant]'], {'dim': '"""time"""'}), "([prev_data_quant, cur_data_quant], dim='time')\n", (18094, 18141), True, 'import xarray as xr\n'), ((10001, 10025), 'cdflib.epochs.CDFepoch.unixtime', 'cdfepoch.unixtime', (['xdata'], {}), '(xdata)\n', (10018, 10025), True, 'from cdflib.epochs import CDFepoch as cdfepoch\n'), ((17470, 17555), 'pytplot.options.options', 'options', (['var_name', '"""ztitle"""', "('[' + metadata[var_name]['var_attrs']['UNITS'] + ']')"], {}), "(var_name, 'ztitle', '[' + metadata[var_name]['var_attrs']['UNITS'] +\n ']')\n", (17477, 17555), False, 'from pytplot.options import options\n'), ((17602, 17691), 'pytplot.options.options', 'options', (['var_name', '"""ysubtitle"""', "('[' + metadata[var_name]['var_attrs']['UNITS'] + ']')"], {}), "(var_name, 'ysubtitle', '[' + metadata[var_name]['var_attrs'][\n 'UNITS'] + ']')\n", (17609, 17691), False, 'from pytplot.options import options\n'), ((10085, 10100), 'numpy.array', 'np.array', (['xdata'], {}), '(xdata)\n', (10093, 10100), True, 'import numpy as np\n'), ((14922, 14960), 'numpy.equal', 'np.equal', (['tplot_data[output_var]', 'None'], {}), '(tplot_data[output_var], None)\n', (14930, 14960), True, 'import numpy as np\n'), ((15164, 15200), 'numpy.equal', 'np.equal', (['var_data[output_var]', 'None'], {}), '(var_data[output_var], None)\n', (15172, 15200), True, 'import numpy as np\n'), ((15506, 15568), 'numpy.concatenate', 'np.concatenate', (['(var_data[output_var], tplot_data[output_var])'], {}), '((var_data[output_var], tplot_data[output_var]))\n', (15520, 15568), True, 'import numpy as np\n'), ((14873, 14907), 'numpy.asarray', 'np.asarray', (['tplot_data[output_var]'], {}), '(tplot_data[output_var])\n', (14883, 14907), True, 'import numpy as np\n'), ((15117, 15149), 'numpy.asarray', 'np.asarray', (['var_data[output_var]'], {}), '(var_data[output_var])\n', (15127, 15149), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 16:36:56 2018
@author: Alex
"""
#%% Import packages
import pickle
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import os
os.chdir('C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\src\\topic modeling\\')
from plotly_network import plot
#%% Load data
# Load metatopic allocations
processed_data_folder = 'C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\data\\processed\\'
filename = processed_data_folder + 'topic_assignments'
with open(filename, 'rb') as fp:
topic_assignments, meta_topic_assignments = pickle.load(fp)
# Load distance matrix
filename = processed_data_folder + 'graph_and_labels'
with open(filename, 'rb') as fp:
graph_mat,topic_labels,dist_mat,doc_topic_mat = pickle.load(fp)
#%% Loop through meta-topics
plt.close()
#for meta_topic in np.unique(meta_topic_assignments):
meta_topic = 0
# Find the sub topics
sub_topics, = np.where(meta_topic_assignments == meta_topic)
# Get the distance matrix just for those topics
sub_dist_mat = dist_mat[sub_topics][:,sub_topics]
# Generate the graph matrix by selecting an appropriate threshold
graph_mat = sub_dist_mat < 0.95
if not np.any(graph_mat):
min_val = np.min(sub_dist_mat)
graph_mat = sub_dist_mat <= min_val
# Find the docs belonging to that subtopic
#docs = np.in1d(topic_assignments,sub_topics)
# Get subtopic labels
sub_topic_labels = {sub_topic:topic_labels[sub_topic] for sub_topic in sub_topics if sub_topic in topic_labels}
new_sub_topic_labels = {}
#
# Rename the keys
for counter, value in enumerate(sub_topic_labels.keys()):
new_sub_topic_labels[counter] = sub_topic_labels[value]
# Plot the graph
plt.figure()
G = nx.from_numpy_matrix(graph_mat)
#pos = nx.graphviz_layout(G)
#pos = nx.nx_agraph.graphviz_layout(G)
#pos=nx.spring_layout(G)
pos = nx.layout.circular_layout(G)
nx.relabel_nodes(G,sub_topic_labels)
nx.draw(G,pos)
nx.draw_networkx_labels(G,pos,new_sub_topic_labels,font_size=16)
node_labels = list(sub_topic_labels.values())
#%% Calculate text positions
text_pos = []
for key, value in pos.items():
if value[0] < 0:
pos_part2 = ' left'
else:
pos_part2 = ' right'
if value[1] < 0:
pos_part1 = 'bottom'
else:
pos_part1 = 'top'
text_pos.append(pos_part1 + pos_part2)
#%% Plot in plot
url = plot(G,pos,node_labels,text_pos)
| [
"networkx.from_numpy_matrix",
"matplotlib.pyplot.close",
"networkx.relabel_nodes",
"numpy.any",
"plotly_network.plot",
"matplotlib.pyplot.figure",
"numpy.where",
"networkx.layout.circular_layout",
"networkx.draw",
"networkx.draw_networkx_labels",
"pickle.load",
"numpy.min",
"os.chdir"
] | [((204, 309), 'os.chdir', 'os.chdir', (['"""C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\src\\\\topic modeling\\\\"""'], {}), "(\n 'C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\src\\\\topic modeling\\\\'\n )\n", (212, 309), False, 'import os\n'), ((859, 870), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (868, 870), True, 'import matplotlib.pyplot as plt\n'), ((977, 1023), 'numpy.where', 'np.where', (['(meta_topic_assignments == meta_topic)'], {}), '(meta_topic_assignments == meta_topic)\n', (985, 1023), True, 'import numpy as np\n'), ((1742, 1754), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1752, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1790), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['graph_mat'], {}), '(graph_mat)\n', (1779, 1790), True, 'import networkx as nx\n'), ((1891, 1919), 'networkx.layout.circular_layout', 'nx.layout.circular_layout', (['G'], {}), '(G)\n', (1916, 1919), True, 'import networkx as nx\n'), ((1920, 1957), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['G', 'sub_topic_labels'], {}), '(G, sub_topic_labels)\n', (1936, 1957), True, 'import networkx as nx\n'), ((1957, 1972), 'networkx.draw', 'nx.draw', (['G', 'pos'], {}), '(G, pos)\n', (1964, 1972), True, 'import networkx as nx\n'), ((1972, 2039), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos', 'new_sub_topic_labels'], {'font_size': '(16)'}), '(G, pos, new_sub_topic_labels, font_size=16)\n', (1995, 2039), True, 'import networkx as nx\n'), ((2401, 2436), 'plotly_network.plot', 'plot', (['G', 'pos', 'node_labels', 'text_pos'], {}), '(G, pos, node_labels, text_pos)\n', (2405, 2436), False, 'from plotly_network import plot\n'), ((621, 636), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (632, 636), False, 'import pickle\n'), ((806, 821), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (817, 821), False, 'import pickle\n'), ((1233, 1250), 
'numpy.any', 'np.any', (['graph_mat'], {}), '(graph_mat)\n', (1239, 1250), True, 'import numpy as np\n'), ((1266, 1286), 'numpy.min', 'np.min', (['sub_dist_mat'], {}), '(sub_dist_mat)\n', (1272, 1286), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import click
import logging
import os
from tqdm import tqdm
import numpy as np
from src.features.spectrum import Spectrum
from src.utils.data_utils import M4AStreamer
logger = logging.getLogger(__name__)
def validate_and_save(sgrams, dur, base_name: str):
for i, sgram in enumerate(sgrams):
assert sgram.shape[1] == dur
fname = base_name + '_' + str(i) + '.npy'
np.save(fname, sgram)
@click.command()
@click.option('--dataset', default='VoxCeleb1',
type=click.Choice(['VoxCeleb1', 'VoxCeleb2']))
@click.option('--duration', default=3.0,
help='Duration of Audio files to extract')
@click.option('--verbose', '-v', is_flag=True, help='show debug output')
@click.option('--progress', is_flag=True, help='Show Progress Bar')
@click.option('--force', is_flag=True, help='Force overwrite spectrograms')
@click.pass_context
def featuregen(ctx, dataset, duration, verbose, progress, force):
if verbose:
logger.setLevel(logging.DEBUG)
app_config = ctx.obj.app_config
hparams = ctx.obj.hparams
setattr(hparams, 'duration', duration)
data_dir = app_config.data_dir[dataset]
audio_files = M4AStreamer(data_dir)
specgen = Spectrum(hparams)
if progress and not verbose:
audio_files = tqdm(audio_files)
for audio_file in audio_files:
base_name = os.path.splitext(audio_file)[0]
sgrams = specgen.generate(audio_file)
validate_and_save(sgrams, 301, base_name)
logger.info('Finished generating all spectrograms')
| [
"tqdm.tqdm",
"numpy.save",
"click.option",
"click.command",
"click.Choice",
"os.path.splitext",
"src.features.spectrum.Spectrum",
"src.utils.data_utils.M4AStreamer",
"logging.getLogger"
] | [((201, 228), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (218, 228), False, 'import logging\n'), ((442, 457), 'click.command', 'click.command', ([], {}), '()\n', (455, 457), False, 'import click\n'), ((568, 655), 'click.option', 'click.option', (['"""--duration"""'], {'default': '(3.0)', 'help': '"""Duration of Audio files to extract"""'}), "('--duration', default=3.0, help=\n 'Duration of Audio files to extract')\n", (580, 655), False, 'import click\n'), ((666, 737), 'click.option', 'click.option', (['"""--verbose"""', '"""-v"""'], {'is_flag': '(True)', 'help': '"""show debug output"""'}), "('--verbose', '-v', is_flag=True, help='show debug output')\n", (678, 737), False, 'import click\n'), ((739, 805), 'click.option', 'click.option', (['"""--progress"""'], {'is_flag': '(True)', 'help': '"""Show Progress Bar"""'}), "('--progress', is_flag=True, help='Show Progress Bar')\n", (751, 805), False, 'import click\n'), ((807, 881), 'click.option', 'click.option', (['"""--force"""'], {'is_flag': '(True)', 'help': '"""Force overwrite spectrograms"""'}), "('--force', is_flag=True, help='Force overwrite spectrograms')\n", (819, 881), False, 'import click\n'), ((1196, 1217), 'src.utils.data_utils.M4AStreamer', 'M4AStreamer', (['data_dir'], {}), '(data_dir)\n', (1207, 1217), False, 'from src.utils.data_utils import M4AStreamer\n'), ((1233, 1250), 'src.features.spectrum.Spectrum', 'Spectrum', (['hparams'], {}), '(hparams)\n', (1241, 1250), False, 'from src.features.spectrum import Spectrum\n'), ((417, 438), 'numpy.save', 'np.save', (['fname', 'sgram'], {}), '(fname, sgram)\n', (424, 438), True, 'import numpy as np\n'), ((1307, 1324), 'tqdm.tqdm', 'tqdm', (['audio_files'], {}), '(audio_files)\n', (1311, 1324), False, 'from tqdm import tqdm\n'), ((525, 565), 'click.Choice', 'click.Choice', (["['VoxCeleb1', 'VoxCeleb2']"], {}), "(['VoxCeleb1', 'VoxCeleb2'])\n", (537, 565), False, 'import click\n'), ((1381, 1409), 'os.path.splitext', 
'os.path.splitext', (['audio_file'], {}), '(audio_file)\n', (1397, 1409), False, 'import os\n')] |
import airsim
import cv2
import time
import os
import numpy as np
from tensorflow.keras.models import load_model as tf_load_model
MIN_ALTITUDE = 5
MAX_ALTITUDE = 8
def go_back(client, home):
client.moveToPositionAsync(home.x_val, home.y_val, -1*np.random.randint(MIN_ALTITUDE,MAX_ALTITUDE+1), 5).join()
time.sleep(0.5)
client.moveToPositionAsync(np.random.randint(home.x_val-3,home.x_val+4), np.random.randint(home.y_val-3,home.y_val+4), -1*np.random.randint(MIN_ALTITUDE,MAX_ALTITUDE+1), 5).join()
time.sleep(0.5)
client.moveByVelocityAsync(0, 0, -0.0, 5).join()
def is_in_bounds_2d(pos, home):
return distance_2d(pos, home) < MAX_RADIUS
def distance_2d(pos, home):
return np.sqrt((home.x_val-pos.x_val)**2 + (home.y_val-pos.y_val)**2)
#load horizontal pre-trained model
def load_hor_trained_model():
loaded_model = tf_load_model("./models/modelHorizontalImage.h5")
print("Model restored.")
return loaded_model
#load vertical pre-trained model
def load_ver_trained_model():
loaded_model = tf_load_model("./models/modelVerticalNN_2.h5")
print("Model restored.")
return loaded_model
def get_image(client):
image_buf = np.zeros((1, 432 , 768, 4))
image_response = client.simGetImages([airsim.ImageRequest(3, airsim.ImageType.Scene, False, False)])[0]
png = client.simGetImages([airsim.ImageRequest(3, airsim.ImageType.Scene)])[0]
image1d = np.frombuffer(image_response.image_data_uint8, dtype=np.uint8)
image_rgba = image1d.reshape(image_response.height, image_response.width, 3)
image_rgba = cv2.cvtColor(image_rgba,cv2.COLOR_RGBA2BGR)
image_buf = image_rgba.copy()
image_buf = cv2.resize(image_buf,(150,150))
return image_buf, png
def get_action_test(curr_state, my_model_ver):
actions_index = my_model_ver.predict(np.array([[curr_state]]))[0][0]
print("Actions: ",actions_index)
return round(actions_index)
def interpret_action_hor(action):
if action == 0:
quad_offset = (-1, 0, 0, 0)
elif action == 1:
quad_offset = (0, -1, 0, 0)
elif action == 2:
quad_offset = (0, 1, 0, 0)
elif action == 3:
quad_offset = (0, 0, 0, 1)
elif action == 4:
quad_offset = (1, 0, 0, 0)
return quad_offset
def interpret_action_ver(action):
if action == 0:
quad_offset = (0, 0, 1, 0)
elif action == 1:
quad_offset = (0, 0, 0.3, 0)
elif action == 2:
quad_offset = (0, 0, 2, 0)
elif action == 3:
quad_offset = (0, 0, 0, 1)
elif action == 4:
quad_offset = (0, 0, 0.1, 0)
return quad_offset
def testNetworks():
flag_stop = "a"
snapshot_index = 0
my_model_hor = load_hor_trained_model()
my_model_ver = load_ver_trained_model()
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
home = client.getMultirotorState().kinematics_estimated.position
while flag_stop != "stop":
go_back(client, home)
trigger_hor = 0
trigger_ver = 0
step = 0
while True and step < 40:
#Stop and study data
client.moveByVelocityAsync(0, 0, -0.0, 5)
#Horizontal movement
if not trigger_hor:
current_position = client.getMultirotorState().kinematics_estimated.position
observe, png = get_image(client)
observe = np.array(observe, dtype=np.float32)
observe = observe.reshape(1,150,150,3)
actions_index = my_model_hor.predict(observe)[0]
action_index = np.argmax(actions_index)
next_action = interpret_action_hor(action_index)
#Vertical movement
else:
curr_state = np.abs(client.getLidarData().pose.position.z_val)
action_index = get_action_test(curr_state, my_model_ver)
next_action = interpret_action_ver(action_index)
new_vel_x = next_action[0]
new_vel_y = next_action[1]
new_vel_z = next_action[2]
trigger = next_action[3]
if trigger and not trigger_hor:
trigger_hor = 1
elif trigger and not trigger_ver:
trigger_ver = 1
print(" Action index: ",action_index," ====== moving at (" + str(new_vel_x) + " " + str(new_vel_y) + " " + str(new_vel_z) + ")")
client.moveByVelocityAsync(new_vel_x, new_vel_y, new_vel_z, 1).join()
time.sleep(0.001)
step += 1
#Vertical reset
if trigger_hor:
new_state = np.abs(client.getLidarData().pose.position.z_val)
if new_state <= 0.1:
if trigger_ver:
print("Landed.")
break
elif new_vel_z <= 0.3:
print("Moving near ground.")
else:
print("Collision.")
break
elif new_state > 0.1 and trigger_ver:
print("Error, not landed.")
break
flag_stop = input("Digitare stop se si vuole terminare.")
if __name__ == "__main__":
testNetworks()
| [
"tensorflow.keras.models.load_model",
"numpy.argmax",
"cv2.cvtColor",
"numpy.frombuffer",
"numpy.zeros",
"airsim.ImageRequest",
"time.sleep",
"airsim.MultirotorClient",
"numpy.random.randint",
"numpy.array",
"cv2.resize",
"numpy.sqrt"
] | [((313, 328), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (323, 328), False, 'import time\n'), ((517, 532), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (527, 532), False, 'import time\n'), ((706, 776), 'numpy.sqrt', 'np.sqrt', (['((home.x_val - pos.x_val) ** 2 + (home.y_val - pos.y_val) ** 2)'], {}), '((home.x_val - pos.x_val) ** 2 + (home.y_val - pos.y_val) ** 2)\n', (713, 776), True, 'import numpy as np\n'), ((854, 903), 'tensorflow.keras.models.load_model', 'tf_load_model', (['"""./models/modelHorizontalImage.h5"""'], {}), "('./models/modelHorizontalImage.h5')\n", (867, 903), True, 'from tensorflow.keras.models import load_model as tf_load_model\n'), ((1040, 1086), 'tensorflow.keras.models.load_model', 'tf_load_model', (['"""./models/modelVerticalNN_2.h5"""'], {}), "('./models/modelVerticalNN_2.h5')\n", (1053, 1086), True, 'from tensorflow.keras.models import load_model as tf_load_model\n'), ((1180, 1206), 'numpy.zeros', 'np.zeros', (['(1, 432, 768, 4)'], {}), '((1, 432, 768, 4))\n', (1188, 1206), True, 'import numpy as np\n'), ((1413, 1475), 'numpy.frombuffer', 'np.frombuffer', (['image_response.image_data_uint8'], {'dtype': 'np.uint8'}), '(image_response.image_data_uint8, dtype=np.uint8)\n', (1426, 1475), True, 'import numpy as np\n'), ((1574, 1618), 'cv2.cvtColor', 'cv2.cvtColor', (['image_rgba', 'cv2.COLOR_RGBA2BGR'], {}), '(image_rgba, cv2.COLOR_RGBA2BGR)\n', (1586, 1618), False, 'import cv2\n'), ((1668, 1701), 'cv2.resize', 'cv2.resize', (['image_buf', '(150, 150)'], {}), '(image_buf, (150, 150))\n', (1678, 1701), False, 'import cv2\n'), ((2777, 2802), 'airsim.MultirotorClient', 'airsim.MultirotorClient', ([], {}), '()\n', (2800, 2802), False, 'import airsim\n'), ((4534, 4551), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (4544, 4551), False, 'import time\n'), ((360, 409), 'numpy.random.randint', 'np.random.randint', (['(home.x_val - 3)', '(home.x_val + 4)'], {}), '(home.x_val - 3, home.x_val + 4)\n', (377, 409), 
True, 'import numpy as np\n'), ((406, 455), 'numpy.random.randint', 'np.random.randint', (['(home.y_val - 3)', '(home.y_val + 4)'], {}), '(home.y_val - 3, home.y_val + 4)\n', (423, 455), True, 'import numpy as np\n'), ((1250, 1310), 'airsim.ImageRequest', 'airsim.ImageRequest', (['(3)', 'airsim.ImageType.Scene', '(False)', '(False)'], {}), '(3, airsim.ImageType.Scene, False, False)\n', (1269, 1310), False, 'import airsim\n'), ((1347, 1393), 'airsim.ImageRequest', 'airsim.ImageRequest', (['(3)', 'airsim.ImageType.Scene'], {}), '(3, airsim.ImageType.Scene)\n', (1366, 1393), False, 'import airsim\n'), ((1816, 1840), 'numpy.array', 'np.array', (['[[curr_state]]'], {}), '([[curr_state]])\n', (1824, 1840), True, 'import numpy as np\n'), ((3426, 3461), 'numpy.array', 'np.array', (['observe'], {'dtype': 'np.float32'}), '(observe, dtype=np.float32)\n', (3434, 3461), True, 'import numpy as np\n'), ((3614, 3638), 'numpy.argmax', 'np.argmax', (['actions_index'], {}), '(actions_index)\n', (3623, 3638), True, 'import numpy as np\n'), ((251, 300), 'numpy.random.randint', 'np.random.randint', (['MIN_ALTITUDE', '(MAX_ALTITUDE + 1)'], {}), '(MIN_ALTITUDE, MAX_ALTITUDE + 1)\n', (268, 300), True, 'import numpy as np\n'), ((455, 504), 'numpy.random.randint', 'np.random.randint', (['MIN_ALTITUDE', '(MAX_ALTITUDE + 1)'], {}), '(MIN_ALTITUDE, MAX_ALTITUDE + 1)\n', (472, 504), True, 'import numpy as np\n')] |
import calendar
from datetime import datetime
import locale
import unicodedata
import numpy as np
import pytest
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timedelta,
Timestamp,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
class TestDatetime64:
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex.millisecond
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex([]).millisecond
def test_datetimeindex_accessors(self):
dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
dti_tz = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern"
)
for dti in [dti_naive, dti_tz]:
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.isocalendar().week[0] == 1
assert dti.isocalendar().week[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
dti.name = "name"
# non boolean accessors -> return Index
for accessor in DatetimeArray._field_ops:
if accessor in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == "name"
# boolean accessors -> return array
for accessor in DatetimeArray._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name")
tm.assert_index_equal(res, exp)
def test_datetimeindex_accessors2(self):
dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
assert sum(dti.is_year_start) == 0
assert sum(dti.is_year_end) == 1
def test_datetimeindex_accessors3(self):
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
msg = "Custom business days is not supported by is_month_start"
with pytest.raises(ValueError, match=msg):
dti.is_month_start
def test_datetimeindex_accessors4(self):
dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
assert dti.is_month_start[0] == 1
def test_datetimeindex_accessors5(self):
with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
tests = [
(Timestamp("2013-06-01", freq="M").is_month_start, 1),
(Timestamp("2013-06-01", freq="BM").is_month_start, 0),
(Timestamp("2013-06-03", freq="M").is_month_start, 0),
(Timestamp("2013-06-03", freq="BM").is_month_start, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_month_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_quarter_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_year_end, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_month_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_quarter_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_year_start, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_month_end, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_quarter_end, 0),
(Timestamp("2013-03-31", freq="QS-FEB").is_year_end, 0),
(Timestamp("2013-02-01", freq="QS-FEB").is_month_start, 1),
(Timestamp("2013-02-01", freq="QS-FEB").is_quarter_start, 1),
(Timestamp("2013-02-01", freq="QS-FEB").is_year_start, 1),
(Timestamp("2013-06-30", freq="BQ").is_month_end, 0),
(Timestamp("2013-06-30", freq="BQ").is_quarter_end, 0),
(Timestamp("2013-06-30", freq="BQ").is_year_end, 0),
(Timestamp("2013-06-28", freq="BQ").is_month_end, 1),
(Timestamp("2013-06-28", freq="BQ").is_quarter_end, 1),
(Timestamp("2013-06-28", freq="BQ").is_year_end, 0),
(Timestamp("2013-06-30", freq="BQS-APR").is_month_end, 0),
(Timestamp("2013-06-30", freq="BQS-APR").is_quarter_end, 0),
(Timestamp("2013-06-30", freq="BQS-APR").is_year_end, 0),
(Timestamp("2013-06-28", freq="BQS-APR").is_month_end, 1),
(Timestamp("2013-06-28", freq="BQS-APR").is_quarter_end, 1),
(Timestamp("2013-03-29", freq="BQS-APR").is_year_end, 1),
(Timestamp("2013-11-01", freq="AS-NOV").is_year_start, 1),
(Timestamp("2013-10-31", freq="AS-NOV").is_year_end, 1),
(Timestamp("2012-02-01").days_in_month, 29),
(Timestamp("2013-02-01").days_in_month, 28),
]
for ts, value in tests:
assert ts == value
def test_datetimeindex_accessors6(self):
# GH 6538: Check that DatetimeIndex and its TimeStamp elements
# return the same weekofyear accessor close to new year w/ tz
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
dates = DatetimeIndex(dates, tz="Europe/Brussels")
expected = [52, 1, 1]
assert dates.isocalendar().week.tolist() == expected
assert [d.weekofyear for d in dates] == expected
# GH 12806
# error: Unsupported operand types for + ("List[None]" and "List[str]")
@pytest.mark.parametrize(
"time_locale", [None] + (tm.get_locales() or []) # type: ignore[operator]
)
def test_datetime_name_accessors(self, time_locale):
# Test Monday -> Sunday and January -> December, in that sequence
if time_locale is None:
# If the time_locale is None, day-name and month_name should
# return the english attributes
expected_days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
expected_months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_days = calendar.day_name[:]
expected_months = calendar.month_name[1:]
# GH#11128
dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
english_days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
for day, name, eng_name in zip(range(4, 11), expected_days, english_days):
name = name.capitalize()
assert dti.day_name(locale=time_locale)[day] == name
assert dti.day_name(locale=None)[day] == eng_name
ts = Timestamp(datetime(2016, 4, day))
assert ts.day_name(locale=time_locale) == name
dti = dti.append(DatetimeIndex([pd.NaT]))
assert np.isnan(dti.day_name(locale=time_locale)[-1])
ts = Timestamp(pd.NaT)
assert np.isnan(ts.day_name(locale=time_locale))
# GH#12805
dti = date_range(freq="M", start="2012", end="2013")
result = dti.month_name(locale=time_locale)
expected = Index([month.capitalize() for month in expected_months])
# work around different normalization schemes
# https://github.com/pandas-dev/pandas/issues/22342
result = result.str.normalize("NFD")
expected = expected.str.normalize("NFD")
tm.assert_index_equal(result, expected)
for date, expected in zip(dti, expected_months):
result = date.month_name(locale=time_locale)
expected = expected.capitalize()
result = unicodedata.normalize("NFD", result)
expected = unicodedata.normalize("NFD", result)
assert result == expected
dti = dti.append(DatetimeIndex([pd.NaT]))
assert np.isnan(dti.month_name(locale=time_locale)[-1])
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
tm.assert_index_equal(dti.nanosecond, Index(np.arange(10, dtype=np.int64)))
def test_iter_readonly():
# GH#28055 ints_to_pydatetime with readonly array
arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")])
arr.setflags(write=False)
dti = pd.to_datetime(arr)
list(dti)
def test_week_and_weekofyear_are_deprecated():
# GH#33595 Deprecate week and weekofyear
idx = date_range(start="2019-12-29", freq="D", periods=4)
with tm.assert_produces_warning(FutureWarning):
idx.week
with tm.assert_produces_warning(FutureWarning):
idx.weekofyear
def test_add_timedelta_preserves_freq():
# GH#37295 should hold for any DTI with freq=None or Tick freq
tz = "Canada/Eastern"
dti = date_range(
start=Timestamp("2019-03-26 00:00:00-0400", tz=tz),
end=Timestamp("2020-10-17 00:00:00-0400", tz=tz),
freq="D",
)
result = dti + Timedelta(days=1)
assert result.freq == dti.freq
| [
"unicodedata.normalize",
"pandas.Timestamp",
"pandas.date_range",
"pandas._testing.set_locale",
"pandas.offsets.CustomBusinessDay",
"pandas._testing.assert_produces_warning",
"numpy.datetime64",
"pandas.DatetimeIndex",
"datetime.datetime",
"pytest.raises",
"pandas.to_datetime",
"pandas._testin... | [((11494, 11513), 'pandas.to_datetime', 'pd.to_datetime', (['arr'], {}), '(arr)\n', (11508, 11513), True, 'import pandas as pd\n'), ((11632, 11683), 'pandas.date_range', 'date_range', ([], {'start': '"""2019-12-29"""', 'freq': '"""D"""', 'periods': '(4)'}), "(start='2019-12-29', freq='D', periods=4)\n", (11642, 11683), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((4656, 4713), 'pandas.offsets.CustomBusinessDay', 'offsets.CustomBusinessDay', ([], {'weekmask': '"""Sun Mon Tue Wed Thu"""'}), "(weekmask='Sun Mon Tue Wed Thu')\n", (4681, 4713), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5004, 5061), 'pandas.DatetimeIndex', 'DatetimeIndex', (["['2000-01-01', '2000-01-02', '2000-01-03']"], {}), "(['2000-01-01', '2000-01-02', '2000-01-03'])\n", (5017, 5061), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7926, 7968), 'pandas.DatetimeIndex', 'DatetimeIndex', (['dates'], {'tz': '"""Europe/Brussels"""'}), "(dates, tz='Europe/Brussels')\n", (7939, 7968), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((10163, 10180), 'pandas.Timestamp', 'Timestamp', (['pd.NaT'], {}), '(pd.NaT)\n', (10172, 10180), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((10272, 10318), 'pandas.date_range', 'date_range', ([], {'freq': '"""M"""', 'start': '"""2012"""', 'end': '"""2013"""'}), "(freq='M', start='2012', end='2013')\n", (10282, 10318), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((10665, 10704), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (10686, 10704), True, 'import pandas._testing as tm\n'), ((11693, 11734), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', 
(['FutureWarning'], {}), '(FutureWarning)\n', (11719, 11734), True, 'import pandas._testing as tm\n'), ((11762, 11803), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (11788, 11803), True, 'import pandas._testing as tm\n'), ((12147, 12164), 'pandas.Timedelta', 'Timedelta', ([], {'days': '(1)'}), '(days=1)\n', (12156, 12164), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((473, 513), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': 'msg'}), '(AttributeError, match=msg)\n', (486, 513), False, 'import pytest\n'), ((637, 677), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': 'msg'}), '(AttributeError, match=msg)\n', (650, 677), False, 'import pytest\n'), ((4023, 4054), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['res', 'exp'], {}), '(res, exp)\n', (4044, 4054), True, 'import pandas._testing as tm\n'), ((4113, 4164), 'pandas.DatetimeIndex', 'DatetimeIndex', (['[]'], {'freq': '"""D"""', 'tz': 'dti.tz', 'name': '"""name"""'}), "([], freq='D', tz=dti.tz, name='name')\n", (4126, 4164), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((4177, 4208), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['res', 'exp'], {}), '(res, exp)\n', (4198, 4208), True, 'import pandas._testing as tm\n'), ((4739, 4760), 'datetime.datetime', 'datetime', (['(2013)', '(4)', '(30)'], {}), '(2013, 4, 30)\n', (4747, 4760), False, 'from datetime import datetime\n'), ((4875, 4911), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (4888, 4911), False, 'import pytest\n'), ((5164, 5234), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {'match': '"""The \'freq\' argument"""'}), '(FutureWarning, match="The \'freq\' argument")\n', (5190, 5234), True, 'import pandas._testing as 
tm\n'), ((10063, 10086), 'pandas.DatetimeIndex', 'DatetimeIndex', (['[pd.NaT]'], {}), '([pd.NaT])\n', (10076, 10086), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((10887, 10923), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'result'], {}), "('NFD', result)\n", (10908, 10923), False, 'import unicodedata\n'), ((10947, 10983), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'result'], {}), "('NFD', result)\n", (10968, 10983), False, 'import unicodedata\n'), ((11048, 11071), 'pandas.DatetimeIndex', 'DatetimeIndex', (['[pd.NaT]'], {}), '([pd.NaT])\n', (11061, 11071), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((11203, 11216), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (11212, 11216), True, 'import numpy as np\n'), ((11405, 11451), 'numpy.datetime64', 'np.datetime64', (['"""2012-02-15T12:00:00.000000000"""'], {}), "('2012-02-15T12:00:00.000000000')\n", (11418, 11451), True, 'import numpy as np\n'), ((12000, 12044), 'pandas.Timestamp', 'Timestamp', (['"""2019-03-26 00:00:00-0400"""'], {'tz': 'tz'}), "('2019-03-26 00:00:00-0400', tz=tz)\n", (12009, 12044), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((12058, 12102), 'pandas.Timestamp', 'Timestamp', (['"""2020-10-17 00:00:00-0400"""'], {'tz': 'tz'}), "('2020-10-17 00:00:00-0400', tz=tz)\n", (12067, 12102), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((691, 708), 'pandas.DatetimeIndex', 'DatetimeIndex', (['[]'], {}), '([])\n', (704, 708), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((813, 833), 'datetime.datetime', 'datetime', (['(1998)', '(1)', '(1)'], {}), '(1998, 1, 1)\n', (821, 833), False, 'from datetime import datetime\n'), ((924, 944), 'datetime.datetime', 'datetime', (['(1998)', '(1)', '(1)'], {}), '(1998, 1, 
1)\n', (932, 944), False, 'from datetime import datetime\n'), ((4301, 4321), 'datetime.datetime', 'datetime', (['(1998)', '(1)', '(1)'], {}), '(1998, 1, 1)\n', (4309, 4321), False, 'from datetime import datetime\n'), ((9233, 9275), 'pandas._testing.set_locale', 'tm.set_locale', (['time_locale', 'locale.LC_TIME'], {}), '(time_locale, locale.LC_TIME)\n', (9246, 9275), True, 'import pandas._testing as tm\n'), ((9449, 9469), 'datetime.datetime', 'datetime', (['(1998)', '(1)', '(1)'], {}), '(1998, 1, 1)\n', (9457, 9469), False, 'from datetime import datetime\n'), ((9955, 9977), 'datetime.datetime', 'datetime', (['(2016)', '(4)', 'day'], {}), '(2016, 4, day)\n', (9963, 9977), False, 'from datetime import datetime\n'), ((8272, 8288), 'pandas._testing.get_locales', 'tm.get_locales', ([], {}), '()\n', (8286, 8288), True, 'import pandas._testing as tm\n'), ((11271, 11300), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.int64'}), '(10, dtype=np.int64)\n', (11280, 11300), True, 'import numpy as np\n'), ((5275, 5308), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-01"""'], {'freq': '"""M"""'}), "('2013-06-01', freq='M')\n", (5284, 5308), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5346, 5380), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-01"""'], {'freq': '"""BM"""'}), "('2013-06-01', freq='BM')\n", (5355, 5380), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5418, 5451), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-03"""'], {'freq': '"""M"""'}), "('2013-06-03', freq='M')\n", (5427, 5451), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5489, 5523), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-03"""'], {'freq': '"""BM"""'}), "('2013-06-03', freq='BM')\n", (5498, 5523), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5561, 5598), 'pandas.Timestamp', 'Timestamp', 
(['"""2013-02-28"""'], {'freq': '"""Q-FEB"""'}), "('2013-02-28', freq='Q-FEB')\n", (5570, 5598), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5634, 5671), 'pandas.Timestamp', 'Timestamp', (['"""2013-02-28"""'], {'freq': '"""Q-FEB"""'}), "('2013-02-28', freq='Q-FEB')\n", (5643, 5671), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5709, 5746), 'pandas.Timestamp', 'Timestamp', (['"""2013-02-28"""'], {'freq': '"""Q-FEB"""'}), "('2013-02-28', freq='Q-FEB')\n", (5718, 5746), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5781, 5818), 'pandas.Timestamp', 'Timestamp', (['"""2013-03-01"""'], {'freq': '"""Q-FEB"""'}), "('2013-03-01', freq='Q-FEB')\n", (5790, 5818), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5856, 5893), 'pandas.Timestamp', 'Timestamp', (['"""2013-03-01"""'], {'freq': '"""Q-FEB"""'}), "('2013-03-01', freq='Q-FEB')\n", (5865, 5893), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((5933, 5970), 'pandas.Timestamp', 'Timestamp', (['"""2013-03-01"""'], {'freq': '"""Q-FEB"""'}), "('2013-03-01', freq='Q-FEB')\n", (5942, 5970), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6007, 6045), 'pandas.Timestamp', 'Timestamp', (['"""2013-03-31"""'], {'freq': '"""QS-FEB"""'}), "('2013-03-31', freq='QS-FEB')\n", (6016, 6045), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6081, 6119), 'pandas.Timestamp', 'Timestamp', (['"""2013-03-31"""'], {'freq': '"""QS-FEB"""'}), "('2013-03-31', freq='QS-FEB')\n", (6090, 6119), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6157, 6195), 'pandas.Timestamp', 'Timestamp', (['"""2013-03-31"""'], {'freq': '"""QS-FEB"""'}), "('2013-03-31', 
freq='QS-FEB')\n", (6166, 6195), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6230, 6268), 'pandas.Timestamp', 'Timestamp', (['"""2013-02-01"""'], {'freq': '"""QS-FEB"""'}), "('2013-02-01', freq='QS-FEB')\n", (6239, 6268), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6306, 6344), 'pandas.Timestamp', 'Timestamp', (['"""2013-02-01"""'], {'freq': '"""QS-FEB"""'}), "('2013-02-01', freq='QS-FEB')\n", (6315, 6344), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6384, 6422), 'pandas.Timestamp', 'Timestamp', (['"""2013-02-01"""'], {'freq': '"""QS-FEB"""'}), "('2013-02-01', freq='QS-FEB')\n", (6393, 6422), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6459, 6493), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-30"""'], {'freq': '"""BQ"""'}), "('2013-06-30', freq='BQ')\n", (6468, 6493), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6529, 6563), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-30"""'], {'freq': '"""BQ"""'}), "('2013-06-30', freq='BQ')\n", (6538, 6563), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6601, 6635), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-30"""'], {'freq': '"""BQ"""'}), "('2013-06-30', freq='BQ')\n", (6610, 6635), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6670, 6704), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-28"""'], {'freq': '"""BQ"""'}), "('2013-06-28', freq='BQ')\n", (6679, 6704), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6740, 6774), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-28"""'], {'freq': '"""BQ"""'}), "('2013-06-28', freq='BQ')\n", (6749, 6774), False, 'from pandas import DatetimeIndex, Index, Timedelta, 
Timestamp, date_range, offsets\n'), ((6812, 6846), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-28"""'], {'freq': '"""BQ"""'}), "('2013-06-28', freq='BQ')\n", (6821, 6846), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6881, 6920), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-30"""'], {'freq': '"""BQS-APR"""'}), "('2013-06-30', freq='BQS-APR')\n", (6890, 6920), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((6956, 6995), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-30"""'], {'freq': '"""BQS-APR"""'}), "('2013-06-30', freq='BQS-APR')\n", (6965, 6995), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7033, 7072), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-30"""'], {'freq': '"""BQS-APR"""'}), "('2013-06-30', freq='BQS-APR')\n", (7042, 7072), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7107, 7146), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-28"""'], {'freq': '"""BQS-APR"""'}), "('2013-06-28', freq='BQS-APR')\n", (7116, 7146), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7182, 7221), 'pandas.Timestamp', 'Timestamp', (['"""2013-06-28"""'], {'freq': '"""BQS-APR"""'}), "('2013-06-28', freq='BQS-APR')\n", (7191, 7221), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7259, 7298), 'pandas.Timestamp', 'Timestamp', (['"""2013-03-29"""'], {'freq': '"""BQS-APR"""'}), "('2013-03-29', freq='BQS-APR')\n", (7268, 7298), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7333, 7371), 'pandas.Timestamp', 'Timestamp', (['"""2013-11-01"""'], {'freq': '"""AS-NOV"""'}), "('2013-11-01', freq='AS-NOV')\n", (7342, 7371), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7408, 7446), 
'pandas.Timestamp', 'Timestamp', (['"""2013-10-31"""'], {'freq': '"""AS-NOV"""'}), "('2013-10-31', freq='AS-NOV')\n", (7417, 7446), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7481, 7504), 'pandas.Timestamp', 'Timestamp', (['"""2012-02-01"""'], {}), "('2012-02-01')\n", (7490, 7504), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n'), ((7542, 7565), 'pandas.Timestamp', 'Timestamp', (['"""2013-02-01"""'], {}), "('2013-02-01')\n", (7551, 7565), False, 'from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets\n')] |
import cv2
import random
import numpy as np
from got10k.trackers import Tracker
from config import config as cfg, finalize_configs
from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger
from train import ResNetFPNModel
from common import CustomResize, box_to_point8, point8_to_box
class PrecomputingReferenceTracker(Tracker):
    """got10k-style tracker that extracts the reference (first-frame) features
    once in ``init`` and reuses them for every subsequent ``update``.

    Construction optionally builds two tensorpack predictors:
    a feature-extraction predictor (RPN features for the ground-truth box)
    and the tracking predictor itself. Both are loaded from hard-coded
    checkpoint paths selected by the ``model`` argument.
    """

    def __init__(self, name, need_network=True, need_img=True, model="best"):
        """Create the tracker.

        Args:
            name: tracker name reported to the got10k toolkit.
            need_network: if False, no predictors are built (useful for
                oracle/baseline variants that override ``_update``).
            need_img: if False, ``update`` passes ``None`` instead of the frame.
            model: checkpoint selector; one of the hard-coded keys below or
                ``"checkpoint:<path>"`` for an explicit path.
        """
        super().__init__(name=name, is_deterministic=True)
        self._resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)
        self._prev_box = None       # last predicted box, (x1, y1, x2, y2)
        self._ff_gt_feats = None    # precomputed first-frame reference features
        self._need_network = need_network
        self._need_img = need_img
        self._rotated_bbox = None   # if set, returned instead of the axis-aligned box
        if need_network:
            # tensorpack requires a logger dir; randomize to avoid clashes
            # between concurrently running evaluations.
            logger.set_logger_dir("/tmp/test_log_/" + str(random.randint(0, 10000)), 'd')
            if model == "best":
                load = "train_log/hard_mining3/model-1360500"
            elif model == "nohardexamples":
                load = "train_log/condrcnn_all_2gpu_lrreduce2/model-1200500"
            elif model == "newrpn":
                load = "train_log/newrpn1/model"
            elif model =="resnet50_nohardexamples":
                load = "train_log/condrcnn_all_resnet50/model-1200500"
                # ResNet-50 block layout (the default config is a deeper backbone)
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
            elif model =="resnet50":
                load = "train_log/hard_mining3_resnet50/model-1360500"
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
            elif model == "gotonly":
                load = "train_log/hard_mining3_onlygot/model-1361000"
            elif model.startswith("checkpoint:"):
                load = model.replace("checkpoint:", "")
            else:
                assert False, ("unknown model", model)
            from dataset import DetectionDataset
            # init tensorpack model
            # cfg.freeze(False)
            DetectionDataset() # initialize the config with information from our dataset
            # Temporarily switch the global config into feature-extraction mode
            # while the extraction predictor is built, then restore track mode.
            cfg.EXTRACT_GT_FEATURES = True
            cfg.MODE_TRACK = False
            extract_model = ResNetFPNModel()
            extract_ff_feats_cfg = PredictConfig(
                model=extract_model,
                session_init=get_model_loader(load),
                input_names=['image', 'roi_boxes'],
                output_names=['rpn/feature'])
            finalize_configs(is_training=False)
            self._extract_func = OfflinePredictor(extract_ff_feats_cfg)
            cfg.EXTRACT_GT_FEATURES = False
            cfg.MODE_TRACK = True
            cfg.USE_PRECOMPUTED_REF_FEATURES = True
            self._pred_func = self._make_pred_func(load)

    def _resize_image_together_with_boxes(self, img, *list_of_box_or_boxes):
        """Resize ``img`` with the test-time resizer and apply the identical
        geometric transform to each given box array.

        Each extra argument may be a single box (1-D, shape (4,)) or a batch
        of boxes (2-D); single boxes are returned with their original shape.
        Returns ``(resized_img, boxes)`` where ``boxes`` is a single array if
        exactly one box argument was passed, else a list.
        """
        resized_img, params = self._resizer.augment_return_params(img)
        res_boxes = []
        for box_or_boxes in list_of_box_or_boxes:
            expand = len(box_or_boxes.shape) == 1
            if expand:
                boxes = box_or_boxes[np.newaxis]
            else:
                boxes = box_or_boxes
            # Boxes are transformed via their 4 corner points so that any
            # coordinate-level augmentation applies exactly.
            points = box_to_point8(boxes)
            points = self._resizer.augment_coords(points, params)
            resized_boxes = point8_to_box(points)
            if expand:
                resized_boxes = np.squeeze(resized_boxes, axis=0)
            res_boxes.append(resized_boxes)
        if len(res_boxes) == 1:
            res_boxes = res_boxes[0]
        return resized_img, res_boxes

    def _make_pred_func(self, load):
        """Build the tracking predictor from checkpoint path ``load``."""
        from train import ResNetFPNTrackModel
        pred_model = ResNetFPNTrackModel()
        predcfg = PredictConfig(
            model=pred_model,
            session_init=get_model_loader(load),
            input_names=pred_model.get_inference_tensor_names()[0],
            output_names=pred_model.get_inference_tensor_names()[1])
        return OfflinePredictor(predcfg)

    def init(self, image, box):
        """Initialize tracking with the first frame and its ground-truth box.

        ``box`` comes in as (x, y, w, h) and is converted in place to
        (x1, y1, x2, y2) — NOTE(review): this mutates the caller's array.
        """
        # [..., ::-1]: channel reversal — presumably RGB(PIL) -> BGR; confirm
        # against the training pipeline's expected channel order.
        ref_img = np.array(image)[..., ::-1]
        if ref_img is None:
            raise ValueError("failed to load img" + image.filename)
        box[2] += box[0]
        box[3] += box[1]
        ref_bbox = box
        self._prev_box = box
        if self._need_network:
            resized_ref_img, resized_ref_box = self._resize_image_together_with_boxes(ref_img, ref_bbox)
            # Precompute reference features once; reused for all later frames.
            feats, = self._extract_func(resized_ref_img, resized_ref_box[np.newaxis])
            self._ff_gt_feats = feats[0]

    def update(self, image, use_confidences=False):
        """Track the object into the next frame.

        Returns the box as (x, y, w, h); with ``use_confidences=True`` also
        returns the detector score. If ``_update`` yields no box, the previous
        box is reported unchanged.
        """
        if self._need_img:
            target_img = np.array(image)[..., ::-1]
            if target_img is None:
                raise ValueError("failed to load img" + str(target_img))
        else:
            target_img = None
        new_box, score = self._update(target_img)
        if new_box is not None:
            self._prev_box = new_box
        # Convert internal (x1, y1, x2, y2) back to (x, y, w, h) for got10k.
        ret_box = self._prev_box.copy()
        ret_box[2] -= ret_box[0]
        ret_box[3] -= ret_box[1]
        if self._rotated_bbox is not None:
            ret_box = self._rotated_bbox
        if use_confidences:
            return ret_box, score
        else:
            return ret_box
class ArgmaxTracker(PrecomputingReferenceTracker):
    """Tracker that always keeps the single highest-scoring detection."""

    def __init__(self):
        super().__init__("ArgmaxTracker")

    def _update(self, img):
        """Run the detector on ``img`` and return (best_box, best_score),
        or ``(None, None)`` when nothing was detected."""
        from eval import predict_image_track_with_precomputed_ref_features
        detections = predict_image_track_with_precomputed_ref_features(
            img, self._ff_gt_feats, self._pred_func)
        if not detections:
            return None, None
        boxes = np.array([det.box for det in detections])
        scores = np.array([det.score for det in detections])
        # Detections are assumed ordered by score, so index 0 is the argmax.
        return boxes[0], scores[0]
# just there to test the precomputing on against
# not intended to be used anymore
class NonPrecomputingArgmaxTracker(Tracker):
    """Baseline argmax tracker that feeds the raw reference frame and box to
    the network on every update instead of precomputing reference features.
    Kept only to validate ``PrecomputingReferenceTracker`` against.
    """

    def __init__(self):
        super().__init__(name='ArgmaxTracker', is_deterministic=True)
        self._ref_img = None   # first frame (BGR, as loaded by cv2)
        self._ref_bbox = None  # first-frame box, (x1, y1, x2, y2)
        self._prev_box = None  # last predicted box, (x1, y1, x2, y2)
        model = self._init_model()
        # hard-coded checkpoint for this baseline
        load = "train_log/condrcnn_onlygot/model-460000"
        predcfg = PredictConfig(
            model=model,
            session_init=get_model_loader(load),
            input_names=model.get_inference_tensor_names()[0],
            output_names=model.get_inference_tensor_names()[1])
        self._pred_func = OfflinePredictor(predcfg)

    def _init_model(self):
        """Build the (unfrozen) track model and finalize the global config."""
        logger.set_logger_dir("/tmp/test_log/", 'd')
        from dataset import DetectionDataset
        from train import ResNetFPNTrackModel
        # init tensorpack model
        cfg.freeze(False)
        model = ResNetFPNTrackModel()
        DetectionDataset() # initialize the config with information from our dataset
        finalize_configs(is_training=False)
        return model

    def init(self, image, box):
        """Store the first frame and convert ``box`` from (x, y, w, h) to
        (x1, y1, x2, y2) — NOTE(review): mutates the caller's array."""
        self._ref_img = cv2.imread(image.filename, cv2.IMREAD_COLOR)
        if self._ref_img is None:
            raise ValueError("failed to load img" + str(self._ref_img))
        box[2] += box[0]
        box[3] += box[1]
        self._ref_bbox = box
        self._prev_box = box

    def update(self, image):
        """Detect in the next frame; keep the previous box if nothing found.
        Returns the box as (x, y, w, h)."""
        target_img = cv2.imread(image.filename, cv2.IMREAD_COLOR)
        # assert target_img is not None
        if target_img is None:
            raise ValueError("failed to load img" + str(target_img))
        from eval import predict_image_track
        results = predict_image_track(target_img, self._ref_img, self._ref_bbox, self._pred_func)
        det_boxes = np.array([r.box for r in results])
        det_scores = np.array([r.score for r in results])
        if len(det_boxes) > 0:
            # take the top-scoring detection (results assumed score-sorted)
            self._prev_box = det_boxes[0]
        # convert internal (x1, y1, x2, y2) back to (x, y, w, h)
        ret_box = self._prev_box.copy()
        ret_box[2] -= ret_box[0]
        ret_box[3] -= ret_box[1]
        return ret_box
| [
"tensorpack.get_model_loader",
"random.randint",
"eval.predict_image_track_with_precomputed_ref_features",
"tensorpack.OfflinePredictor",
"config.finalize_configs",
"config.config.freeze",
"common.box_to_point8",
"common.CustomResize",
"dataset.DetectionDataset",
"cv2.imread",
"common.point8_to_... | [((516, 584), 'common.CustomResize', 'CustomResize', (['cfg.PREPROC.TEST_SHORT_EDGE_SIZE', 'cfg.PREPROC.MAX_SIZE'], {}), '(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)\n', (528, 584), False, 'from common import CustomResize, box_to_point8, point8_to_box\n'), ((3594, 3615), 'train.ResNetFPNTrackModel', 'ResNetFPNTrackModel', ([], {}), '()\n', (3613, 3615), False, 'from train import ResNetFPNTrackModel\n'), ((3880, 3905), 'tensorpack.OfflinePredictor', 'OfflinePredictor', (['predcfg'], {}), '(predcfg)\n', (3896, 3905), False, 'from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger\n'), ((5384, 5478), 'eval.predict_image_track_with_precomputed_ref_features', 'predict_image_track_with_precomputed_ref_features', (['img', 'self._ff_gt_feats', 'self._pred_func'], {}), '(img, self._ff_gt_feats,\n self._pred_func)\n', (5433, 5478), False, 'from eval import predict_image_track_with_precomputed_ref_features\n'), ((5495, 5529), 'numpy.array', 'np.array', (['[r.box for r in results]'], {}), '([r.box for r in results])\n', (5503, 5529), True, 'import numpy as np\n'), ((5551, 5587), 'numpy.array', 'np.array', (['[r.score for r in results]'], {}), '([r.score for r in results])\n', (5559, 5587), True, 'import numpy as np\n'), ((6375, 6400), 'tensorpack.OfflinePredictor', 'OfflinePredictor', (['predcfg'], {}), '(predcfg)\n', (6391, 6400), False, 'from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger\n'), ((6437, 6481), 'tensorpack.logger.set_logger_dir', 'logger.set_logger_dir', (['"""/tmp/test_log/"""', '"""d"""'], {}), "('/tmp/test_log/', 'd')\n", (6458, 6481), False, 'from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger\n'), ((6613, 6630), 'config.config.freeze', 'cfg.freeze', (['(False)'], {}), '(False)\n', (6623, 6630), True, 'from config import config as cfg, finalize_configs\n'), ((6647, 6668), 'train.ResNetFPNTrackModel', 'ResNetFPNTrackModel', ([], {}), 
'()\n', (6666, 6668), False, 'from train import ResNetFPNTrackModel\n'), ((6677, 6695), 'dataset.DetectionDataset', 'DetectionDataset', ([], {}), '()\n', (6693, 6695), False, 'from dataset import DetectionDataset\n'), ((6763, 6798), 'config.finalize_configs', 'finalize_configs', ([], {'is_training': '(False)'}), '(is_training=False)\n', (6779, 6798), False, 'from config import config as cfg, finalize_configs\n'), ((6877, 6921), 'cv2.imread', 'cv2.imread', (['image.filename', 'cv2.IMREAD_COLOR'], {}), '(image.filename, cv2.IMREAD_COLOR)\n', (6887, 6921), False, 'import cv2\n'), ((7187, 7231), 'cv2.imread', 'cv2.imread', (['image.filename', 'cv2.IMREAD_COLOR'], {}), '(image.filename, cv2.IMREAD_COLOR)\n', (7197, 7231), False, 'import cv2\n'), ((7435, 7514), 'eval.predict_image_track', 'predict_image_track', (['target_img', 'self._ref_img', 'self._ref_bbox', 'self._pred_func'], {}), '(target_img, self._ref_img, self._ref_bbox, self._pred_func)\n', (7454, 7514), False, 'from eval import predict_image_track\n'), ((7535, 7569), 'numpy.array', 'np.array', (['[r.box for r in results]'], {}), '([r.box for r in results])\n', (7543, 7569), True, 'import numpy as np\n'), ((7591, 7627), 'numpy.array', 'np.array', (['[r.score for r in results]'], {}), '([r.score for r in results])\n', (7599, 7627), True, 'import numpy as np\n'), ((1944, 1962), 'dataset.DetectionDataset', 'DetectionDataset', ([], {}), '()\n', (1960, 1962), False, 'from dataset import DetectionDataset\n'), ((2129, 2145), 'train.ResNetFPNModel', 'ResNetFPNModel', ([], {}), '()\n', (2143, 2145), False, 'from train import ResNetFPNModel\n'), ((2396, 2431), 'config.finalize_configs', 'finalize_configs', ([], {'is_training': '(False)'}), '(is_training=False)\n', (2412, 2431), False, 'from config import config as cfg, finalize_configs\n'), ((2465, 2503), 'tensorpack.OfflinePredictor', 'OfflinePredictor', (['extract_ff_feats_cfg'], {}), '(extract_ff_feats_cfg)\n', (2481, 2503), False, 'from tensorpack import 
PredictConfig, get_model_loader, OfflinePredictor, logger\n'), ((3112, 3132), 'common.box_to_point8', 'box_to_point8', (['boxes'], {}), '(boxes)\n', (3125, 3132), False, 'from common import CustomResize, box_to_point8, point8_to_box\n'), ((3227, 3248), 'common.point8_to_box', 'point8_to_box', (['points'], {}), '(points)\n', (3240, 3248), False, 'from common import CustomResize, box_to_point8, point8_to_box\n'), ((3957, 3972), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3965, 3972), True, 'import numpy as np\n'), ((3304, 3337), 'numpy.squeeze', 'np.squeeze', (['resized_boxes'], {'axis': '(0)'}), '(resized_boxes, axis=0)\n', (3314, 3337), True, 'import numpy as np\n'), ((3704, 3726), 'tensorpack.get_model_loader', 'get_model_loader', (['load'], {}), '(load)\n', (3720, 3726), False, 'from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger\n'), ((4550, 4565), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4558, 4565), True, 'import numpy as np\n'), ((6198, 6220), 'tensorpack.get_model_loader', 'get_model_loader', (['load'], {}), '(load)\n', (6214, 6220), False, 'from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger\n'), ((2262, 2284), 'tensorpack.get_model_loader', 'get_model_loader', (['load'], {}), '(load)\n', (2278, 2284), False, 'from tensorpack import PredictConfig, get_model_loader, OfflinePredictor, logger\n'), ((842, 866), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (856, 866), False, 'import random\n')] |
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from .mujoco_env import MujocoEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc import autoargs
class SwimmerEnv(MujocoEnv, Serializable):
    """MuJoCo swimmer locomotion task: reward is forward torso velocity
    minus a quadratic penalty on the (scaled) control signal."""

    FILE = 'swimmer.xml'

    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for controls')
    def __init__(
            self,
            ctrl_cost_coeff=1e-2,
            *args, **kwargs):
        # Weight of the quadratic control penalty in the reward.
        self.ctrl_cost_coeff = ctrl_cost_coeff
        super(SwimmerEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())

    def get_current_obs(self):
        """Observation: joint positions, joint velocities, torso COM."""
        pieces = [
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            self.get_body_com("torso").flat,
        ]
        return np.concatenate(pieces).reshape(-1)

    def step(self, action):
        """Advance the simulation one step and return (obs, reward, done)."""
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        # Normalize the action by half the bound range before penalizing it.
        lb, ub = self.action_bounds
        scaling = 0.5 * (ub - lb)
        ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(np.square(action / scaling))
        forward_reward = self.get_body_comvel("torso")[0]
        # This environment never terminates on its own.
        return Step(next_obs, forward_reward - ctrl_cost, False)

    @overrides
    def log_diagnostics(self, paths):
        """Log forward-progress statistics over a batch of trajectories.

        Progress per path is the change of the third-from-last observation
        entry (torso COM x) between the first and last timestep.
        """
        progs = [path["observations"][-1][-3] - path["observations"][0][-3]
                 for path in paths]
        for prefix, stat in (('Average', np.mean), ('Max', np.max),
                             ('Min', np.min), ('Std', np.std)):
            logger.record_tabular(prefix + 'ForwardProgress', stat(progs))
| [
"numpy.std",
"numpy.square",
"rllab.envs.base.Step",
"numpy.max",
"numpy.mean",
"numpy.min",
"rllab.misc.autoargs.arg"
] | [((317, 403), 'rllab.misc.autoargs.arg', 'autoargs.arg', (['"""ctrl_cost_coeff"""'], {'type': 'float', 'help': '"""cost coefficient for controls"""'}), "('ctrl_cost_coeff', type=float, help=\n 'cost coefficient for controls')\n", (329, 403), False, 'from rllab.misc import autoargs\n'), ((1295, 1323), 'rllab.envs.base.Step', 'Step', (['next_obs', 'reward', 'done'], {}), '(next_obs, reward, done)\n', (1299, 1323), False, 'from rllab.envs.base import Step\n'), ((1563, 1577), 'numpy.mean', 'np.mean', (['progs'], {}), '(progs)\n', (1570, 1577), True, 'import numpy as np\n'), ((1631, 1644), 'numpy.max', 'np.max', (['progs'], {}), '(progs)\n', (1637, 1644), True, 'import numpy as np\n'), ((1698, 1711), 'numpy.min', 'np.min', (['progs'], {}), '(progs)\n', (1704, 1711), True, 'import numpy as np\n'), ((1765, 1778), 'numpy.std', 'np.std', (['progs'], {}), '(progs)\n', (1771, 1778), True, 'import numpy as np\n'), ((1128, 1155), 'numpy.square', 'np.square', (['(action / scaling)'], {}), '(action / scaling)\n', (1137, 1155), True, 'import numpy as np\n')] |
# !/usr/bin python3
# encoding : utf-8 -*-
# @author : <NAME>
# @file : shrodinger_equation_pinn.py
# @Time : 2021/12/4 15:11
import os
import sys
sys.path.append(os.curdir) # add ROOT to PATH
import time
import yaml
import random
import logging
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import optim
from torch.utils.data import DataLoader
from data.MyDataset import MyDataset
from models.PINN_models import PINN
from data.DataCreator import ShrodingerEquationDataCreator
# Module-level logging setup: console via basicConfig plus a timestamped
# file handler, shared by the whole training script.
logging.basicConfig(level=logging.NOTSET,
                    format="%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
# root logger, INFO and above
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# file handler writing to ./logs/<YYYYmmddHHMM>.log (directory must exist)
run_time = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
log_file_path = os.path.join("./logs", "{}.log".format(run_time))
fh = logging.FileHandler(log_file_path, mode="w")
fh.setLevel(logging.NOTSET)
# same output format as the console
basic_format = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(basic_format)
# attach the file handler to the root logger
logger.addHandler(fh)
class ShrodingerEquationPinn(object):
    """Physics-informed neural network (PINN) for the Schrodinger equation.

    Builds the reference finite-difference solution via
    ``ShrodingerEquationDataCreator``, samples boundary / initial /
    collocation points from it, trains a small MLP against the boundary
    condition, the initial condition, and the PDE residual, then evaluates
    the trained network against the reference solution.
    """

    def __init__(self, conf, load_weight_path=None):
        """Set up data, model, optimizer, and output paths.

        Args:
            conf: hyperparameter dict (seeds, sample counts, batch sizes,
                model layers, output paths, ...).
            load_weight_path: optional checkpoint to load when
                ``conf["load_weight"] == "True"``.
        """
        super(ShrodingerEquationPinn, self).__init__()
        logger.info("PINN for Shrodinger Equation \n \n \n")
        logger.info("hyps list {}".format(conf))
        self.conf = conf
        # seed every RNG source for reproducibility
        random.seed(conf["seed"])
        torch.manual_seed(conf["seed"])
        torch.random.manual_seed(conf["seed"])
        torch.cuda.manual_seed_all(conf["seed"])
        np.random.seed(conf["seed"])
        # device
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        self.EPOCHS = conf["max_epochs"]
        # ---------------------------------------- #
        # ----------------- data ----------------- #
        # ---------------------------------------- #
        # Reference solution: finite difference with 4th-order Runge-Kutta.
        self.data_creator = ShrodingerEquationDataCreator(conf)
        begin_fd_time = time.time()
        logger.info("create data ...")
        self.data_creator.iter_func()
        over_fd_time = time.time()
        logger.info("finite difference with fourth lunge-kutta method uses {:.5f}s".format(over_fd_time - begin_fd_time))
        self.data_creator.plot_func(save_figure=True)
        logger.info("save figure as {}".format(self.data_creator.figure_output_path))
        # sample boundary / initial / interior collocation points
        data_dict = self.data_creator.sampling(boundary_num=conf["boundary_num"],
                                               initial_num=conf["initial_num"],
                                               common_num=conf["common_num"], seed=conf["seed"])
        # raw data
        boundary_data = data_dict["boundary"]
        initial_data = data_dict["initial"]
        common_data = data_dict["common"]
        # dataset
        boundary_dataset = MyDataset(boundary_data)
        initial_dataset = MyDataset(initial_data)
        common_dataset = MyDataset(common_data)
        # dataloader; a batch size of -1 means "full batch" (all samples)
        if conf["boundary_batch_size"] == -1:
            self.boundary_dataloader = DataLoader(boundary_dataset, batch_size=conf["boundary_num"],
                                                  collate_fn=boundary_dataset.collate_fn)
        else:
            self.boundary_dataloader = DataLoader(boundary_dataset, batch_size=conf["boundary_batch_size"],
                                                  collate_fn=boundary_dataset.collate_fn)
        if conf["initial_batch_size"] == -1:
            self.initial_dataloader = DataLoader(initial_dataset, batch_size=conf["initial_num"],
                                                 collate_fn=initial_dataset.collate_fn)
        else:
            self.initial_dataloader = DataLoader(initial_dataset, batch_size=conf["initial_batch_size"],
                                                 collate_fn=initial_dataset.collate_fn)
        if conf["common_batch_size"] == -1:
            self.common_dataloader = DataLoader(common_dataset, batch_size=conf["common_num"],
                                                collate_fn=common_dataset.collate_fn)
        else:
            self.common_dataloader = DataLoader(common_dataset, batch_size=conf["common_batch_size"],
                                                collate_fn=common_dataset.collate_fn)
        logger.info("create dataset and dataloader done ...")
        # ----------------------------------------- #
        # ----------------- model ----------------- #
        # ----------------------------------------- #
        # inputs (x, t), outputs (Re(phi), Im(phi))
        self.pinn_model = PINN(input_dim=2, output_dim=2, dim_list=conf["model_layers"]).to(self.device)
        logger.info("create pinn done ...")
        if conf["load_weight"] == "True":
            logger.info("load weight ...")
            self.pinn_model.load_state_dict(torch.load(load_weight_path))
        # ------------------------------------------------------ #
        # ----------------- optimizer and loss ----------------- #
        # ------------------------------------------------------ #
        params = [p for p in self.pinn_model.parameters() if p.requires_grad]
        # self.optimizer = optim.LBFGS(params, lr=conf["lr"],
        #                              max_iter=conf["max_iter"],
        #                              max_eval=conf["max_eval"],
        #                              history_size=conf["history_size"],
        #                              tolerance_grad=conf["tolerance_grad"])
        # NOTE(review): Adam is created with its default lr; conf["lr"] is
        # only used by the commented-out LBFGS above — confirm intended.
        self.optimizer = optim.Adam(params)
        # self.optimizer = optim.SGD(params, lr=5e-4)
        logger.info("create optimizer and criterion done ...")
        # ---------------------------------------- #
        # ----------------- save ----------------- #
        # ---------------------------------------- #
        self.model_output_path = os.path.join(conf["model_output_root"], conf["model_output_name"])
        self.pinn_val_output_figure = os.path.join(conf["figure_output_root"], conf["pinn_figure_output_name"])

    def train(self):
        """Train the PINN for ``self.EPOCHS`` epochs and save the weights.

        Each epoch accumulates gradients from three loss terms (boundary
        condition, initial condition, PDE residual) via separate backward
        passes, then takes a single optimizer step.
        """
        logger.info("begin train ...")
        begin_train_time = time.time()
        for epoch in range(1, self.EPOCHS+1):
            self.pinn_model.train()
            self.optimizer.zero_grad()
            # placeholders so logging works even if a dataloader is empty
            boundary_loss = np.zeros(1)
            initial_loss = np.zeros(1)
            common_loss = np.zeros(1)
            # boundary data: the solution must vanish on the box boundary
            for step, batch in enumerate(self.boundary_dataloader):
                boundary_input_tensor_list = batch["input"]
                boundary_input_x = boundary_input_tensor_list[0].to(self.device)
                boundary_input_t = boundary_input_tensor_list[1].to(self.device)
                boundary_pred_tensor = self.pinn_model(boundary_input_x, boundary_input_t)
                # MSE against zero (written as (pred - 0)^2)
                boundary_loss = torch.mean((boundary_pred_tensor -
                                            torch.zeros_like(boundary_pred_tensor).to(self.device))**2)
                boundary_loss.backward()
            # initial data: match the sampled initial condition at t = 0
            for step, batch in enumerate(self.initial_dataloader):
                initial_input_tensor_list = batch["input"]
                initial_output_tensor = batch["output"].to(self.device)
                initial_input_x = initial_input_tensor_list[0].to(self.device)
                initial_input_t = initial_input_tensor_list[1].to(self.device)
                initial_pred_tensor = self.pinn_model(initial_input_x, initial_input_t)
                initial_loss = torch.mean((initial_pred_tensor-initial_output_tensor)**2)
                initial_loss.backward()
            # common (collocation) data: enforce the PDE residual
            for step, batch in enumerate(self.common_dataloader):
                common_input_tensor_list = batch["input"]
                common_input_x = common_input_tensor_list[0].to(self.device)
                common_input_t = common_input_tensor_list[1].to(self.device)
                # grad_outputs of ones: sum the per-sample scalar outputs
                mask_matrix = torch.ones(common_input_x.shape[0]).to(self.device)
                # y = (Re(phi), Im(phi))
                common_pred_tensor = self.pinn_model(common_input_x, common_input_t)
                # dy/dx
                dy_dx_real = torch.autograd.grad(common_pred_tensor[:, 0], common_input_x,
                                              grad_outputs=mask_matrix,
                                              create_graph=True, retain_graph=True)[0]
                dy_dx_imag = torch.autograd.grad(common_pred_tensor[:, 1], common_input_x,
                                              grad_outputs=mask_matrix,
                                              create_graph=True, retain_graph=True)[0]
                # dy/dt
                dy_dt_real = torch.autograd.grad(common_pred_tensor[:, 0], common_input_t,
                                              grad_outputs=mask_matrix,
                                              create_graph=True, retain_graph=True)[0]
                dy_dt_imag = torch.autograd.grad(common_pred_tensor[:, 1], common_input_t,
                                              grad_outputs=mask_matrix,
                                              create_graph=True, retain_graph=True)[0]
                # d^2y/dx^2 (second derivatives via grad-of-grad)
                dy_dx_real_2nd = torch.autograd.grad(dy_dx_real, common_input_x,
                                                  grad_outputs=mask_matrix,
                                                  create_graph=True, retain_graph=True)[0]
                dy_dx_imag_2nd = torch.autograd.grad(dy_dx_imag, common_input_x,
                                                  grad_outputs=mask_matrix,
                                                  create_graph=True, retain_graph=True, allow_unused=True)[0]
                # PDE residual split into real and imaginary parts
                pde_output_real = 2*dy_dx_real_2nd - dy_dt_imag
                pde_output_imag = 2*dy_dx_imag_2nd + dy_dt_real
                common_loss = torch.mean(pde_output_real**2)+torch.mean(pde_output_imag**2)
                common_loss.backward()
            # one step per epoch with the accumulated gradients
            self.optimizer.step()
            if epoch % 10 == 0:
                logger.info("[{}/{}]\tboundary loss:{:.7f}\tinitial loss:{:.7f}\tcommon loss:{:.7f}".format(epoch,
                                                                                                           self.EPOCHS,
                                                                                                           boundary_loss.item(),
                                                                                                           initial_loss.item(),
                                                                                                           common_loss.item()))
        over_train_time = time.time()
        logger.info("train over ...")
        logger.info("train {} epochs in {:.5f}s".format(self.EPOCHS, over_train_time-begin_train_time))
        torch.save(self.pinn_model.state_dict(), self.model_output_path)
        logger.info("save model as {}".format(self.model_output_path))

    def pred_and_valuation(self):
        """Evaluate the trained PINN over the full (x, t) grid.

        Reconstructs the complex field from the network's (Re, Im) outputs
        column by column, logs L1/L2 norms and max/min/average absolute
        errors against the finite-difference reference, and writes a figure
        of the prediction.
        """
        logger.info("begin pred ...")
        with torch.no_grad():
            self.pinn_model.eval()
            # reference solution and evaluation grid
            true_output_matrix = self.data_creator.phi_matrix
            x_position_list = np.linspace(-self.data_creator.box_l / 2, self.data_creator.box_l / 2,
                                          self.data_creator.space_n)
            t_position_list = np.linspace(0, self.data_creator.time_total, self.data_creator.time_n)
            pred_output_matrix = np.zeros((int(self.data_creator.space_n),
                                          int(self.data_creator.time_n))).astype(np.complex64)
            x_position_tensor = torch.from_numpy(x_position_list).type(torch.float32).to(self.device)
            # predict one time slice per iteration
            begin_pred_time = time.time()
            for t, time_point in enumerate(t_position_list):
                t_tensor = torch.ones_like(x_position_tensor)*time_point
                pred_ = self.pinn_model(x_position_tensor, t_tensor.to(self.device)).detach().cpu().numpy()
                # recombine (Re, Im) columns into a complex column
                pred_output_matrix[:, t] = pred_[:, 0] + 1.j * pred_[:, 1]
                if t % 1000 == 0:
                    print(np.max(pred_output_matrix[:, t]), np.min(pred_output_matrix[:, t]))
                    logger.info("[{}/{}] pred done ...".format(t, len(t_position_list)))
            over_pred_time = time.time()
            logger.info("pred done ...")
            logger.info("pred over at {:.5f}s ...".format(over_pred_time-begin_pred_time))
            # valuation: norms / errors of the complex-valued difference
            l1_norm = np.linalg.norm(pred_output_matrix.reshape(-1)-true_output_matrix.reshape(-1), 1)
            l2_norm = np.linalg.norm(pred_output_matrix.reshape(-1)-true_output_matrix.reshape(-1))
            max_error = np.max(np.abs(pred_output_matrix-true_output_matrix))
            min_error = np.min(np.abs(pred_output_matrix-true_output_matrix))
            average_error = np.average(np.abs(pred_output_matrix-true_output_matrix))
            logger.info("l1 norm {:.7f}".format(l1_norm))
            logger.info("l2 norm {:.7f}".format(l2_norm))
            logger.info("max error {:.7f}".format(max_error))
            logger.info("min error {:.7f}".format(min_error))
            logger.info("average error {:.7f}".format(average_error))
            # plot the prediction by temporarily swapping the creator's
            # matrix and figure path, then reusing its plotting routine
            self.data_creator.figure_output_path = os.path.join(self.conf["figure_output_root"],
                                                                self.conf["pinn_figure_output_name"])
            self.data_creator.phi_matrix = pred_output_matrix.copy()
            self.data_creator.plot_func(save_figure=True)
if __name__ == "__main__":
_conf = yaml.load(open("./conf/pinn_shrodinger_equation.yaml"), Loader=yaml.FullLoader)
weight_path = r"./output/weights/shrodinger_equation_pinn.pth"
main_ = ShrodingerEquationPinn(_conf, weight_path)
main_.train()
main_.pred_and_valuation()
| [
"numpy.random.seed",
"numpy.abs",
"torch.autograd.grad",
"models.PINN_models.PINN",
"logging.Formatter",
"torch.device",
"torch.no_grad",
"os.path.join",
"sys.path.append",
"torch.ones",
"logging.FileHandler",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.max",
"random.seed",
"nu... | [((262, 288), 'sys.path.append', 'sys.path.append', (['os.curdir'], {}), '(os.curdir)\n', (277, 288), False, 'import sys\n'), ((627, 756), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.NOTSET', 'format': '"""%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"""'}), "(level=logging.NOTSET, format=\n '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n", (646, 756), False, 'import logging\n'), ((797, 816), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (814, 816), False, 'import logging\n'), ((1009, 1053), 'logging.FileHandler', 'logging.FileHandler', (['log_file_path'], {'mode': '"""w"""'}), "(log_file_path, mode='w')\n", (1028, 1053), False, 'import logging\n'), ((1114, 1212), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"""'], {}), "(\n '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n", (1131, 1212), False, 'import logging\n'), ((924, 935), 'time.time', 'time.time', ([], {}), '()\n', (933, 935), False, 'import time\n'), ((1597, 1622), 'random.seed', 'random.seed', (["conf['seed']"], {}), "(conf['seed'])\n", (1608, 1622), False, 'import random\n'), ((1631, 1662), 'torch.manual_seed', 'torch.manual_seed', (["conf['seed']"], {}), "(conf['seed'])\n", (1648, 1662), False, 'import torch\n'), ((1671, 1709), 'torch.random.manual_seed', 'torch.random.manual_seed', (["conf['seed']"], {}), "(conf['seed'])\n", (1695, 1709), False, 'import torch\n'), ((1718, 1758), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["conf['seed']"], {}), "(conf['seed'])\n", (1744, 1758), False, 'import torch\n'), ((1767, 1795), 'numpy.random.seed', 'np.random.seed', (["conf['seed']"], {}), "(conf['seed'])\n", (1781, 1795), True, 'import numpy as np\n'), ((1824, 1849), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1847, 1849), False, 'import torch\n'), ((2187, 
2222), 'data.DataCreator.ShrodingerEquationDataCreator', 'ShrodingerEquationDataCreator', (['conf'], {}), '(conf)\n', (2216, 2222), False, 'from data.DataCreator import ShrodingerEquationDataCreator\n'), ((2247, 2258), 'time.time', 'time.time', ([], {}), '()\n', (2256, 2258), False, 'import time\n'), ((2359, 2370), 'time.time', 'time.time', ([], {}), '()\n', (2368, 2370), False, 'import time\n'), ((3107, 3131), 'data.MyDataset.MyDataset', 'MyDataset', (['boundary_data'], {}), '(boundary_data)\n', (3116, 3131), False, 'from data.MyDataset import MyDataset\n'), ((3158, 3181), 'data.MyDataset.MyDataset', 'MyDataset', (['initial_data'], {}), '(initial_data)\n', (3167, 3181), False, 'from data.MyDataset import MyDataset\n'), ((3207, 3229), 'data.MyDataset.MyDataset', 'MyDataset', (['common_data'], {}), '(common_data)\n', (3216, 3229), False, 'from data.MyDataset import MyDataset\n'), ((5752, 5770), 'torch.optim.Adam', 'optim.Adam', (['params'], {}), '(params)\n', (5762, 5770), False, 'from torch import optim\n'), ((6081, 6147), 'os.path.join', 'os.path.join', (["conf['model_output_root']", "conf['model_output_name']"], {}), "(conf['model_output_root'], conf['model_output_name'])\n", (6093, 6147), False, 'import os\n'), ((6186, 6259), 'os.path.join', 'os.path.join', (["conf['figure_output_root']", "conf['pinn_figure_output_name']"], {}), "(conf['figure_output_root'], conf['pinn_figure_output_name'])\n", (6198, 6259), False, 'import os\n'), ((6349, 6360), 'time.time', 'time.time', ([], {}), '()\n', (6358, 6360), False, 'import time\n'), ((10964, 10975), 'time.time', 'time.time', ([], {}), '()\n', (10973, 10975), False, 'import time\n'), ((13587, 13675), 'os.path.join', 'os.path.join', (["self.conf['figure_output_root']", "self.conf['pinn_figure_output_name']"], {}), "(self.conf['figure_output_root'], self.conf[\n 'pinn_figure_output_name'])\n", (13599, 13675), False, 'import os\n'), ((1877, 1897), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1889, 
1897), False, 'import torch\n'), ((1938, 1957), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1950, 1957), False, 'import torch\n'), ((3336, 3442), 'torch.utils.data.DataLoader', 'DataLoader', (['boundary_dataset'], {'batch_size': "conf['boundary_num']", 'collate_fn': 'boundary_dataset.collate_fn'}), "(boundary_dataset, batch_size=conf['boundary_num'], collate_fn=\n boundary_dataset.collate_fn)\n", (3346, 3442), False, 'from torch.utils.data import DataLoader\n'), ((3541, 3653), 'torch.utils.data.DataLoader', 'DataLoader', (['boundary_dataset'], {'batch_size': "conf['boundary_batch_size']", 'collate_fn': 'boundary_dataset.collate_fn'}), "(boundary_dataset, batch_size=conf['boundary_batch_size'],\n collate_fn=boundary_dataset.collate_fn)\n", (3551, 3653), False, 'from torch.utils.data import DataLoader\n'), ((3784, 3887), 'torch.utils.data.DataLoader', 'DataLoader', (['initial_dataset'], {'batch_size': "conf['initial_num']", 'collate_fn': 'initial_dataset.collate_fn'}), "(initial_dataset, batch_size=conf['initial_num'], collate_fn=\n initial_dataset.collate_fn)\n", (3794, 3887), False, 'from torch.utils.data import DataLoader\n'), ((3984, 4093), 'torch.utils.data.DataLoader', 'DataLoader', (['initial_dataset'], {'batch_size': "conf['initial_batch_size']", 'collate_fn': 'initial_dataset.collate_fn'}), "(initial_dataset, batch_size=conf['initial_batch_size'],\n collate_fn=initial_dataset.collate_fn)\n", (3994, 4093), False, 'from torch.utils.data import DataLoader\n'), ((4221, 4321), 'torch.utils.data.DataLoader', 'DataLoader', (['common_dataset'], {'batch_size': "conf['common_num']", 'collate_fn': 'common_dataset.collate_fn'}), "(common_dataset, batch_size=conf['common_num'], collate_fn=\n common_dataset.collate_fn)\n", (4231, 4321), False, 'from torch.utils.data import DataLoader\n'), ((4416, 4523), 'torch.utils.data.DataLoader', 'DataLoader', (['common_dataset'], {'batch_size': "conf['common_batch_size']", 'collate_fn': 
'common_dataset.collate_fn'}), "(common_dataset, batch_size=conf['common_batch_size'], collate_fn\n =common_dataset.collate_fn)\n", (4426, 4523), False, 'from torch.utils.data import DataLoader\n'), ((6512, 6523), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (6520, 6523), True, 'import numpy as np\n'), ((6551, 6562), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (6559, 6562), True, 'import numpy as np\n'), ((6589, 6600), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (6597, 6600), True, 'import numpy as np\n'), ((11349, 11364), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11362, 11364), False, 'import torch\n'), ((11513, 11615), 'numpy.linspace', 'np.linspace', (['(-self.data_creator.box_l / 2)', '(self.data_creator.box_l / 2)', 'self.data_creator.space_n'], {}), '(-self.data_creator.box_l / 2, self.data_creator.box_l / 2, self\n .data_creator.space_n)\n', (11524, 11615), True, 'import numpy as np\n'), ((11683, 11753), 'numpy.linspace', 'np.linspace', (['(0)', 'self.data_creator.time_total', 'self.data_creator.time_n'], {}), '(0, self.data_creator.time_total, self.data_creator.time_n)\n', (11694, 11753), True, 'import numpy as np\n'), ((12079, 12090), 'time.time', 'time.time', ([], {}), '()\n', (12088, 12090), False, 'import time\n'), ((12657, 12668), 'time.time', 'time.time', ([], {}), '()\n', (12666, 12668), False, 'import time\n'), ((13045, 13092), 'numpy.abs', 'np.abs', (['(pred_output_matrix - true_output_matrix)'], {}), '(pred_output_matrix - true_output_matrix)\n', (13051, 13092), True, 'import numpy as np\n'), ((13119, 13166), 'numpy.abs', 'np.abs', (['(pred_output_matrix - true_output_matrix)'], {}), '(pred_output_matrix - true_output_matrix)\n', (13125, 13166), True, 'import numpy as np\n'), ((13201, 13248), 'numpy.abs', 'np.abs', (['(pred_output_matrix - true_output_matrix)'], {}), '(pred_output_matrix - true_output_matrix)\n', (13207, 13248), True, 'import numpy as np\n'), ((4819, 4881), 'models.PINN_models.PINN', 'PINN', ([], 
{'input_dim': '(2)', 'output_dim': '(2)', 'dim_list': "conf['model_layers']"}), "(input_dim=2, output_dim=2, dim_list=conf['model_layers'])\n", (4823, 4881), False, 'from models.PINN_models import PINN\n'), ((5071, 5099), 'torch.load', 'torch.load', (['load_weight_path'], {}), '(load_weight_path)\n', (5081, 5099), False, 'import torch\n'), ((7731, 7793), 'torch.mean', 'torch.mean', (['((initial_pred_tensor - initial_output_tensor) ** 2)'], {}), '((initial_pred_tensor - initial_output_tensor) ** 2)\n', (7741, 7793), False, 'import torch\n'), ((8379, 8509), 'torch.autograd.grad', 'torch.autograd.grad', (['common_pred_tensor[:, 0]', 'common_input_x'], {'grad_outputs': 'mask_matrix', 'create_graph': '(True)', 'retain_graph': '(True)'}), '(common_pred_tensor[:, 0], common_input_x, grad_outputs=\n mask_matrix, create_graph=True, retain_graph=True)\n', (8398, 8509), False, 'import torch\n'), ((8635, 8765), 'torch.autograd.grad', 'torch.autograd.grad', (['common_pred_tensor[:, 1]', 'common_input_x'], {'grad_outputs': 'mask_matrix', 'create_graph': '(True)', 'retain_graph': '(True)'}), '(common_pred_tensor[:, 1], common_input_x, grad_outputs=\n mask_matrix, create_graph=True, retain_graph=True)\n', (8654, 8765), False, 'import torch\n'), ((8916, 9046), 'torch.autograd.grad', 'torch.autograd.grad', (['common_pred_tensor[:, 0]', 'common_input_t'], {'grad_outputs': 'mask_matrix', 'create_graph': '(True)', 'retain_graph': '(True)'}), '(common_pred_tensor[:, 0], common_input_t, grad_outputs=\n mask_matrix, create_graph=True, retain_graph=True)\n', (8935, 9046), False, 'import torch\n'), ((9172, 9302), 'torch.autograd.grad', 'torch.autograd.grad', (['common_pred_tensor[:, 1]', 'common_input_t'], {'grad_outputs': 'mask_matrix', 'create_graph': '(True)', 'retain_graph': '(True)'}), '(common_pred_tensor[:, 1], common_input_t, grad_outputs=\n mask_matrix, create_graph=True, retain_graph=True)\n', (9191, 9302), False, 'import torch\n'), ((9461, 9576), 'torch.autograd.grad', 
'torch.autograd.grad', (['dy_dx_real', 'common_input_x'], {'grad_outputs': 'mask_matrix', 'create_graph': '(True)', 'retain_graph': '(True)'}), '(dy_dx_real, common_input_x, grad_outputs=mask_matrix,\n create_graph=True, retain_graph=True)\n', (9480, 9576), False, 'import torch\n'), ((9715, 9849), 'torch.autograd.grad', 'torch.autograd.grad', (['dy_dx_imag', 'common_input_x'], {'grad_outputs': 'mask_matrix', 'create_graph': '(True)', 'retain_graph': '(True)', 'allow_unused': '(True)'}), '(dy_dx_imag, common_input_x, grad_outputs=mask_matrix,\n create_graph=True, retain_graph=True, allow_unused=True)\n', (9734, 9849), False, 'import torch\n'), ((10143, 10175), 'torch.mean', 'torch.mean', (['(pde_output_real ** 2)'], {}), '(pde_output_real ** 2)\n', (10153, 10175), False, 'import torch\n'), ((10174, 10206), 'torch.mean', 'torch.mean', (['(pde_output_imag ** 2)'], {}), '(pde_output_imag ** 2)\n', (10184, 10206), False, 'import torch\n'), ((12179, 12213), 'torch.ones_like', 'torch.ones_like', (['x_position_tensor'], {}), '(x_position_tensor)\n', (12194, 12213), False, 'import torch\n'), ((8167, 8202), 'torch.ones', 'torch.ones', (['common_input_x.shape[0]'], {}), '(common_input_x.shape[0])\n', (8177, 8202), False, 'import torch\n'), ((12471, 12503), 'numpy.max', 'np.max', (['pred_output_matrix[:, t]'], {}), '(pred_output_matrix[:, t])\n', (12477, 12503), True, 'import numpy as np\n'), ((12505, 12537), 'numpy.min', 'np.min', (['pred_output_matrix[:, t]'], {}), '(pred_output_matrix[:, t])\n', (12511, 12537), True, 'import numpy as np\n'), ((11959, 11992), 'torch.from_numpy', 'torch.from_numpy', (['x_position_list'], {}), '(x_position_list)\n', (11975, 11992), False, 'import torch\n'), ((7124, 7162), 'torch.zeros_like', 'torch.zeros_like', (['boundary_pred_tensor'], {}), '(boundary_pred_tensor)\n', (7140, 7162), False, 'import torch\n')] |
from matplotlib import pyplot as plt
from config import *
from shared_utils import *
from plot_utils import *
import pickle as pkl
import numpy as np
from collections import OrderedDict
diseases = ["campylobacter", "rotavirus", "borreliosis"]
with open('../data/counties/counties.pkl',"rb") as f:
counties = pkl.load(f)
xlim = (5.5,15.5)
ylim = (47,56)
countyByName = OrderedDict([('Düsseldorf', '05111'),('Recklinghausen', '05562'),
("Hannover", "03241"), ("Hamburg", "02000"),
("Berlin-Mitte", "11001"), ("Osnabrück", "03404"),
("Frankfurt (Main)", "06412"),
("Görlitz", "14626"), ("Stuttgart","08111"),
("Potsdam", "12054"), ("Köln", "05315"),
("Aachen", "05334"), ("Rostock", "13003"),
("Flensburg", "01001"), ("Frankfurt (Oder)", "12053"),
("Lübeck", "01003"),("Münster", "05515"),
("Ber<NAME>", "11008"), ('Göttingen', "03159"),
("Cottbus", "12052"), ("Erlangen", "09562"),
("Regensburg", "09362"), ("Bayreuth", "09472"),
("Bautzen", "14625"), ('Nürnberg', '09564'),
('München', '09162'), ("Würzburg", "09679"),
("Deggendorf", "09271"), ("Ansbach", "09571"),
("Rottal-Inn", "09277"), ("Passau", "09275"),
("Schwabach", "09565"), ("Memmingen", "09764"),
("Erlangen-Höchstadt", "09572"), ("Nürnberger Land", "09574"),
('Roth', "09576"), ('Starnberg', "09188"),
('Berchtesgadener Land', "09172"), ('Schweinfurt', "09678"),
("Augsburg","09772" ), ('Neustadt a.d.Waldnaab', "09374"),
("Fürstenfeldbruck", "09179"), ('Rosenheim', "09187"),
("Straubing", "09263"), ("Erding", "09177"),
("Tirschenreuth", "09377"), ('Miltenberg', "09676"),
('Neumarkt i.d.OPf.', "09373")])
plot_county_names = {"campylobacter": ["Düsseldorf", "Recklinghausen", "Hannover", "München",
"Hamburg", "Berlin-Mitte", "Osnabrück", "Frankfurt (Main)",
"Görlitz", "Stuttgart", "Potsdam", "Köln", "Aachen", "Rostock",
"Flensburg", "Frankfurt (Oder)", "Lübeck", "Münster", "Berlin Neukölln",
"Göttingen", "Cottbus", "Erlangen", "Regensburg", "Bayreuth", "Nürnberg"],
"rotavirus": ["Bautzen", "Hannover", "München", "Hamburg", "Düsseldorf", "Recklinghausen",
"Berlin-Mitte", "Frankfurt (Main)", "Görlitz", "Stuttgart", "Potsdam",
"Köln", "Aachen", "Rostock", "Flensburg", "Frankfurt (Oder)", "Lübeck", "Münster",
"<NAME>", "Göttingen", "Cottbus", "Erlangen", "Regensburg", "Bayreuth", "Nürnberg"],
"borreliosis": ["Erlangen", "Regensburg", "Bayreuth", "Würzburg", "Deggendorf",
"Ansbach", "Rottal-Inn", "Passau", "Schwabach", "Memmingen", "Erlangen-Höchstadt", "Nürnberger Land",
'Roth', 'Starnberg', 'Berchtesgadener Land', 'Schweinfurt', "Augsburg", 'Neustadt a.d.Waldnaab',
"Fürstenfeldbruck", 'Rosenheim', "Straubing", "Erding", "Tirschenreuth", 'Miltenberg', 'Neumarkt i.d.OPf.']}
# colors for curves
C1 = "#D55E00"
C2 = "#E69F00"
C3 = "#0073CF"
with open('../data/comparison.pkl',"rb") as f:
best_model=pkl.load(f)
for i,disease in enumerate(diseases):
# Load data
use_age = best_model[disease]["use_age"]
use_eastwest = best_model[disease]["use_eastwest"]
if disease=="borreliosis":
prediction_region = "bavaria"
use_eastwest = False
else:
prediction_region = "germany"
data = load_data(disease, prediction_region, counties)
data = data[data.index < parse_yearweek("2018-KW1")]
if disease == "borreliosis":
data = data[data.index >= parse_yearweek("2013-KW1")]
_, _, _, target = split_data(data)
county_ids = target.columns
# Load our prediction samples
res = load_pred(disease, use_age, use_eastwest)
prediction_samples = np.reshape(res['y'],(res["y"].shape[0],104,-1))
prediction_quantiles = quantiles(prediction_samples,(5,25,75,95))
prediction_mean = pd.DataFrame(data=np.mean(prediction_samples,axis=0), index=target.index, columns=target.columns)
prediction_q25 = pd.DataFrame(data=prediction_quantiles[25], index=target.index, columns=target.columns)
prediction_q75 = pd.DataFrame(data=prediction_quantiles[75], index=target.index, columns=target.columns)
prediction_q5 = pd.DataFrame(data=prediction_quantiles[5], index=target.index, columns=target.columns)
prediction_q95 = pd.DataFrame(data=prediction_quantiles[95], index=target.index, columns=target.columns)
# Load hhh4 predictions for reference
hhh4_predictions = pd.read_csv("../data/diseases/{}_hhh4.csv".format("borreliosis_notrend" if disease=="borreliosis" else disease))
weeks = hhh4_predictions.pop("weeks")
hhh4_predictions.index = parse_yearweek(weeks)
fig = plt.figure(figsize=(12, 12))
grid = plt.GridSpec(5, 5, top=0.90, bottom=0.11, left=0.07, right=0.92, hspace=0.2, wspace=0.3)
for j,name in enumerate(plot_county_names[disease]):
ax = fig.add_subplot(grid[np.unravel_index(list(range(25))[j],(5,5))])
county_id = countyByName[name]
dates = [n.wednesday() for n in target.index.values]
# plot our predictions w/ quartiles
p_pred=ax.plot_date(dates, prediction_mean[county_id], "-", color=C1, linewidth=2.0, zorder=4)
p_quant=ax.fill_between(dates, prediction_q25[county_id], prediction_q75[county_id], facecolor=C2, alpha=0.5, zorder=1)
ax.plot_date(dates, prediction_q25[county_id], ":", color=C2, linewidth=2.0, zorder=3)
ax.plot_date(dates, prediction_q75[county_id], ":", color=C2, linewidth=2.0, zorder=3)
# plot hhh4 reference prediction
p_hhh4=ax.plot_date(dates, hhh4_predictions[county_id], "-", color=C3, linewidth=2.0, zorder=3)
# plot ground truth
p_real=ax.plot_date(dates, target[county_id], "k.")
ax.set_title(name, fontsize=18)
ax.tick_params(axis="both", direction='out', size=2, labelsize=14)
plt.setp(ax.get_xticklabels(), visible=j>19, rotation=60)
ax.autoscale(False)
p_quant2=ax.fill_between(dates, prediction_q5[county_id], prediction_q95[county_id], facecolor=C2, alpha=0.25, zorder=0)
ax.plot_date(dates, prediction_q5[county_id], ":", color=C2, alpha=0.5, linewidth=2.0, zorder=1)
ax.plot_date(dates, prediction_q95[county_id], ":", color=C2, alpha=0.5, linewidth=2.0, zorder=1)
plt.legend([p_real[0], p_pred[0], p_hhh4[0], p_quant, p_quant2],
["reported", "predicted", "hhh4", "25\%-75\% quantile", "5\%-95\% quantile"],
fontsize=16, ncol=5, loc="upper center", bbox_to_anchor = (0,-0.01,1,1),
bbox_transform = plt.gcf().transFigure )
fig.text(0.5, 0.02, "Time [calendar weeks]", ha='center', fontsize=22)
fig.text(0.01, 0.46, "Reported/predicted infections", va='center', rotation='vertical', fontsize=22)
plt.savefig("../figures/curves_{}_appendix.pdf".format(disease))
# plt.show()
| [
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.mean",
"numpy.reshape",
"matplotlib.pyplot.GridSpec",
"collections.OrderedDict",
"matplotlib.pyplot.gcf"
] | [((376, 1658), 'collections.OrderedDict', 'OrderedDict', (["[('Düsseldorf', '05111'), ('Recklinghausen', '05562'), ('Hannover', '03241'\n ), ('Hamburg', '02000'), ('Berlin-Mitte', '11001'), ('Osnabrück',\n '03404'), ('Frankfurt (Main)', '06412'), ('Görlitz', '14626'), (\n 'Stuttgart', '08111'), ('Potsdam', '12054'), ('Köln', '05315'), (\n 'Aachen', '05334'), ('Rostock', '13003'), ('Flensburg', '01001'), (\n 'Frankfurt (Oder)', '12053'), ('Lübeck', '01003'), ('Münster', '05515'),\n ('Ber<NAME>', '11008'), ('Göttingen', '03159'), ('Cottbus', '12052'), (\n 'Erlangen', '09562'), ('Regensburg', '09362'), ('Bayreuth', '09472'), (\n 'Bautzen', '14625'), ('Nürnberg', '09564'), ('München', '09162'), (\n 'Würzburg', '09679'), ('Deggendorf', '09271'), ('Ansbach', '09571'), (\n 'Rottal-Inn', '09277'), ('Passau', '09275'), ('Schwabach', '09565'), (\n 'Memmingen', '09764'), ('Erlangen-Höchstadt', '09572'), (\n 'Nürnberger Land', '09574'), ('Roth', '09576'), ('Starnberg', '09188'),\n ('Berchtesgadener Land', '09172'), ('Schweinfurt', '09678'), (\n 'Augsburg', '09772'), ('Neustadt a.d.Waldnaab', '09374'), (\n 'Fürstenfeldbruck', '09179'), ('Rosenheim', '09187'), ('Straubing',\n '09263'), ('Erding', '09177'), ('Tirschenreuth', '09377'), (\n 'Miltenberg', '09676'), ('Neumarkt i.d.OPf.', '09373')]"], {}), "([('Düsseldorf', '05111'), ('Recklinghausen', '05562'), (\n 'Hannover', '03241'), ('Hamburg', '02000'), ('Berlin-Mitte', '11001'),\n ('Osnabrück', '03404'), ('Frankfurt (Main)', '06412'), ('Görlitz',\n '14626'), ('Stuttgart', '08111'), ('Potsdam', '12054'), ('Köln',\n '05315'), ('Aachen', '05334'), ('Rostock', '13003'), ('Flensburg',\n '01001'), ('Frankfurt (Oder)', '12053'), ('Lübeck', '01003'), (\n 'Münster', '05515'), ('Ber<NAME>', '11008'), ('Göttingen', '03159'), (\n 'Cottbus', '12052'), ('Erlangen', '09562'), ('Regensburg', '09362'), (\n 'Bayreuth', '09472'), ('Bautzen', '14625'), ('Nürnberg', '09564'), (\n 'München', '09162'), ('Würzburg', '09679'), ('Deggendorf', 
'09271'), (\n 'Ansbach', '09571'), ('Rottal-Inn', '09277'), ('Passau', '09275'), (\n 'Schwabach', '09565'), ('Memmingen', '09764'), ('Erlangen-Höchstadt',\n '09572'), ('Nürnberger Land', '09574'), ('Roth', '09576'), ('Starnberg',\n '09188'), ('Berchtesgadener Land', '09172'), ('Schweinfurt', '09678'),\n ('Augsburg', '09772'), ('Neustadt a.d.Waldnaab', '09374'), (\n 'Fürstenfeldbruck', '09179'), ('Rosenheim', '09187'), ('Straubing',\n '09263'), ('Erding', '09177'), ('Tirschenreuth', '09377'), (\n 'Miltenberg', '09676'), ('Neumarkt i.d.OPf.', '09373')])\n", (387, 1658), False, 'from collections import OrderedDict\n'), ((314, 325), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (322, 325), True, 'import pickle as pkl\n'), ((3878, 3889), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (3886, 3889), True, 'import pickle as pkl\n'), ((4595, 4645), 'numpy.reshape', 'np.reshape', (["res['y']", "(res['y'].shape[0], 104, -1)"], {}), "(res['y'], (res['y'].shape[0], 104, -1))\n", (4605, 4645), True, 'import numpy as np\n'), ((5551, 5579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (5561, 5579), True, 'from matplotlib import pyplot as plt\n'), ((5591, 5682), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(5)', '(5)'], {'top': '(0.9)', 'bottom': '(0.11)', 'left': '(0.07)', 'right': '(0.92)', 'hspace': '(0.2)', 'wspace': '(0.3)'}), '(5, 5, top=0.9, bottom=0.11, left=0.07, right=0.92, hspace=0.2,\n wspace=0.3)\n', (5603, 5682), True, 'from matplotlib import pyplot as plt\n'), ((4754, 4789), 'numpy.mean', 'np.mean', (['prediction_samples'], {'axis': '(0)'}), '(prediction_samples, axis=0)\n', (4761, 4789), True, 'import numpy as np\n'), ((7425, 7434), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7432, 7434), True, 'from matplotlib import pyplot as plt\n')] |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import numpy
import pandas as pd
from motor.motor_asyncio import (AsyncIOMotorClient, AsyncIOMotorCollection,
AsyncIOMotorCursor)
from QUANTAXIS.QAUtil import (QA_Setting, QA_util_code_tolist,
QA_util_date_stamp, QA_util_date_str2int,
QA_util_date_valid, QA_util_dict_remove_key,
QA_util_log_info,
QA_util_sql_mongo_sort_DESCENDING,
QA_util_time_stamp, QA_util_to_json_from_pandas,
trade_date_sse)
from QUANTAXIS.QAUtil.QASetting import DATABASE, DATABASE_ASYNC
async def QA_fetch_stock_day(code, start, end, format='numpy', frequence='day', collections=DATABASE_ASYNC.stock_day):
'获取股票日线'
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
# code checking
code = QA_util_code_tolist(code)
if QA_util_date_valid(end):
__data = []
cursor = collections.find({
'code': {'$in': code}, "date_stamp": {
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)}})
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
try:
res = pd.DataFrame([item async for item in cursor])
except SyntaxError:
print('THIS PYTHON VERSION NOT SUPPORT "async for" function')
pass
try:
res = res.drop('_id', axis=1).assign(volume=res.vol).query('volume>1').assign(date=pd.to_datetime(
res.date)).drop_duplicates((['date', 'code'])).set_index('date', drop=False)
res = res.ix[:, ['code', 'open', 'high', 'low',
'close', 'volume', 'amount', 'date']]
except:
res = None
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
# 多种数据格式
elif format in ['n', 'N', 'numpy']:
return numpy.asarray(res)
elif format in ['list', 'l', 'L']:
return numpy.asarray(res).tolist()
else:
print("QA Error QA_fetch_stock_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info(
'QA Error QA_fetch_stock_day data parameter start=%s end=%s is not right' % (start, end))
async def QA_fetch_stock_min(code, start, end, format='numpy', frequence='1min', collections=DATABASE_ASYNC.stock_min):
'获取股票分钟线'
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
else:
print("QA Error QA_fetch_stock_min parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)
__data = []
# code checking
code = QA_util_code_tolist(code)
cursor = collections.find({
'code': {'$in': code}, "time_stamp": {
"$gte": QA_util_time_stamp(start),
"$lte": QA_util_time_stamp(end)
}, 'type': frequence
})
try:
res = pd.DataFrame([item async for item in cursor])
except SyntaxError:
print('THIS PYTHON VERSION NOT SUPPORT "async for" function')
pass
try:
res = res.drop('_id', axis=1).assign(volume=res.vol).query('volume>1').assign(datetime=pd.to_datetime(
res.datetime)).drop_duplicates(['datetime', 'code']).set_index('datetime', drop=False)
# return res
except:
res = None
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
# 多种数据格式
elif format in ['n', 'N', 'numpy']:
return numpy.asarray(res)
elif format in ['list', 'l', 'L']:
return numpy.asarray(res).tolist()
else:
print("QA Error QA_fetch_stock_min format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
if __name__ == "__main__":
loop = asyncio.get_event_loop()
print(id(loop))
res = loop.run_until_complete(asyncio.gather(
QA_fetch_stock_day('000001', '2016-07-01', '2018-07-15'),
QA_fetch_stock_min('000002', '2016-07-01', '2018-07-15')
))
print(res)
# loop = asyncio.get_event_loop()
# print(id(loop))
# loop 内存地址一样 没有被销毁
| [
"pandas.DataFrame",
"asyncio.get_event_loop",
"QUANTAXIS.QAUtil.QA_util_date_stamp",
"numpy.asarray",
"QUANTAXIS.QAUtil.QA_util_date_valid",
"QUANTAXIS.QAUtil.QA_util_log_info",
"QUANTAXIS.QAUtil.QA_util_code_tolist",
"pandas.to_datetime",
"QUANTAXIS.QAUtil.QA_util_to_json_from_pandas",
"QUANTAXIS... | [((2115, 2140), 'QUANTAXIS.QAUtil.QA_util_code_tolist', 'QA_util_code_tolist', (['code'], {}), '(code)\n', (2134, 2140), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((2149, 2172), 'QUANTAXIS.QAUtil.QA_util_date_valid', 'QA_util_date_valid', (['end'], {}), '(end)\n', (2167, 2172), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((4381, 4406), 'QUANTAXIS.QAUtil.QA_util_code_tolist', 'QA_util_code_tolist', (['code'], {}), '(code)\n', (4400, 4406), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((5601, 5625), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5623, 5625), False, 'import asyncio\n'), ((3598, 3713), 'QUANTAXIS.QAUtil.QA_util_log_info', 'QA_util_log_info', (["('QA Error QA_fetch_stock_day data parameter start=%s end=%s is not right' %\n (start, end))"], {}), "(\n 'QA Error QA_fetch_stock_day data parameter start=%s end=%s is not right' %\n (start, end))\n", (3614, 3713), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((4638, 4683), 'pandas.DataFrame', 'pd.DataFrame', (['[item async for item in cursor]'], 
{}), '([item async for item in cursor])\n', (4650, 4683), True, 'import pandas as pd\n'), ((2486, 2531), 'pandas.DataFrame', 'pd.DataFrame', (['[item async for item in cursor]'], {}), '([item async for item in cursor])\n', (2498, 2531), True, 'import pandas as pd\n'), ((5178, 5210), 'QUANTAXIS.QAUtil.QA_util_to_json_from_pandas', 'QA_util_to_json_from_pandas', (['res'], {}), '(res)\n', (5205, 5210), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((3166, 3198), 'QUANTAXIS.QAUtil.QA_util_to_json_from_pandas', 'QA_util_to_json_from_pandas', (['res'], {}), '(res)\n', (3193, 3198), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((4507, 4532), 'QUANTAXIS.QAUtil.QA_util_time_stamp', 'QA_util_time_stamp', (['start'], {}), '(start)\n', (4525, 4532), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((4554, 4577), 'QUANTAXIS.QAUtil.QA_util_time_stamp', 'QA_util_time_stamp', (['end'], {}), '(end)\n', (4572, 4577), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((5279, 5297), 'numpy.asarray', 'numpy.asarray', (['res'], {}), '(res)\n', (5292, 5297), False, 'import 
numpy\n'), ((2306, 2329), 'QUANTAXIS.QAUtil.QA_util_date_stamp', 'QA_util_date_stamp', (['end'], {}), '(end)\n', (2324, 2329), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((2355, 2380), 'QUANTAXIS.QAUtil.QA_util_date_stamp', 'QA_util_date_stamp', (['start'], {}), '(start)\n', (2373, 2380), False, 'from QUANTAXIS.QAUtil import QA_Setting, QA_util_code_tolist, QA_util_date_stamp, QA_util_date_str2int, QA_util_date_valid, QA_util_dict_remove_key, QA_util_log_info, QA_util_sql_mongo_sort_DESCENDING, QA_util_time_stamp, QA_util_to_json_from_pandas, trade_date_sse\n'), ((3279, 3297), 'numpy.asarray', 'numpy.asarray', (['res'], {}), '(res)\n', (3292, 3297), False, 'import numpy\n'), ((5352, 5370), 'numpy.asarray', 'numpy.asarray', (['res'], {}), '(res)\n', (5365, 5370), False, 'import numpy\n'), ((3360, 3378), 'numpy.asarray', 'numpy.asarray', (['res'], {}), '(res)\n', (3373, 3378), False, 'import numpy\n'), ((4895, 4923), 'pandas.to_datetime', 'pd.to_datetime', (['res.datetime'], {}), '(res.datetime)\n', (4909, 4923), True, 'import pandas as pd\n'), ((2759, 2783), 'pandas.to_datetime', 'pd.to_datetime', (['res.date'], {}), '(res.date)\n', (2773, 2783), True, 'import pandas as pd\n')] |
"""
A minimal integration test to make sure the most critical parts of Verde work
as expected.
"""
import numpy.testing as npt
import pyproj
from ..datasets import fetch_california_gps
from ..spline import Spline
from ..vector import Vector
from ..trend import Trend
from ..chain import Chain
from ..model_selection import train_test_split
from ..blockreduce import BlockMean
from ..coordinates import get_region
from ..mask import distance_mask
def test_minimal_integration_2d_gps():
    """Run a full grid-and-mask workflow on the 2D GPS data as a smoke test."""
    gps = fetch_california_gps()
    # Mercator projection centered on the mean latitude of the data.
    projection = pyproj.Proj(proj="merc", lat_ts=gps.latitude.mean(), ellps="WGS84")
    proj_coords = projection(gps.longitude.values, gps.latitude.values)
    grid_spacing = 12 / 60
    train, test = train_test_split(
        coordinates=proj_coords,
        data=(gps.velocity_east, gps.velocity_north),
        weights=(1 / gps.std_east ** 2, 1 / gps.std_north ** 2),
        random_state=1,
    )
    # Block-average, remove a per-component linear trend, then fit splines.
    pipeline = Chain(
        [
            ("mean", BlockMean(spacing=grid_spacing * 111e3, uncertainty=True)),
            ("trend", Vector([Trend(1), Trend(1)])),
            ("spline", Vector([Spline(damping=1e-10), Spline(damping=1e-10)])),
        ]
    )
    pipeline.fit(*train)
    npt.assert_allclose(0.99, pipeline.score(*test), atol=0.01)
    # The calls below just need to run without raising an exception.
    bounds = get_region((gps.longitude, gps.latitude))
    grid = pipeline.grid(
        region=bounds,
        spacing=grid_spacing,
        projection=projection,
        dims=["latitude", "longitude"],
    )
    grid = distance_mask(
        (gps.longitude, gps.latitude),
        maxdist=grid_spacing * 2 * 111e3,
        grid=grid,
        projection=projection,
    )
| [
"numpy.testing.assert_allclose"
] | [((1313, 1356), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['(0.99)', 'score'], {'atol': '(0.01)'}), '(0.99, score, atol=0.01)\n', (1332, 1356), True, 'import numpy.testing as npt\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
from os import path as osp
import magnum as mn
import numpy as np
import pytest
import quaternion
import examples.settings
import habitat_sim
import habitat_sim.physics
from habitat_sim.utils.common import (
quat_from_angle_axis,
quat_from_magnum,
quat_to_magnum,
)
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    or not osp.exists("data/objects/"),
    reason="Requires the habitat-test-scenes and habitat test objects",
)
def test_kinematics():
    """Exercise the kinematic object API of the simulator.

    Covers: setting motion types, get/set of translation, rotation and full
    transform, object removal, attaching an object to an agent's scene node,
    and round-tripping a RigidState.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings[
        "scene"
    ] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True
    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        obj_mgr = sim.get_object_template_manager()
        obj_mgr.load_configs("data/objects/", True)
        assert obj_mgr.get_num_templates() > 0
        # test adding an object to the world
        # get handle for object 0, used to test
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        assert len(sim.get_existing_object_ids()) > 0
        # test setting the motion type
        sim.set_object_motion_type(habitat_sim.physics.MotionType.STATIC, object_id)
        assert (
            sim.get_object_motion_type(object_id)
            == habitat_sim.physics.MotionType.STATIC
        )
        sim.set_object_motion_type(habitat_sim.physics.MotionType.KINEMATIC, object_id)
        assert (
            sim.get_object_motion_type(object_id)
            == habitat_sim.physics.MotionType.KINEMATIC
        )
        # test kinematics
        I = np.identity(4)
        # test get and set translation
        sim.set_translation(np.array([0, 1.0, 0]), object_id)
        assert np.allclose(sim.get_translation(object_id), np.array([0, 1.0, 0]))
        # test object SceneNode
        object_node = sim.get_object_scene_node(object_id)
        assert np.allclose(sim.get_translation(object_id), object_node.translation)
        # test get and set transform
        sim.set_transformation(I, object_id)
        assert np.allclose(sim.get_transformation(object_id), I)
        # test get and set rotation: a 180-degree turn about the Y axis
        Q = quat_from_angle_axis(np.pi, np.array([0, 1.0, 0]))
        expected = np.eye(4)
        expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
        sim.set_rotation(quat_to_magnum(Q), object_id)
        assert np.allclose(sim.get_transformation(object_id), expected)
        assert np.allclose(quat_from_magnum(sim.get_rotation(object_id)), Q)
        # test object removal
        sim.remove_object(object_id)
        assert len(sim.get_existing_object_ids()) == 0
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        prev_time = 0.0
        for _ in range(2):
            # do some kinematics here (todo: translating or rotating instead of absolute)
            sim.set_translation(np.random.rand(3), object_id)
            T = sim.get_transformation(object_id)  # noqa : F841
            # test getting observation
            sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))
            # check that time is increasing in the world
            assert sim.get_world_time() > prev_time
            prev_time = sim.get_world_time()
        sim.remove_object(object_id)
        # test attaching/dettaching an Agent to/from physics simulation
        agent_node = sim.agents[0].scene_node
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0], agent_node)
        sim.set_translation(np.random.rand(3), object_id)
        assert np.allclose(agent_node.translation, sim.get_translation(object_id))
        sim.remove_object(object_id, False)  # don't delete the agent's node
        assert agent_node.translation
        # test get/set RigidState
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        targetRigidState = habitat_sim.bindings.RigidState(
            mn.Quaternion(), np.array([1.0, 2.0, 3.0])
        )
        sim.set_rigid_state(targetRigidState, object_id)
        objectRigidState = sim.get_rigid_state(object_id)
        assert np.allclose(objectRigidState.translation, targetRigidState.translation)
        assert objectRigidState.rotation == targetRigidState.rotation
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    or not osp.exists("data/objects/"),
    reason="Requires the habitat-test-scenes and habitat test objects",
)
def test_dynamics():
    """Exercise the dynamic simulation API: forces, torques, velocities,
    gravity modification, and DYNAMIC <-> KINEMATIC motion-type switches.

    The dynamic portion only runs when the active physics backend reports
    DYNAMIC motion for the first object (i.e. a real simulator is present).
    """
    # This test assumes that default.phys_scene_config.json contains "physics simulator": "bullet".
    # TODO: enable dynamic override of this setting in simulation config structure
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings[
        "scene"
    ] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True
    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        obj_mgr = sim.get_object_template_manager()
        obj_mgr.load_configs("data/objects/", True)
        # make the simulation deterministic (C++ seed is set in reconfigure)
        np.random.seed(cfg_settings["seed"])
        assert obj_mgr.get_num_templates() > 0
        # test adding an object to the world
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        object2_id = sim.add_object_by_handle(obj_handle_list[0])
        # object_id = sim.add_object(1)
        # object2_id = sim.add_object(1)
        assert len(sim.get_existing_object_ids()) > 0
        # place the objects over the table in room
        sim.set_translation(np.array([-0.569043, 2.04804, 13.6156]), object_id)
        sim.set_translation(np.array([-0.569043, 2.04804, 12.6156]), object2_id)
        # get object MotionType and continue testing if MotionType::DYNAMIC (implies a physics implementation is active)
        if (
            sim.get_object_motion_type(object_id)
            == habitat_sim.physics.MotionType.DYNAMIC
        ):
            object1_init_template = sim.get_object_initialization_template(object_id)
            object1_mass = object1_init_template.mass
            grav = sim.get_gravity()
            previous_object_states = [
                [sim.get_translation(object_id), sim.get_rotation(object_id)],
                [sim.get_translation(object2_id), sim.get_rotation(object2_id)],
            ]
            prev_time = sim.get_world_time()
            for _ in range(50):
                # force application at a location other than the origin should always cause angular and linear motion
                sim.apply_force(np.random.rand(3), np.random.rand(3), object2_id)
                # TODO: expose object properties (such as mass) to python
                # Counter the force of gravity on the object (it should not translate)
                sim.apply_force(-grav * object1_mass, np.zeros(3), object_id)
                # apply torque to the "floating" object. It should rotate, but not translate
                sim.apply_torque(np.random.rand(3), object_id)
                # TODO: test other physics functions
                # test getting observation
                sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))
                # check that time is increasing in the world
                assert sim.get_world_time() > prev_time
                prev_time = sim.get_world_time()
            # check the object states
            # 1st object should rotate, but not translate
            assert np.allclose(
                previous_object_states[0][0], sim.get_translation(object_id)
            )
            assert previous_object_states[0][1] != sim.get_rotation(object_id)
            # 2nd object should rotate and translate
            assert not np.allclose(
                previous_object_states[1][0], sim.get_translation(object2_id)
            )
            assert previous_object_states[1][1] != sim.get_rotation(object2_id)
            previous_object_states = [
                [sim.get_translation(object_id), sim.get_rotation(object_id)],
                [sim.get_translation(object2_id), sim.get_rotation(object2_id)],
            ]
            # test setting DYNAMIC object to KINEMATIC
            sim.set_object_motion_type(
                habitat_sim.physics.MotionType.KINEMATIC, object2_id
            )
            assert (
                sim.get_object_motion_type(object2_id)
                == habitat_sim.physics.MotionType.KINEMATIC
            )
            sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))
            # 2nd object should no longer rotate or translate
            assert np.allclose(
                previous_object_states[1][0], sim.get_translation(object2_id)
            )
            assert previous_object_states[1][1] == sim.get_rotation(object2_id)
            sim.step_physics(0.1)
            # test velocity get/set
            test_lin_vel = np.array([1.0, 0.0, 0.0])
            test_ang_vel = np.array([0.0, 1.0, 0.0])
            # velocity setting for KINEMATIC objects won't be simulated, but will be recorded for bullet internal usage.
            sim.set_linear_velocity(test_lin_vel, object2_id)
            assert sim.get_linear_velocity(object2_id) == test_lin_vel
            sim.set_object_motion_type(
                habitat_sim.physics.MotionType.DYNAMIC, object2_id
            )
            sim.set_linear_velocity(test_lin_vel, object2_id)
            sim.set_angular_velocity(test_ang_vel, object2_id)
            assert sim.get_linear_velocity(object2_id) == test_lin_vel
            assert sim.get_angular_velocity(object2_id) == test_ang_vel
            # test modifying gravity
            new_object_start = np.array([100.0, 0, 0])
            sim.set_translation(new_object_start, object_id)
            new_grav = np.array([10.0, 0, 0])
            sim.set_gravity(new_grav)
            assert np.allclose(sim.get_gravity(), new_grav)
            assert np.allclose(sim.get_translation(object_id), new_object_start)
            sim.step_physics(0.1)
            # under the new +X gravity the object must have moved in +X
            assert sim.get_translation(object_id)[0] > new_object_start[0]
def test_velocity_control():
    """Exercise the per-object VelocityControl structure.

    Runs two passes over an empty (scene "NONE", zero gravity) world:
    pass 0 with a DYNAMIC object (skipped when no dynamic simulator is
    active) and pass 1 with a KINEMATIC one. Each pass checks global
    velocity integration and then local (body-frame) velocity integration
    against analytically expected pose, within tolerances that allow for
    explicit-integration error.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        # zero gravity so velocity control is the only source of motion
        sim.set_gravity(np.array([0, 0, 0.0]))
        obj_mgr = sim.get_object_template_manager()
        template_path = osp.abspath("data/test_assets/objects/nested_box")
        template_ids = obj_mgr.load_configs(template_path)
        object_template = obj_mgr.get_template_by_ID(template_ids[0])
        # disable damping so integrated velocity matches the commanded one
        object_template.linear_damping = 0.0
        object_template.angular_damping = 0.0
        obj_mgr.register_template(object_template)
        obj_handle = obj_mgr.get_template_handle_by_ID(template_ids[0])
        for iteration in range(2):
            sim.reset()
            object_id = sim.add_object_by_handle(obj_handle)
            vel_control = sim.get_object_velocity_control(object_id)
            if iteration == 0:
                if (
                    sim.get_object_motion_type(object_id)
                    != habitat_sim.physics.MotionType.DYNAMIC
                ):
                    # Non-dynamic simulator in use. Skip 1st pass.
                    sim.remove_object(object_id)
                    continue
            elif iteration == 1:
                # test KINEMATIC
                sim.set_object_motion_type(
                    habitat_sim.physics.MotionType.KINEMATIC, object_id
                )
            # test global velocities
            vel_control.linear_velocity = np.array([1.0, 0, 0])
            vel_control.angular_velocity = np.array([0, 1.0, 0])
            vel_control.controlling_lin_vel = True
            vel_control.controlling_ang_vel = True
            while sim.get_world_time() < 1.0:
                # NOTE: stepping close to default timestep to get near-constant velocity control of DYNAMIC bodies.
                sim.step_physics(0.00416)
            ground_truth_pos = sim.get_world_time() * vel_control.linear_velocity
            assert np.allclose(
                sim.get_translation(object_id), ground_truth_pos, atol=0.01
            )
            ground_truth_q = mn.Quaternion([[0, 0.480551, 0], 0.876967])
            angle_error = mn.math.angle(ground_truth_q, sim.get_rotation(object_id))
            assert angle_error < mn.Rad(0.005)
            sim.reset()
            # test local velocities (turn in a half circle)
            vel_control.lin_vel_is_local = True
            vel_control.ang_vel_is_local = True
            vel_control.linear_velocity = np.array([0, 0, -math.pi])
            vel_control.angular_velocity = np.array([math.pi * 2.0, 0, 0])
            sim.set_translation(np.array([0, 0, 0.0]), object_id)
            sim.set_rotation(mn.Quaternion(), object_id)
            while sim.get_world_time() < 0.5:
                # NOTE: stepping close to default timestep to get near-constant velocity control of DYNAMIC bodies.
                sim.step_physics(0.008)
            print(sim.get_world_time())
            # NOTE: explicit integration, so expect some error
            ground_truth_q = mn.Quaternion([[1.0, 0, 0], 0])
            print(sim.get_translation(object_id))
            assert np.allclose(
                sim.get_translation(object_id), np.array([0, 1.0, 0.0]), atol=0.07
            )
            angle_error = mn.math.angle(ground_truth_q, sim.get_rotation(object_id))
            assert angle_error < mn.Rad(0.05)
            sim.remove_object(object_id)
@pytest.mark.skipif(
    not osp.exists("data/scene_datasets/habitat-test-scenes/apartment_1.glb"),
    reason="Requires the habitat-test-scenes",
)
def test_raycast():
    """Exercise ray casting against the stage and against added objects.

    Checks hit points/normals/distances against precomputed values for the
    apartment_1 scene, then verifies that disabling collidability of an
    object (and of the stage) removes the corresponding hits. Only runs the
    checks when a physics backend (and thus a collision world) is active.
    """
    cfg_settings = examples.settings.default_sim_settings.copy()
    # configure some settings in case defaults change
    cfg_settings["scene"] = "data/scene_datasets/habitat-test-scenes/apartment_1.glb"
    # enable the physics simulator
    cfg_settings["enable_physics"] = True
    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        obj_mgr = sim.get_object_template_manager()
        if (
            sim.get_physics_simulation_library()
            != habitat_sim.physics.PhysicsSimulationLibrary.NONE
        ):
            # only test this if we have a physics simulator and therefore a collision world
            test_ray_1 = habitat_sim.geo.Ray()
            test_ray_1.direction = mn.Vector3(1.0, 0, 0)
            raycast_results = sim.cast_ray(test_ray_1)
            assert raycast_results.ray.direction == test_ray_1.direction
            assert raycast_results.has_hits()
            assert len(raycast_results.hits) == 1
            assert np.allclose(
                raycast_results.hits[0].point, np.array([6.83063, 0, 0]), atol=0.07
            )
            assert np.allclose(
                raycast_results.hits[0].normal,
                np.array([-0.999587, 0.0222882, -0.0181424]),
                atol=0.07,
            )
            assert abs(raycast_results.hits[0].ray_distance - 6.831) < 0.001
            # object_id == -1 denotes a hit on the stage itself
            assert raycast_results.hits[0].object_id == -1
            # add a primitive object to the world and test a ray away from the origin
            cube_prim_handle = obj_mgr.get_template_handles("cube")[0]
            cube_obj_id = sim.add_object_by_handle(cube_prim_handle)
            sim.set_translation(mn.Vector3(2.0, 0, 2.0), cube_obj_id)
            test_ray_1.origin = np.array([0.0, 0, 2.0])
            raycast_results = sim.cast_ray(test_ray_1)
            assert raycast_results.has_hits()
            assert len(raycast_results.hits) == 4
            assert np.allclose(
                raycast_results.hits[0].point, np.array([1.89048, 0, 2]), atol=0.07
            )
            assert np.allclose(
                raycast_results.hits[0].normal,
                np.array([-0.99774, -0.0475114, -0.0475114]),
                atol=0.07,
            )
            assert abs(raycast_results.hits[0].ray_distance - 1.89) < 0.001
            assert raycast_results.hits[0].object_id == 0
            # test raycast against a non-collidable object.
            # should not register a hit with the object.
            sim.set_object_is_collidable(False, cube_obj_id)
            raycast_results = sim.cast_ray(test_ray_1)
            assert raycast_results.has_hits()
            assert len(raycast_results.hits) == 3
            # test raycast against a non-collidable stage.
            # should not register any hits.
            sim.set_stage_is_collidable(False)
            raycast_results = sim.cast_ray(test_ray_1)
            assert not raycast_results.has_hits()
| [
"os.path.abspath",
"quaternion.as_rotation_matrix",
"numpy.random.seed",
"magnum.Rad",
"numpy.allclose",
"habitat_sim.utils.common.quat_to_magnum",
"os.path.exists",
"numpy.identity",
"habitat_sim.geo.Ray",
"numpy.zeros",
"habitat_sim.Simulator",
"numpy.array",
"numpy.random.rand",
"numpy.... | [((1182, 1212), 'habitat_sim.Simulator', 'habitat_sim.Simulator', (['hab_cfg'], {}), '(hab_cfg)\n', (1203, 1212), False, 'import habitat_sim\n'), ((2167, 2181), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (2178, 2181), True, 'import numpy as np\n'), ((2809, 2818), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2815, 2818), True, 'import numpy as np\n'), ((2848, 2880), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', (['Q'], {}), '(Q)\n', (2877, 2880), False, 'import quaternion\n'), ((4796, 4867), 'numpy.allclose', 'np.allclose', (['objectRigidState.translation', 'targetRigidState.translation'], {}), '(objectRigidState.translation, targetRigidState.translation)\n', (4807, 4867), True, 'import numpy as np\n'), ((5796, 5826), 'habitat_sim.Simulator', 'habitat_sim.Simulator', (['hab_cfg'], {}), '(hab_cfg)\n', (5817, 5826), False, 'import habitat_sim\n'), ((6024, 6060), 'numpy.random.seed', 'np.random.seed', (["cfg_settings['seed']"], {}), "(cfg_settings['seed'])\n", (6038, 6060), True, 'import numpy as np\n'), ((11424, 11454), 'habitat_sim.Simulator', 'habitat_sim.Simulator', (['hab_cfg'], {}), '(hab_cfg)\n', (11445, 11454), False, 'import habitat_sim\n'), ((11587, 11637), 'os.path.abspath', 'osp.abspath', (['"""data/test_assets/objects/nested_box"""'], {}), "('data/test_assets/objects/nested_box')\n", (11598, 11637), True, 'from os import path as osp\n'), ((15317, 15347), 'habitat_sim.Simulator', 'habitat_sim.Simulator', (['hab_cfg'], {}), '(hab_cfg)\n', (15338, 15347), False, 'import habitat_sim\n'), ((14795, 14864), 'os.path.exists', 'osp.exists', (['"""data/scene_datasets/habitat-test-scenes/apartment_1.glb"""'], {}), "('data/scene_datasets/habitat-test-scenes/apartment_1.glb')\n", (14805, 14864), True, 'from os import path as osp\n'), ((2250, 2271), 'numpy.array', 'np.array', (['[0, 1.0, 0]'], {}), '([0, 1.0, 0])\n', (2258, 2271), True, 'import numpy as np\n'), ((2343, 2364), 'numpy.array', 'np.array', (['[0, 1.0, 0]'], 
{}), '([0, 1.0, 0])\n', (2351, 2364), True, 'import numpy as np\n'), ((2767, 2788), 'numpy.array', 'np.array', (['[0, 1.0, 0]'], {}), '([0, 1.0, 0])\n', (2775, 2788), True, 'import numpy as np\n'), ((2906, 2923), 'habitat_sim.utils.common.quat_to_magnum', 'quat_to_magnum', (['Q'], {}), '(Q)\n', (2920, 2923), False, 'from habitat_sim.utils.common import quat_from_angle_axis, quat_from_magnum, quat_to_magnum\n'), ((4213, 4230), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (4227, 4230), True, 'import numpy as np\n'), ((4613, 4628), 'magnum.Quaternion', 'mn.Quaternion', ([], {}), '()\n', (4626, 4628), True, 'import magnum as mn\n'), ((4630, 4655), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (4638, 4655), True, 'import numpy as np\n'), ((537, 612), 'os.path.exists', 'osp.exists', (['"""data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"""'], {}), "('data/scene_datasets/habitat-test-scenes/skokloster-castle.glb')\n", (547, 612), True, 'from os import path as osp\n'), ((624, 651), 'os.path.exists', 'osp.exists', (['"""data/objects/"""'], {}), "('data/objects/')\n", (634, 651), True, 'from os import path as osp\n'), ((6566, 6605), 'numpy.array', 'np.array', (['[-0.569043, 2.04804, 13.6156]'], {}), '([-0.569043, 2.04804, 13.6156])\n', (6574, 6605), True, 'import numpy as np\n'), ((6646, 6685), 'numpy.array', 'np.array', (['[-0.569043, 2.04804, 12.6156]'], {}), '([-0.569043, 2.04804, 12.6156])\n', (6654, 6685), True, 'import numpy as np\n'), ((9975, 10000), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (9983, 10000), True, 'import numpy as np\n'), ((10028, 10053), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (10036, 10053), True, 'import numpy as np\n'), ((10768, 10791), 'numpy.array', 'np.array', (['[100.0, 0, 0]'], {}), '([100.0, 0, 0])\n', (10776, 10791), True, 'import numpy as np\n'), ((10876, 10898), 'numpy.array', 'np.array', (['[10.0, 0, 0]'], 
{}), '([10.0, 0, 0])\n', (10884, 10898), True, 'import numpy as np\n'), ((4969, 5044), 'os.path.exists', 'osp.exists', (['"""data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"""'], {}), "('data/scene_datasets/habitat-test-scenes/skokloster-castle.glb')\n", (4979, 5044), True, 'from os import path as osp\n'), ((5056, 5083), 'os.path.exists', 'osp.exists', (['"""data/objects/"""'], {}), "('data/objects/')\n", (5066, 5083), True, 'from os import path as osp\n'), ((11487, 11508), 'numpy.array', 'np.array', (['[0, 0, 0.0]'], {}), '([0, 0, 0.0])\n', (11495, 11508), True, 'import numpy as np\n'), ((12789, 12810), 'numpy.array', 'np.array', (['[1.0, 0, 0]'], {}), '([1.0, 0, 0])\n', (12797, 12810), True, 'import numpy as np\n'), ((12854, 12875), 'numpy.array', 'np.array', (['[0, 1.0, 0]'], {}), '([0, 1.0, 0])\n', (12862, 12875), True, 'import numpy as np\n'), ((13417, 13460), 'magnum.Quaternion', 'mn.Quaternion', (['[[0, 0.480551, 0], 0.876967]'], {}), '([[0, 0.480551, 0], 0.876967])\n', (13430, 13460), True, 'import magnum as mn\n'), ((13817, 13843), 'numpy.array', 'np.array', (['[0, 0, -math.pi]'], {}), '([0, 0, -math.pi])\n', (13825, 13843), True, 'import numpy as np\n'), ((13887, 13918), 'numpy.array', 'np.array', (['[math.pi * 2.0, 0, 0]'], {}), '([math.pi * 2.0, 0, 0])\n', (13895, 13918), True, 'import numpy as np\n'), ((14380, 14411), 'magnum.Quaternion', 'mn.Quaternion', (['[[1.0, 0, 0], 0]'], {}), '([[1.0, 0, 0], 0])\n', (14393, 14411), True, 'import magnum as mn\n'), ((15664, 15685), 'habitat_sim.geo.Ray', 'habitat_sim.geo.Ray', ([], {}), '()\n', (15683, 15685), False, 'import habitat_sim\n'), ((15721, 15742), 'magnum.Vector3', 'mn.Vector3', (['(1.0)', '(0)', '(0)'], {}), '(1.0, 0, 0)\n', (15731, 15742), True, 'import magnum as mn\n'), ((16746, 16769), 'numpy.array', 'np.array', (['[0.0, 0, 2.0]'], {}), '([0.0, 0, 2.0])\n', (16754, 16769), True, 'import numpy as np\n'), ((3514, 3531), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', 
(3528, 3531), True, 'import numpy as np\n'), ((13579, 13592), 'magnum.Rad', 'mn.Rad', (['(0.005)'], {}), '(0.005)\n', (13585, 13592), True, 'import magnum as mn\n'), ((13952, 13973), 'numpy.array', 'np.array', (['[0, 0, 0.0]'], {}), '([0, 0, 0.0])\n', (13960, 13973), True, 'import numpy as np\n'), ((14015, 14030), 'magnum.Quaternion', 'mn.Quaternion', ([], {}), '()\n', (14028, 14030), True, 'import magnum as mn\n'), ((14542, 14565), 'numpy.array', 'np.array', (['[0, 1.0, 0.0]'], {}), '([0, 1.0, 0.0])\n', (14550, 14565), True, 'import numpy as np\n'), ((14709, 14721), 'magnum.Rad', 'mn.Rad', (['(0.05)'], {}), '(0.05)\n', (14715, 14721), True, 'import magnum as mn\n'), ((16046, 16071), 'numpy.array', 'np.array', (['[6.83063, 0, 0]'], {}), '([6.83063, 0, 0])\n', (16054, 16071), True, 'import numpy as np\n'), ((16193, 16237), 'numpy.array', 'np.array', (['[-0.999587, 0.0222882, -0.0181424]'], {}), '([-0.999587, 0.0222882, -0.0181424])\n', (16201, 16237), True, 'import numpy as np\n'), ((16675, 16698), 'magnum.Vector3', 'mn.Vector3', (['(2.0)', '(0)', '(2.0)'], {}), '(2.0, 0, 2.0)\n', (16685, 16698), True, 'import magnum as mn\n'), ((17002, 17027), 'numpy.array', 'np.array', (['[1.89048, 0, 2]'], {}), '([1.89048, 0, 2])\n', (17010, 17027), True, 'import numpy as np\n'), ((17149, 17193), 'numpy.array', 'np.array', (['[-0.99774, -0.0475114, -0.0475114]'], {}), '([-0.99774, -0.0475114, -0.0475114])\n', (17157, 17193), True, 'import numpy as np\n'), ((7566, 7583), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (7580, 7583), True, 'import numpy as np\n'), ((7585, 7602), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (7599, 7602), True, 'import numpy as np\n'), ((7832, 7843), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7840, 7843), True, 'import numpy as np\n'), ((7983, 8000), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (7997, 8000), True, 'import numpy as np\n')] |
# Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
from onnx_test_runner import OnnxTestRunner
import numpy as np
def _make_module(in_shape, expand_shape, value_format):
    """Build an ONNX model containing a single Expand node.

    Args:
        in_shape: shape of the float 'input' tensor.
        expand_shape: target shape for Expand; must broadcast with in_shape.
        value_format: 'initializer' to supply the shape values via a graph
            initializer, anything else via a Constant node.

    Returns:
        The assembled onnx ModelProto.
    """
    inputs = []
    outputs = []
    initializers = []
    nodes = []

    # input
    input = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
    inputs.append('input')

    # output: its shape follows numpy broadcasting of in_shape with expand_shape
    out = np.ones(in_shape) * np.ones(expand_shape)
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT, out.shape)
    outputs.append('output')

    # shape
    shape_tensor = helper.make_tensor(
        'shape',
        TensorProto.INT64,
        dims=[len(expand_shape)],
        vals=expand_shape
    )

    if value_format == 'initializer':
        initializers.append(shape_tensor)
    else:
        shape_node = helper.make_node(
            'Constant',
            inputs=[],
            outputs=['shape'],
            value=shape_tensor
        )
        nodes.append(shape_node)
    # BUGFIX: Expand always takes 'shape' as its second node input, regardless
    # of whether the values come from an initializer or a Constant node;
    # previously this append only happened on the Constant path, producing an
    # invalid one-input Expand in 'initializer' mode.
    inputs.append('shape')

    # Expand
    expand = onnx.helper.make_node(
        'Expand',
        inputs=inputs,
        outputs=outputs,
    )
    nodes.append(expand)

    graph_def = helper.make_graph(
        nodes,
        'test-model',
        [input],
        [output],
        initializer=initializers
    )

    model_def = helper.make_model(graph_def, producer_name='kendryte')
    return model_def
# Input tensor shapes to parametrize over.
in_shapes = [
    [3, 1]
]
# Target shapes handed to Expand; each must broadcast against the input shape.
expand_shapes = [
    [1],
    [1, 1],
    [3, 4],
    [2, 1, 6]
]
# How the 'shape' tensor is supplied to the graph (single-argname parameter
# sets, hence the one-element lists).
value_formats = [
    ['initializer'],
    ['constant']
]
@pytest.mark.parametrize('in_shape', in_shapes)
@pytest.mark.parametrize('expand_shape', expand_shapes)
@pytest.mark.parametrize('value_format', value_formats)
def test_expand(in_shape, expand_shape, value_format, request):
    """Round-trip a single-Expand model through the ONNX test runner."""
    runner = OnnxTestRunner(request.node.name)
    model = _make_module(in_shape, expand_shape, value_format)
    runner.run(runner.from_onnx_helper(model))
if __name__ == "__main__":
    # Allow running this file directly, outside of a pytest invocation.
    pytest.main(['-vv', 'test_expand.py'])
| [
"onnx.helper.make_node",
"onnx.helper.make_model",
"onnx.helper.make_tensor_value_info",
"numpy.ones",
"pytest.main",
"onnx_test_runner.OnnxTestRunner",
"pytest.mark.parametrize",
"onnx.helper.make_graph"
] | [((2269, 2315), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_shape"""', 'in_shapes'], {}), "('in_shape', in_shapes)\n", (2292, 2315), False, 'import pytest\n'), ((2317, 2371), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expand_shape"""', 'expand_shapes'], {}), "('expand_shape', expand_shapes)\n", (2340, 2371), False, 'import pytest\n'), ((2373, 2427), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value_format"""', 'value_formats'], {}), "('value_format', value_formats)\n", (2396, 2427), False, 'import pytest\n'), ((978, 1045), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input"""', 'TensorProto.FLOAT', 'in_shape'], {}), "('input', TensorProto.FLOAT, in_shape)\n", (1007, 1045), False, 'from onnx import helper\n'), ((1152, 1221), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output"""', 'TensorProto.FLOAT', 'out.shape'], {}), "('output', TensorProto.FLOAT, out.shape)\n", (1181, 1221), False, 'from onnx import helper\n'), ((1749, 1812), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Expand"""'], {'inputs': 'inputs', 'outputs': 'outputs'}), "('Expand', inputs=inputs, outputs=outputs)\n", (1770, 1812), False, 'import onnx\n'), ((1886, 1974), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test-model"""', '[input]', '[output]'], {'initializer': 'initializers'}), "(nodes, 'test-model', [input], [output], initializer=\n initializers)\n", (1903, 1974), False, 'from onnx import helper\n'), ((2033, 2087), 'onnx.helper.make_model', 'helper.make_model', (['graph_def'], {'producer_name': '"""kendryte"""'}), "(graph_def, producer_name='kendryte')\n", (2050, 2087), False, 'from onnx import helper\n'), ((2573, 2606), 'onnx_test_runner.OnnxTestRunner', 'OnnxTestRunner', (['request.node.name'], {}), '(request.node.name)\n', (2587, 2606), False, 'from onnx_test_runner import OnnxTestRunner\n'), ((2719, 2757), 'pytest.main', 'pytest.main', (["['-vv', 
'test_expand.py']"], {}), "(['-vv', 'test_expand.py'])\n", (2730, 2757), False, 'import pytest\n'), ((1097, 1114), 'numpy.ones', 'np.ones', (['in_shape'], {}), '(in_shape)\n', (1104, 1114), True, 'import numpy as np\n'), ((1117, 1138), 'numpy.ones', 'np.ones', (['expand_shape'], {}), '(expand_shape)\n', (1124, 1138), True, 'import numpy as np\n'), ((1525, 1603), 'onnx.helper.make_node', 'helper.make_node', (['"""Constant"""'], {'inputs': '[]', 'outputs': "['shape']", 'value': 'shape_tensor'}), "('Constant', inputs=[], outputs=['shape'], value=shape_tensor)\n", (1541, 1603), False, 'from onnx import helper\n')] |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
# Evaluate an (unnormalized) correlated bivariate Gaussian density on a
# 100x100 grid over [-5, 5] x [-5, 5].
X, Y = np.meshgrid(np.linspace(-5, 5, 100), np.linspace(-5, 5, 100))
Z = np.exp(-(X ** 2 - X * Y + Y ** 2) / 2)
# BUGFIX(idiom): the colormap variable was named 'map', shadowing the builtin.
cmap = plt.get_cmap('viridis')
fig = plt.figure()
# Right panel: contour plot; equal aspect so the elliptical level sets of the
# Gaussian are not visually distorted.
ax = fig.add_subplot(122)
ax.set_aspect('equal')
ax.contour(X, Y, Z, 10, cmap=cmap)
# Left panel: the same density rendered as a 3D surface.
ax2 = fig.add_subplot(121, projection='3d')
ax2.plot_surface(X, Y, Z, cmap=cmap, linewidth=0)
fig.suptitle('Multivariate Gaussian')
fig.savefig('../img/ellipse.png')
| [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.get_cmap",
"numpy.exp",
"numpy.linspace"
] | [((185, 223), 'numpy.exp', 'np.exp', (['(-(X ** 2 - X * Y + Y ** 2) / 2)'], {}), '(-(X ** 2 - X * Y + Y ** 2) / 2)\n', (191, 223), True, 'import numpy as np\n'), ((218, 241), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (230, 241), True, 'import matplotlib.pyplot as plt\n'), ((248, 260), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (258, 260), True, 'import matplotlib.pyplot as plt\n'), ((136, 159), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (147, 159), True, 'import numpy as np\n'), ((158, 181), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (169, 181), True, 'import numpy as np\n')] |
'''
Authors: <NAME>.
Copyright:
Copyright (c) 2018 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
**
Original code from https://github.com/deep-diver/CIFAR10-img-classification-tensorflow
Modified for our purposes.
**
'''
import os, sys
import pickle
import random
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
preProcessedImgSaveFolderConst = './PreProcessedImages'
def load_label_names():
    """Return the ten CIFAR-10 class names, indexed by label id (0-9)."""
    names = 'airplane automobile bird cat deer dog frog horse ship truck'
    return names.split()
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    """Load one raw CIFAR-10 training batch.

    Returns a tuple (features, labels) where features has shape
    (n_samples, 32, 32, 3) and labels is the raw label list from the pickle.
    """
    batch_file = f'{cifar10_dataset_folder_path}/data_batch_{batch_id}'
    # the CIFAR-10 pickles were produced under Python 2, hence the
    # 'latin1' encoding when unpickling
    with open(batch_file, mode='rb') as fh:
        raw = pickle.load(fh, encoding='latin1')
    n_samples = len(raw['data'])
    # stored layout is (N, 3072) channel-first; convert to (N, 32, 32, 3)
    features = raw['data'].reshape((n_samples, 3, 32, 32)).transpose(0, 2, 3, 1)
    return features, raw['labels']
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id, savepng=False, showfig=False):
    """Print summary statistics of one CIFAR-10 batch and inspect one sample.

    Args:
        cifar10_dataset_folder_path: folder holding the raw CIFAR-10 pickles.
        batch_id: training batch number to load via `load_cfar10_batch`.
        sample_id: index of the sample to describe within the batch.
        savepng: if True, save the sample image to 'foo.png'.
        showfig: if True (and savepng is False), display the sample image.

    Returns:
        None; also returns None early when `sample_id` is out of range.
    """
    features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
    if not (0 <= sample_id < len(features)):
        print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
        return None
    print('\nStats of batch #{}:'.format(batch_id))
    print('# of Samples: {}\n'.format(len(features)))
    label_names = load_label_names()
    # per-class frequency table: np.unique returns (values, counts)
    label_counts = dict(zip(*np.unique(labels, return_counts=True)))
    for key, value in label_counts.items():
        print('Label Counts of [{}]({}) : {}'.format(key, label_names[key].upper(), value))
    sample_image = features[sample_id]
    sample_label = labels[sample_id]
    print('\nExample of Image {}:'.format(sample_id))
    print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
    print('Image - Shape: {}'.format(sample_image.shape))
    print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
    if savepng or showfig:
        # Save/show a .png file for the current image
        plt.imshow(sample_image)
        if savepng:
            plt.savefig('foo.png')
        elif showfig:
            plt.show()
def normalize(x):
    """Min-max normalize image data to the [0, 1] range.

    argument
        - x: input image data in numpy array [32, 32, 3]
    return
        - normalized x; an all-zero array when the input is constant
          (the original divided by zero in that case, producing NaN/inf)
    """
    min_val = np.min(x)
    max_val = np.max(x)
    if max_val == min_val:
        # constant input: avoid division by zero
        return np.zeros_like(x, dtype=float)
    return (x - min_val) / (max_val - min_val)
def one_hot_encode(x):
    """
    argument
        - x: a list of labels
    return
        - one hot encoding matrix (number of labels, number of class)
    """
    labels = np.asarray(x, dtype=int)
    encoded = np.zeros((labels.size, 10))
    # place a single 1 per row at the column given by the label id
    encoded[np.arange(labels.size), labels] = 1
    return encoded
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
# Saved files are 'preprocess_batch_' + str(batch_i) + '.p',
# 'preprocess_validation.p',
# 'preprocess_testing.p'
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode, preProcessedImgSaveFolder = preProcessedImgSaveFolderConst):
    """Preprocess all five CIFAR-10 training batches plus the test batch.

    For each training batch, 90% of the samples are normalized, encoded and
    saved to 'preprocess_batch_<i>.p'; the remaining 10% of every batch is
    pooled into 'preprocess_validation.p'. The test batch is saved to
    'preprocess_testing.p'.

    Args:
        cifar10_dataset_folder_path: folder holding the raw CIFAR-10 pickles.
        normalize: callable applied to the feature arrays.
        one_hot_encode: callable applied to the label lists.
        preProcessedImgSaveFolder: output folder for the preprocessed pickles.
    """
    n_batches = 5
    valid_features = []
    valid_labels = []
    for batch_i in range(1, n_batches + 1):
        features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
        # find index to be the point as validation data in the whole dataset of the batch (10%)
        index_of_validation = int(len(features) * 0.1)
        # preprocess the 90% of the whole dataset of the batch
        # - normalize the features
        # - one_hot_encode the lables
        # - save in a new file named, "preprocess_batch_" + batch_number
        # - each file for each batch
        _preprocess_and_save(normalize, one_hot_encode,
                             features[:-index_of_validation], labels[:-index_of_validation],
                             os.path.join(preProcessedImgSaveFolder, 'preprocess_batch_' + str(batch_i) + '.p'))
        # unlike the training dataset, validation dataset will be added through all batch dataset
        # - take 10% of the whold dataset of the batch
        # - add them into a list of
        #   - valid_features
        #   - valid_labels
        valid_features.extend(features[-index_of_validation:])
        valid_labels.extend(labels[-index_of_validation:])
    # preprocess the all stacked validation dataset
    _preprocess_and_save(normalize, one_hot_encode,
                         np.array(valid_features), np.array(valid_labels),
                         os.path.join(preProcessedImgSaveFolder, 'preprocess_validation.p'))
    # load the test dataset
    with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')
    # preprocess the testing data
    test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    test_labels = batch['labels']
    # Preprocess and Save all testing data
    _preprocess_and_save(normalize, one_hot_encode,
                         np.array(test_features), np.array(test_labels),
                         os.path.join(preProcessedImgSaveFolder, 'preprocess_testing.p'))
def get_one_sample_point(batch_id, sample_id, preProcessedImgSaveFolder = preProcessedImgSaveFolderConst):
    """Return (feature, label) for one sample of a preprocessed training batch.

    Args:
        batch_id: number of the preprocessed batch file to read.
        sample_id: index of the sample inside that batch.
        preProcessedImgSaveFolder: folder containing the preprocessed pickles.
    """
    filename = os.path.join(preProcessedImgSaveFolder, 'preprocess_batch_' + str(batch_id) + '.p')
    # use a context manager so the file handle is closed (the original leaked it)
    with open(filename, mode='rb') as pickle_file:
        features, labels = pickle.load(pickle_file)
    return (features[sample_id], labels[sample_id])
def get_sample_points(batch_id, start_id, end_id, preProcessedImgSaveFolder = preProcessedImgSaveFolderConst):
    """Return (features, labels) slices [start_id:end_id] of a preprocessed batch.

    Args:
        batch_id: number of the preprocessed batch file to read.
        start_id: first sample index of the slice (inclusive).
        end_id: last sample index of the slice (exclusive).
        preProcessedImgSaveFolder: folder containing the preprocessed pickles.
    """
    filename = os.path.join(preProcessedImgSaveFolder, 'preprocess_batch_' + str(batch_id) + '.p')
    # use a context manager so the file handle is closed (the original leaked it)
    with open(filename, mode='rb') as pickle_file:
        features, labels = pickle.load(pickle_file)
    return (features[start_id:end_id], labels[start_id:end_id])
def batch_features_labels(features, labels, batch_size):
    """
    Split features and labels into batches
    """
    total = len(features)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        yield features[start:stop], labels[start:stop]
        start = stop
def load_preprocess_training_batch(batch_id, batch_size, preProcessedImgSaveFolder = preProcessedImgSaveFolderConst):
    """
    Load the Preprocessed Training data and return them in batches of <batch_size> or less
    """
    filename = os.path.join(preProcessedImgSaveFolder, 'preprocess_batch_' + str(batch_id) + '.p')
    # use a context manager so the pickle file handle is closed (the original leaked it)
    with open(filename, mode='rb') as pickle_file:
        features, labels = pickle.load(pickle_file)
    # Return the training data in batches of size <batch_size> or less
    return batch_features_labels(features, labels, batch_size)
def load_preprocess_training_data(batch_id, preProcessedImgSaveFolder = preProcessedImgSaveFolderConst):
    """Load one preprocessed training batch as (features, labels)."""
    filename = os.path.join(preProcessedImgSaveFolder, 'preprocess_batch_' + str(batch_id) + '.p')
    # use a context manager so the pickle file handle is closed (the original leaked it)
    with open(filename, mode='rb') as pickle_file:
        features, labels = pickle.load(pickle_file)
    return features, labels
def load_preprocess_validation_data(preProcessedImgSaveFolder = preProcessedImgSaveFolderConst):
    """Load the pooled validation set as (valid_features, valid_labels)."""
    filename = os.path.join(preProcessedImgSaveFolder, 'preprocess_validation.p')
    # use a context manager so the pickle file handle is closed (the original leaked it)
    with open(filename, mode='rb') as pickle_file:
        valid_features, valid_labels = pickle.load(pickle_file)
    return valid_features, valid_labels
def load_preprocess_testing_data(preProcessedImgSaveFolder = preProcessedImgSaveFolderConst):
    """Load the preprocessed test set as (testing_features, testing_labels)."""
    filename = os.path.join(preProcessedImgSaveFolder, 'preprocess_testing.p')
    # use a context manager so the pickle file handle is closed (the original leaked it)
    with open(filename, mode='rb') as pickle_file:
        testing_features, testing_labels = pickle.load(pickle_file)
    return testing_features, testing_labels
def main():
    """Preprocess the CIFAR-10 dataset, print stats, and fetch one sample.

    Note: the original assigned a local `preProcessedImgSaveFolder` that was
    never passed anywhere (the callees use their own defaults); the unused
    local has been removed.
    """
    cifar10_dataset_folder_path = '../../HelperScripts/CIFAR10/cifar-10-batches-py'
    preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
    display_stats(cifar10_dataset_folder_path, 2, 4555)
    print(get_one_sample_point(2, 4555))
if __name__ == '__main__':
main() | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.unique",
"numpy.min",
"pickle.load",
"numpy.array",
"numpy.max",
"os.path.join",
"matplotlib.pyplot.savefig"
] | [((3486, 3495), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (3492, 3495), True, 'import numpy as np\n'), ((3511, 3520), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3517, 3520), True, 'import numpy as np\n'), ((1791, 1827), 'pickle.load', 'pickle.load', (['file'], {'encoding': '"""latin1"""'}), "(file, encoding='latin1')\n", (1802, 1827), False, 'import pickle\n'), ((3179, 3203), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample_image'], {}), '(sample_image)\n', (3189, 3203), True, 'from matplotlib import pyplot as plt\n'), ((5790, 5814), 'numpy.array', 'np.array', (['valid_features'], {}), '(valid_features)\n', (5798, 5814), True, 'import numpy as np\n'), ((5816, 5838), 'numpy.array', 'np.array', (['valid_labels'], {}), '(valid_labels)\n', (5824, 5838), True, 'import numpy as np\n'), ((5866, 5932), 'os.path.join', 'os.path.join', (['preProcessedImgSaveFolder', '"""preprocess_validation.p"""'], {}), "(preProcessedImgSaveFolder, 'preprocess_validation.p')\n", (5878, 5932), False, 'import os, sys\n'), ((6062, 6098), 'pickle.load', 'pickle.load', (['file'], {'encoding': '"""latin1"""'}), "(file, encoding='latin1')\n", (6073, 6098), False, 'import pickle\n'), ((6394, 6417), 'numpy.array', 'np.array', (['test_features'], {}), '(test_features)\n', (6402, 6417), True, 'import numpy as np\n'), ((6419, 6440), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (6427, 6440), True, 'import numpy as np\n'), ((6468, 6531), 'os.path.join', 'os.path.join', (['preProcessedImgSaveFolder', '"""preprocess_testing.p"""'], {}), "(preProcessedImgSaveFolder, 'preprocess_testing.p')\n", (6480, 6531), False, 'import os, sys\n'), ((3238, 3260), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""foo.png"""'], {}), "('foo.png')\n", (3249, 3260), True, 'from matplotlib import pyplot as plt\n'), ((8421, 8487), 'os.path.join', 'os.path.join', (['preProcessedImgSaveFolder', '"""preprocess_validation.p"""'], {}), "(preProcessedImgSaveFolder, 'preprocess_validation.p')\n", 
(8433, 8487), False, 'import os, sys\n'), ((8696, 8759), 'os.path.join', 'os.path.join', (['preProcessedImgSaveFolder', '"""preprocess_testing.p"""'], {}), "(preProcessedImgSaveFolder, 'preprocess_testing.p')\n", (8708, 8759), False, 'import os, sys\n'), ((2522, 2559), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (2531, 2559), True, 'import numpy as np\n'), ((3297, 3307), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3305, 3307), True, 'from matplotlib import pyplot as plt\n')] |
# -*-coding:utf8-*-
"""
personal rank主题类
author:zhangyu
email:<EMAIL>
"""
from __future__ import division
import sys
sys.path.append("../util")
import PR.util.read as read
import operator
import PR.util.mat_util as mat_util
from scipy.sparse.linalg import gmres
import numpy as np
def personal_rank(graph, root, alpha, iter_num, recom_num=10):
    """Iterative PersonalRank recommendation for a single user.

    Args:
        graph: bipartite user/item graph as {point: {neighbour: weight}};
            item node names contain an underscore, user names do not.
        root: the user node to recommend for.
        alpha: probability of continuing the random walk.
        iter_num: maximum number of iterations.
        recom_num: number of items to recommend.
    Returns:
        dict mapping item id -> rounded PersonalRank score.
    """
    rank = {point: 0 for point in graph}
    rank[root] = 1
    recom_result = {}
    for iter_index in range(iter_num):
        tmp_rank = {point: 0 for point in graph}
        for out_point, out_dict in graph.items():
            for inner_point, value in graph[out_point].items():
                # distribute the walker's mass evenly over outgoing edges
                tmp_rank[inner_point] += round(alpha * rank[out_point] / len(out_dict), 4)
                if inner_point == root:
                    # restart probability mass returns to the root
                    tmp_rank[inner_point] += round(1 - alpha, 4)
        if tmp_rank == rank:
            # converged: scores no longer change between iterations
            print("out" + str(iter_index))
            break
        rank = tmp_rank
    right_num = 0
    # BUGFIX: dict.iteritems() is Python 2 only; items() works on Python 3
    for zuhe in sorted(rank.items(), key=operator.itemgetter(1), reverse=True):
        point, pr_score = zuhe[0], zuhe[1]
        if len(point.split('_')) < 2:
            # user nodes carry no underscore; only items are recommended
            continue
        if point in graph[root]:
            # skip items the user already interacted with
            continue
        recom_result[point] = round(pr_score, 4)
        right_num += 1
        # BUGFIX: original used '>' which returned recom_num + 1 items
        if right_num >= recom_num:
            break
    return recom_result
def personal_rank_mat(graph, root, alpha, recom_num=10):
    """Matrix-based PersonalRank recommendation for a single user.

    Args:
        graph: bipartite user/item graph as {point: {neighbour: weight}}.
        root: the user node to recommend for.
        alpha: probability of continuing the random walk.
        recom_num: number of items to recommend.
    Returns:
        dict mapping item id -> rounded score; empty dict if `root` is
        not part of the graph.
    """
    m, vertex, address_dict = mat_util.graph_to_m(graph)
    if root not in address_dict:
        return {}
    score_dict = {}
    recom_dict = {}
    mat_all = mat_util.mat_all_point(m, vertex, alpha)
    index = address_dict[root]
    # unit vector selecting the root user in vertex order
    initial_list = [[0] for row in range(len(vertex))]
    initial_list[index] = [1]
    r_zero = np.array(initial_list)
    # solve the sparse linear system with GMRES; [0] is the solution vector
    res = gmres(mat_all, r_zero, tol=1e-8)[0]
    for index in range(len(res)):
        point = vertex[index]
        if len(point.strip().split("_")) < 2:
            # user nodes carry no underscore; only items are recommended
            continue
        if point in graph[root]:
            # skip items the user already interacted with
            continue
        score_dict[point] = round(res[index], 3)
    # BUGFIX: dict.iteritems() is Python 2 only; items() works on Python 3
    for zuhe in sorted(score_dict.items(), key=operator.itemgetter(1), reverse=True)[:recom_num]:
        point, score = zuhe[0], zuhe[1]
        recom_dict[point] = score
    return recom_dict
def get_one_user_recom():
    """Compute iterative PersonalRank recommendations for user "1"."""
    user = "1"
    alpha = 0.8
    iter_num = 100
    graph = read.get_graph_from_data("../data/ratings.txt")
    return personal_rank(graph, user, alpha, iter_num, 100)
def get_one_user_by_mat():
    """Compute matrix-based PersonalRank recommendations for user "1"."""
    user = "1"
    alpha = 0.8
    graph = read.get_graph_from_data("../data/ratings.txt")
    return personal_rank_mat(graph, user, alpha, 100)
if __name__ == "__main__":
recom_result_base = get_one_user_recom()
recom_result_mat = get_one_user_by_mat()
| [
"sys.path.append",
"PR.util.read.get_graph_from_data",
"numpy.array",
"scipy.sparse.linalg.gmres",
"PR.util.mat_util.mat_all_point",
"PR.util.mat_util.graph_to_m",
"operator.itemgetter"
] | [((120, 146), 'sys.path.append', 'sys.path.append', (['"""../util"""'], {}), "('../util')\n", (135, 146), False, 'import sys\n'), ((1805, 1831), 'PR.util.mat_util.graph_to_m', 'mat_util.graph_to_m', (['graph'], {}), '(graph)\n', (1824, 1831), True, 'import PR.util.mat_util as mat_util\n'), ((1937, 1977), 'PR.util.mat_util.mat_all_point', 'mat_util.mat_all_point', (['m', 'vertex', 'alpha'], {}), '(m, vertex, alpha)\n', (1959, 1977), True, 'import PR.util.mat_util as mat_util\n'), ((2107, 2129), 'numpy.array', 'np.array', (['initial_list'], {}), '(initial_list)\n', (2115, 2129), True, 'import numpy as np\n'), ((2712, 2759), 'PR.util.read.get_graph_from_data', 'read.get_graph_from_data', (['"""../data/ratings.txt"""'], {}), "('../data/ratings.txt')\n", (2736, 2759), True, 'import PR.util.read as read\n'), ((2974, 3021), 'PR.util.read.get_graph_from_data', 'read.get_graph_from_data', (['"""../data/ratings.txt"""'], {}), "('../data/ratings.txt')\n", (2998, 3021), True, 'import PR.util.read as read\n'), ((2140, 2173), 'scipy.sparse.linalg.gmres', 'gmres', (['mat_all', 'r_zero'], {'tol': '(1e-08)'}), '(mat_all, r_zero, tol=1e-08)\n', (2145, 2173), False, 'from scipy.sparse.linalg import gmres\n'), ((1203, 1225), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1222, 1225), False, 'import operator\n'), ((2461, 2483), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2480, 2483), False, 'import operator\n')] |
import numpy
try:
import matplotlib.pyplot as pypl
plotting = True
except:
plotting = False
import os,time
this_dir = os.path.dirname(os.path.realpath(__file__))
import condor
import logging
logger = logging.getLogger("condor")
#logger.setLevel("DEBUG")
#logger.setLevel("WARNING")
logger.setLevel("INFO")
# number of diffraction patterns to simulate
N = 1
rotation_formalism="random"
rotation_values = None
# Source
src = condor.Source(wavelength=0.147E-9, pulse_energy=1E-3, focus_diameter=1E-6)
# Detector
det = condor.Detector(distance=0.9, pixel_size=400E-6, nx=250, ny=250)
# Map
#print("Simulating map")
# particle model built from an electron-density map (emd_id "1144",
# presumably an EMDB entry -- confirm against the condor docs) with a
# randomly sampled orientation per pattern
par = condor.ParticleMap(diameter=None, material_type="poliovirus", geometry="custom",
                         emd_id="1144",
                         rotation_formalism=rotation_formalism, rotation_values=rotation_values)
s = "particle_map"
E = condor.Experiment(src, {s : par}, det)
# every simulated pattern is appended to this CXI output file
W = condor.utils.cxiwriter.CXIWriter("./condor.cxi")
for i in range(N):
    t = time.time()
    res = E.propagate()
    #print(time.time()-t)
    if plotting:
        # inverse FFT (with fftshift) of the scattered field for visualization
        real_space = numpy.fft.fftshift(numpy.fft.ifftn(res["entry_1"]["data_1"]["data_fourier"]))
        # log scale makes the large dynamic range of the pattern visible
        pypl.imsave(this_dir + "/simple_test_%s_%i.png" % (s,i), numpy.log10(res["entry_1"]["data_1"]["data"]))
        pypl.imsave(this_dir + "/simple_test_%s_%i_rs.png" % (s,i), abs(real_space))
    W.write(res)
W.close()
| [
"condor.utils.cxiwriter.CXIWriter",
"os.path.realpath",
"condor.Source",
"logging.getLogger",
"condor.Experiment",
"time.time",
"numpy.fft.ifftn",
"numpy.log10",
"condor.Detector",
"condor.ParticleMap"
] | [((216, 243), 'logging.getLogger', 'logging.getLogger', (['"""condor"""'], {}), "('condor')\n", (233, 243), False, 'import logging\n'), ((396, 472), 'condor.Source', 'condor.Source', ([], {'wavelength': '(1.47e-10)', 'pulse_energy': '(0.001)', 'focus_diameter': '(1e-06)'}), '(wavelength=1.47e-10, pulse_energy=0.001, focus_diameter=1e-06)\n', (409, 472), False, 'import condor\n'), ((488, 552), 'condor.Detector', 'condor.Detector', ([], {'distance': '(0.9)', 'pixel_size': '(0.0004)', 'nx': '(250)', 'ny': '(250)'}), '(distance=0.9, pixel_size=0.0004, nx=250, ny=250)\n', (503, 552), False, 'import condor\n'), ((590, 766), 'condor.ParticleMap', 'condor.ParticleMap', ([], {'diameter': 'None', 'material_type': '"""poliovirus"""', 'geometry': '"""custom"""', 'emd_id': '"""1144"""', 'rotation_formalism': 'rotation_formalism', 'rotation_values': 'rotation_values'}), "(diameter=None, material_type='poliovirus', geometry=\n 'custom', emd_id='1144', rotation_formalism=rotation_formalism,\n rotation_values=rotation_values)\n", (608, 766), False, 'import condor\n'), ((831, 868), 'condor.Experiment', 'condor.Experiment', (['src', '{s: par}', 'det'], {}), '(src, {s: par}, det)\n', (848, 868), False, 'import condor\n'), ((875, 923), 'condor.utils.cxiwriter.CXIWriter', 'condor.utils.cxiwriter.CXIWriter', (['"""./condor.cxi"""'], {}), "('./condor.cxi')\n", (907, 923), False, 'import condor\n'), ((148, 174), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (164, 174), False, 'import os, time\n'), ((951, 962), 'time.time', 'time.time', ([], {}), '()\n', (960, 962), False, 'import os, time\n'), ((1070, 1127), 'numpy.fft.ifftn', 'numpy.fft.ifftn', (["res['entry_1']['data_1']['data_fourier']"], {}), "(res['entry_1']['data_1']['data_fourier'])\n", (1085, 1127), False, 'import numpy\n'), ((1194, 1239), 'numpy.log10', 'numpy.log10', (["res['entry_1']['data_1']['data']"], {}), "(res['entry_1']['data_1']['data'])\n", (1205, 1239), False, 'import numpy\n')] |
"""
Module to interface with data loader.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from tbase.skeleton import Skeleton
def to_3_channels_rep(data):
    """
    Converts the input in shape (batch_size, 73, width) to (batch_size, 22 + 7, width, 3). Can also handle missing
    foot contacts, in which case the input shape is (batch_size, 69, width)
    """
    assert data.shape[1] >= 69 and (len(data.shape) == 3 or len(data.shape) == 4)
    seq_length = data.shape[2]
    batch_size = data.shape[0]
    n_joints = len(Skeleton.ALL_JOINTS)
    copied = np.copy(data)
    # split the flattened joint coordinates from the remaining channels
    pose_flat = copied[:, :n_joints * 3, ...]
    extras = copied[:, n_joints * 3:, ...]
    # (batch, 66, width) -> (batch, 22, 3, width) -> (batch, 22, width, 3)
    pose_3 = np.swapaxes(pose_flat.reshape(batch_size, n_joints, 3, seq_length), 2, 3)
    # pad the extra channels with two zero channels so they stack with the pose
    extras_3 = np.concatenate([extras[..., np.newaxis], np.zeros(extras.shape + (2,))], axis=3)
    converted = np.concatenate([pose_3, extras_3], axis=1)
    assert converted.shape[3] == 3 and converted.shape[2] == seq_length and converted.shape[0] == batch_size
    assert converted.shape[1] == n_joints + extras.shape[1]
    return converted
def to_1_channel_rep(data):
    """
    The inverse of `to_3_channels_rep`.
    """
    if len(data.shape) == 3 and data.shape[1] >= 69:
        # this is already the required format
        return data
    if len(data.shape) == 4 and data.shape[3] == 1:
        # single channel: dropping it yields the flat representation
        return np.squeeze(data, -1)
    seq_length = data.shape[2]
    batch_size = data.shape[0]
    n_joints = len(Skeleton.ALL_JOINTS)
    assert len(data.shape) == 4 and data.shape[3] == 3 and (data.shape[1] == n_joints + 7 or data.shape[1] == n_joints + 3)
    copied = np.copy(data)
    joints = copied[:, :n_joints, ...]
    extras = copied[:, n_joints:, ...]
    # (batch, 22, seq, 3) -> (batch, 22, 3, seq) -> (batch, 66, seq)
    joints_flat = np.swapaxes(joints, 2, 3).reshape(batch_size, n_joints * 3, seq_length)
    # only the first channel of the extra rows carries data; drop the zero padding
    extras_flat = extras[:, :, :, 0]
    flat = np.concatenate([joints_flat, extras_flat], axis=1)
    assert flat.shape[0] == batch_size and flat.shape[1] == 66 + extras.shape[1] and flat.shape[2] == seq_length
    return flat
def remove_foot_contacts(data):
    """Return a copy of `data` without the last four (foot-contact) rows of axis 1."""
    assert data.shape[1] == 73
    # copy so callers get an independent array, matching np.delete's behaviour
    return np.copy(data[:, :-4])
class Databases(object):
    """
    Helper class to define names of available motion capture databases.
    """
    CMU = 'data_cmu.npz'
    HDM05 = 'data_hdm05.npz'
    MHAD = 'data_mhad.npz'
    EDIN_LOCOMOTION = 'data_edin_locomotion.npz'
    EDIN_XSENS = 'data_edin_xsens.npz'
    EDIN_MISC = 'data_edin_misc.npz'
    EDIN_PUNCHING = 'data_edin_punching.npz'

    # lookup table used by `from_str` mapping lower-case names to DB files
    MAPPING = {'cmu': CMU, 'hdm05': HDM05, 'mhad': MHAD, 'edin_locomotion': EDIN_LOCOMOTION,
               'edin_xsens': EDIN_XSENS, 'edin_misc': EDIN_MISC, 'edin_punching': EDIN_PUNCHING}

    @classmethod
    def from_str(cls, name):
        """Retrieves the name of the DB file for a given string (case-insensitive)."""
        return cls.MAPPING[name.lower()]
class LoaderV2(object):
    """
    Loader that can handle data we preprocessed ourselves, i.e. not using Holden data directly.
    """

    def __init__(self, train_path, valid_path, normalizer=None, discard_foot_contacts=False):
        self.train_path = train_path
        self.valid_path = valid_path
        self.splits_path = [train_path, valid_path]
        self.normalizer = normalizer
        self.discard_foot_contacts = discard_foot_contacts
        self.all_dbs = [Databases.CMU, Databases.HDM05, Databases.MHAD, Databases.EDIN_LOCOMOTION,
                        Databases.EDIN_XSENS, Databases.EDIN_MISC, Databases.EDIN_PUNCHING]

    def _get_split_unnormalized(self, db_name, split):
        """Load the 'clips' array of one DB file and swap its last two axes."""
        clips = np.load(os.path.join(self.splits_path[split], db_name))['clips']
        swapped = np.swapaxes(clips, 1, 2)
        return self.remove_foot_contact_info(swapped)

    def get_training_unnormalized(self, db_name):
        return self._get_split_unnormalized(db_name, split=0)

    def get_validation_unnormalized(self, db_name):
        return self._get_split_unnormalized(db_name, split=1)

    def get_data_unnormalized(self, db_name):
        return self.get_training_unnormalized(db_name), self.get_validation_unnormalized(db_name)

    def get_validation_unnormalized_all(self):
        """Concatenate the validation split of every known database."""
        return np.concatenate([self.get_validation_unnormalized(db) for db in self.all_dbs], axis=0)

    def get_training_unnormalized_all(self):
        """Concatenate the training split of every known database."""
        return np.concatenate([self.get_training_unnormalized(db) for db in self.all_dbs], axis=0)

    def get_data_unnormalized_all(self):
        return self.get_training_unnormalized_all(), self.get_validation_unnormalized_all()

    def load_training_all(self):
        return self.normalizer.normalize(self.get_training_unnormalized_all())

    def load_validation_all(self):
        return self.normalizer.normalize(self.get_validation_unnormalized_all())

    def load_all(self):
        # Note that it is important that the training data is loaded first, because this function internally computes
        # the normalization statistics which are then reused for all subsequent calls.
        train_normalized = self.load_training_all()
        valid_normalized = self.load_validation_all()
        return train_normalized, valid_normalized

    def remove_foot_contact_info(self, data):
        # pass-through unless configured to drop the 4 foot-contact channels
        return remove_foot_contacts(data) if self.discard_foot_contacts else data
class Batch(object):
    """
    Represents one minibatch.
    """

    def __init__(self, input_, targets, ids, mask=None):
        self.inputs_ = input_
        self.targets = targets
        self.ids = ids
        # default to an all-zero mask matching the input shape
        self.mask = np.zeros(input_.shape) if mask is None else mask
        self.batch_size = self.inputs_.shape[0]

    def __getitem__(self, item):
        """Return a new single-entry Batch for index `item`."""
        if item < 0 or item >= self.batch_size:
            raise IndexError('batch index {} out of bounds for batch size {}'.format(item, self.batch_size))
        sl = slice(item, item + 1)
        return Batch(self.inputs_[sl, ...],
                     self.targets[sl, ...],
                     self.ids[sl, ...],
                     self.mask[sl, ...])

    def all_entries(self):
        """Yield (input, target, id, mask) for every entry in the batch."""
        for idx in range(self.batch_size):
            yield self.inputs_[idx], self.targets[idx], self.ids[idx], self.mask[idx]

    def perturbate(self, perturbator, reapply=False):
        """Keep the clean data as targets and replace the inputs by a perturbed version."""
        if perturbator is None:
            self.targets = self.inputs_
            return
        if reapply:
            noisy, noise_mask = perturbator.reapply_last_perturbation(self.inputs_)
        else:
            noisy, noise_mask = perturbator.perturbate(self.inputs_)
        self.targets = self.inputs_
        self.inputs_ = noisy
        self.mask = noise_mask

    def remove_foot_contacts(self):
        """Drop the foot-contact channels from inputs, targets and mask."""
        self.inputs_ = remove_foot_contacts(self.inputs_)
        self.targets = remove_foot_contacts(self.targets)
        self.mask = remove_foot_contacts(self.mask)

    def copy(self):
        """Return a deep copy of this batch."""
        return Batch(np.copy(self.inputs_),
                     np.copy(self.targets),
                     np.copy(self.ids),
                     np.copy(self.mask))
class AbstractFeeder(object):
    """Interface for minibatch feeders over the train/valid/test splits (0/1/2)."""

    def _get_batch(self, split, batch_ptr):
        """Return batch number `batch_ptr` (0-based) of the given split."""
        raise NotImplementedError('Method is abstract.')

    def _next_batch_from_split(self, split):
        """Return the next batch of `split`, wrapping around after the last one."""
        raise NotImplementedError('Method is abstract.')

    def _all_split_batches(self, split):
        """Generator looping once over every batch of the given split."""
        for _ in range(self._n_batches_split(split)):
            yield self._next_batch_from_split(split)

    def _n_batches_split(self, split):
        """Return how many batches the given split contains."""
        raise NotImplementedError('Method is abstract.')

    def _random_batch_from_split(self, split, rng=np.random):
        """Return a uniformly random batch from the requested split."""
        return self._get_batch(split, rng.randint(0, self._n_batches_split(split)))

    def reshuffle_train(self, rng=np.random):
        """Reshuffle the training data set."""
        raise NotImplementedError('Method is abstract.')

    # --- public per-split convenience wrappers --------------------------------
    def n_batches_train(self):
        return self._n_batches_split(0)

    def n_batches_valid(self):
        return self._n_batches_split(1)

    def n_batches_test(self):
        return self._n_batches_split(2)

    def next_batch_train(self):
        return self._next_batch_from_split(0)

    def next_batch_valid(self):
        return self._next_batch_from_split(1)

    def next_batch_test(self):
        return self._next_batch_from_split(2)

    def train_batches(self):
        return self._all_split_batches(0)

    def valid_batches(self):
        return self._all_split_batches(1)

    def test_batches(self):
        return self._all_split_batches(2)

    def random_train_batch(self, rng):
        return self._random_batch_from_split(0, rng)

    def random_valid_batch(self, rng):
        return self._random_batch_from_split(1, rng)

    def random_test_batch(self, rng):
        return self._random_batch_from_split(2, rng)

    def valid_batch_from_idxs(self, indices):
        raise NotImplementedError('Method is abstract.')
class FeederV2(AbstractFeeder):
    """Concrete feeder serving shuffled minibatches from in-memory arrays.

    Splits are indexed 0 (train) and 1 (valid); there is no test split.
    Shuffling happens on index arrays, so the underlying data is never moved.
    """

    def __init__(self, data_train, data_valid, batch_size, rng=np.random):
        # `data_train` can be None. If so, only the "validation" functions can be used.
        self._data_train = data_train
        self._data_valid = data_valid
        self._batch_size = batch_size
        self._splits = [data_train, data_valid]
        self._rng = rng
        # compute how many batches per split
        n_train = int(np.ceil(float(len(data_train)) / float(batch_size))) if data_train is not None else 0
        n_valid = int(np.ceil(float(len(data_valid)) / float(batch_size)))
        print('Number of batches per split: train {} valid {}'.format(
            n_train, n_valid))
        self._n_batches = [n_train, n_valid]  # number of batches per split
        self._batch_ptrs = [0, 0]  # pointers to the next available batch per split
        # per-split index arrays; batches are cut out of these, so shuffling
        # the indices reshuffles the batches without touching the data
        self._indices = [np.arange(0, len(data_train) if data_train is not None else 0), np.arange(0, len(data_valid))]
        # reshuffle the indices
        self._rng.shuffle(self._indices[0])
        self._rng.shuffle(self._indices[1])

    def _get_batch_ptr(self, split):
        # current position of the circular batch pointer for this split
        return self._batch_ptrs[split]

    def _set_batch_ptr(self, split, value):
        # wrap around to 0 once the last batch of the split has been served
        new_val = value if value < self._n_batches[split] else 0
        self._batch_ptrs[split] = new_val

    def _get_batch(self, split, batch_ptr):
        """
        Get the specified batch.
        :param split: Which split to access.
        :param batch_ptr: Which batch to access, i.e. index between 0 and number of batches for the given split.
        :return: The retrieved batch.
        """
        assert 0 <= batch_ptr < self._n_batches_split(split), 'batch pointer out of range'
        start_idx = batch_ptr * self._batch_size
        end_idx = (batch_ptr + 1) * self._batch_size
        # because we want to use all available data, must be careful that `end_idx` is valid
        end_idx = end_idx if end_idx <= len(self._indices[split]) else len(self._indices[split])
        indices = self._indices[split][start_idx:end_idx]
        inputs = self._splits[split][indices, ...]
        targets = np.copy(inputs)  # at the moment targets are inputs
        batch = Batch(inputs, targets, indices)
        return batch

    def _next_batch_from_split(self, split):
        """
        Returns the next available batch for the chosen split. Circular access if overflow happens.
        :param split: 0, 1, or 2 for 'train', 'valid', or 'test' respectively.
        :return: The next available batch
        """
        if split > 1:
            # no test split is stored; hand back an empty batch
            return Batch(np.array([]), np.array([]), np.array([]))
        batch_ptr = self._get_batch_ptr(split)
        next_batch = self._get_batch(split, batch_ptr)
        self._set_batch_ptr(split, batch_ptr + 1)
        return next_batch

    def _n_batches_split(self, split):
        # the test split (index 2) always reports zero batches
        return self._n_batches[split] if split <= 1 else 0

    def reshuffle_train(self, rng=np.random):
        """
        Reshuffles the training data set.
        """
        rng.shuffle(self._indices[0])

    def valid_batch_from_idxs(self, indices):
        """Build a validation batch from explicit sample indices.

        Note: inputs and targets reference the same array here (no copy).
        """
        data = self._splits[1][indices, ...]
        return Batch(data, data, indices)
| [
"numpy.copy",
"numpy.zeros",
"numpy.transpose",
"numpy.expand_dims",
"numpy.array",
"numpy.reshape",
"numpy.swapaxes",
"numpy.squeeze",
"os.path.join",
"numpy.concatenate"
] | [((581, 594), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (588, 594), True, 'import numpy as np\n'), ((775, 830), 'numpy.reshape', 'np.reshape', (['pose', '[batch_size, n_joints, 3, seq_length]'], {}), '(pose, [batch_size, n_joints, 3, seq_length])\n', (785, 830), True, 'import numpy as np\n'), ((844, 878), 'numpy.transpose', 'np.transpose', (['pose_r', '[0, 1, 3, 2]'], {}), '(pose_r, [0, 1, 3, 2])\n', (856, 878), True, 'import numpy as np\n'), ((935, 962), 'numpy.zeros', 'np.zeros', (['(rest.shape + (2,))'], {}), '(rest.shape + (2,))\n', (943, 962), True, 'import numpy as np\n'), ((1082, 1127), 'numpy.concatenate', 'np.concatenate', (['[pose_3, rest_concat]'], {'axis': '(1)'}), '([pose_3, rest_concat], axis=1)\n', (1096, 1127), True, 'import numpy as np\n'), ((1907, 1920), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (1914, 1920), True, 'import numpy as np\n'), ((2097, 2129), 'numpy.transpose', 'np.transpose', (['pose', '[0, 1, 3, 2]'], {}), '(pose, [0, 1, 3, 2])\n', (2109, 2129), True, 'import numpy as np\n'), ((2143, 2201), 'numpy.reshape', 'np.reshape', (['pose_r', '[batch_size, n_joints * 3, seq_length]'], {}), '(pose_r, [batch_size, n_joints * 3, seq_length])\n', (2153, 2201), True, 'import numpy as np\n'), ((2328, 2368), 'numpy.concatenate', 'np.concatenate', (['[pose_1, rest_1]'], {'axis': '(1)'}), '([pose_1, rest_1], axis=1)\n', (2342, 2368), True, 'import numpy as np\n'), ((1645, 1665), 'numpy.squeeze', 'np.squeeze', (['data', '(-1)'], {}), '(data, -1)\n', (1655, 1665), True, 'import numpy as np\n'), ((4263, 4286), 'numpy.swapaxes', 'np.swapaxes', (['data', '(1)', '(2)'], {}), '(data, 1, 2)\n', (4274, 4286), True, 'import numpy as np\n'), ((4906, 4934), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (4920, 4934), True, 'import numpy as np\n'), ((5106, 5134), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (5120, 5134), True, 'import numpy as np\n'), 
((12535, 12550), 'numpy.copy', 'np.copy', (['inputs'], {}), '(inputs)\n', (12542, 12550), True, 'import numpy as np\n'), ((997, 1021), 'numpy.expand_dims', 'np.expand_dims', (['rest', '(-1)'], {}), '(rest, -1)\n', (1011, 1021), True, 'import numpy as np\n'), ((6339, 6361), 'numpy.zeros', 'np.zeros', (['input_.shape'], {}), '(input_.shape)\n', (6347, 6361), True, 'import numpy as np\n'), ((7647, 7668), 'numpy.copy', 'np.copy', (['self.inputs_'], {}), '(self.inputs_)\n', (7654, 7668), True, 'import numpy as np\n'), ((7691, 7712), 'numpy.copy', 'np.copy', (['self.targets'], {}), '(self.targets)\n', (7698, 7712), True, 'import numpy as np\n'), ((7735, 7752), 'numpy.copy', 'np.copy', (['self.ids'], {}), '(self.ids)\n', (7742, 7752), True, 'import numpy as np\n'), ((7775, 7793), 'numpy.copy', 'np.copy', (['self.mask'], {}), '(self.mask)\n', (7782, 7793), True, 'import numpy as np\n'), ((4191, 4237), 'os.path.join', 'os.path.join', (['self.splits_path[split]', 'db_name'], {}), '(self.splits_path[split], db_name)\n', (4203, 4237), False, 'import os\n'), ((12994, 13006), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13002, 13006), True, 'import numpy as np\n'), ((13008, 13020), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13016, 13020), True, 'import numpy as np\n'), ((13022, 13034), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13030, 13034), True, 'import numpy as np\n')] |
from .architecture import Network
import numpy as np
import math
import keras
import keras.backend as K
from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape
from keras.models import Model, load_model
from neuralparticles.tensorflow.tools.spatial_transformer import SpatialTransformer, stn_transform, stn_tranform_inv
from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask
from neuralparticles.tools.data_helpers import get_data
from neuralparticles.tensorflow.losses.tf_approxmatch import emd_loss
from neuralparticles.tensorflow.layers.mult_const_layer import MultConst
from neuralparticles.tensorflow.losses.repulsion_loss import repulsion_loss
def stack(X, axis, **kwargs):
    """Stack a list of tensors along ``axis`` inside a Keras ``Lambda`` layer.

    TensorFlow is imported lazily inside the wrapped op so the module can be
    imported without a TF session; extra kwargs are forwarded to ``Lambda``.
    """
    def _stack_op(tensors):
        import tensorflow as tf
        return tf.stack(tensors, axis=axis)
    return Lambda(_stack_op, **kwargs)(X)
def unstack(X, axis, **kwargs):
    """Unstack a tensor along ``axis`` inside a Keras ``Lambda`` layer.

    Mirrors :func:`stack`; TensorFlow is imported lazily in the wrapped op and
    extra kwargs are forwarded to ``Lambda``.
    """
    def _unstack_op(tensor):
        import tensorflow as tf
        return tf.unstack(tensor, axis=axis)
    return Lambda(_unstack_op, **kwargs)(X)
class PCPNet(Network):
    """Point-cloud upsampling network.

    A PointNet-style encoder (shared ``Conv1D`` layers plus spatial
    transformers) maps ``particle_cnt_src`` input particles to
    ``particle_cnt_dst`` output positions; optionally a soft truncation
    mask predicts how many of the outputs are valid.
    """

    def _init_vars(self, **kwargs):
        """Read hyper-parameters from kwargs; defaults mirror the original setup."""
        self.model = None
        self.decay = kwargs.get("decay", 0.0)
        self.learning_rate = kwargs.get("learning_rate", 1e-3)
        self.fac = kwargs.get("fac", 32)
        self.dropout = kwargs.get("dropout", 0.2)
        self.l2_reg = kwargs.get("l2_reg", 0.0)
        self.truncate = kwargs.get("truncate", False)
        self.pad_val = kwargs.get("pad_val", 0.0)
        self.mask = kwargs.get("mask", False)

        self.particle_cnt_src = kwargs.get("par_cnt")
        self.particle_cnt_dst = kwargs.get("par_cnt_ref")

        self.features = kwargs.get("features")
        self.dim = kwargs.get("dim", 2)
        self.factor = kwargs.get("factor")
        # Per-dimension upsampling factor (factor is the volume ratio).
        self.factor_d = math.pow(self.factor, 1 / self.dim)

        self.res = kwargs.get("res")
        self.lres = int(self.res / self.factor_d)

        self.norm_factor = kwargs.get("norm_factor")

    def _init_optimizer(self, epochs=1):
        self.optimizer = keras.optimizers.adam(lr=self.learning_rate, decay=self.decay)

    def _build_model(self):
        """Assemble ``self.model`` (inference) and ``self.train_model`` (training)."""
        input_xyz = Input((self.particle_cnt_src, 3), name="main_input")
        inputs = [input_xyz]

        if self.mask:
            # Zero out padded particles so they do not influence the network.
            mask = zero_mask(input_xyz, self.pad_val, name="mask")
            input_xyz = multiply([input_xyz, mask])

        # Input alignment via a quaternion spatial transformer.
        stn_input = Input((self.particle_cnt_src, 3))
        self.stn = SpatialTransformer(stn_input, self.particle_cnt_src, dropout=self.dropout, quat=True, norm=True)
        stn_model = Model(inputs=stn_input, outputs=self.stn, name="stn")
        self.stn = stn_model(input_xyz)

        input_xyz = stn_transform(self.stn, input_xyz, quat=True, name='trans')

        if len(self.features) > 0:
            # Auxiliary per-particle features ('v' carries 2 extra channels).
            inputs.append(Input((self.particle_cnt_src, len(self.features) + (2 if 'v' in self.features else 0)), name="aux_input"))
            aux_input = MultConst(1. / self.norm_factor)(inputs[1])
            if self.mask:
                aux_input = multiply([aux_input, mask])
            if 'v' in self.features:
                # Rotate the velocity channels with the same transform as the positions.
                aux_input = Lambda(lambda a: concatenate([stn_transform(self.stn, a[:, :, :3] / 100, quat=True), a[:, :, 3:]], axis=-1), name='aux_trans')(aux_input)
            input_xyz = concatenate([input_xyz, aux_input], axis=-1, name='input_concatenation')

        x = Conv1D(self.fac, 1)(input_xyz)
        x = Conv1D(self.fac, 1)(x)

        # BUGFIX: 'particle_cnt_src' and 'fac' were referenced without 'self.'
        # (NameError when this branch executed).
        x = stn_transform(SpatialTransformer(x, self.particle_cnt_src, self.fac, 1), x)

        x = Conv1D(self.fac, 1)(x)
        x = Conv1D(self.fac * 2, 1)(x)
        x = Conv1D(self.fac * 4, 1)(x)

        # BUGFIX: 'mask' only exists when masking is enabled; the original
        # applied it unconditionally (NameError with self.mask == False).
        if self.mask:
            x = multiply([x, mask])
        x = unstack(x, 1)
        x = add(x)

        # BUGFIX: the expansion list 'l' was never initialised before append.
        l = []
        for i in range(self.particle_cnt_dst // self.particle_cnt_src):
            tmp = Conv1D(self.fac * 32, 1, name="expansion_1_" + str(i + 1), kernel_regularizer=keras.regularizers.l2(self.l2_reg))(x)
            tmp = Conv1D(self.fac * 16, 1, name="expansion_2_" + str(i + 1), kernel_regularizer=keras.regularizers.l2(self.l2_reg))(tmp)
            l.append(tmp)
        x = concatenate(l, axis=1, name="pixel_conv") if self.particle_cnt_dst // self.particle_cnt_src > 1 else l[0]

        if self.truncate:
            # Small head predicting the valid particle count ('cnt') and the
            # corresponding soft truncation mask.
            x_t = Dropout(self.dropout)(x)
            x_t = Dense(self.fac, activation='elu', kernel_regularizer=keras.regularizers.l2(0.02), name="truncation_1")(x_t)
            x_t = Dropout(self.dropout)(x_t)

            b = np.ones(1, dtype='float32')
            W = np.zeros((self.fac, 1), dtype='float32')
            trunc = Dense(1, activation='elu', kernel_regularizer=keras.regularizers.l2(0.02), weights=[W, b], name="cnt")(x_t)
            out_mask = soft_trunc_mask(trunc, self.particle_cnt_dst, name="truncation_mask")

        if self.mask:
            # Normalize the pooled features by the number of valid particles.
            x = Lambda(lambda v: v[0] / K.sum(v[1], axis=1))([x, mask])

        x = Dropout(self.dropout)(x)
        x = Dense(self.particle_cnt_dst, kernel_regularizer=keras.regularizers.l2(0.02))(x)
        x = Dropout(self.dropout)(x)
        x = Dense(self.particle_cnt_dst, kernel_regularizer=keras.regularizers.l2(0.02))(x)
        x = Dropout(self.dropout)(x)
        x = Dense(3 * self.particle_cnt_dst, kernel_regularizer=keras.regularizers.l2(0.02))(x)

        x = Reshape((self.particle_cnt_dst, 3))(x)

        # BUGFIX: the helper imported at the top of the file is (mis)spelled
        # 'stn_tranform_inv'; 'stn_transform_inv' was a NameError.
        out = stn_tranform_inv(self.stn, x, quat=True)

        if self.truncate:
            out = multiply([out, Reshape((self.particle_cnt_dst, 1))(out_mask)], name="masked_coords")
            self.model = Model(inputs=inputs, outputs=[out, trunc])

            # Training output carries the truncation value in an extra row so
            # the loss can see it alongside the coordinates.
            trunc_exp = stack([trunc, trunc, trunc], 2)
            out = concatenate([out, trunc_exp], 1, name='points')
            self.train_model = Model(inputs=inputs, outputs=[out, trunc])
        else:
            self.model = Model(inputs=inputs, outputs=out)
            self.train_model = Model(inputs=inputs, outputs=out)

    def mask_loss(self, y_true, y_pred):
        """EMD loss tolerant of padded targets and truncation-augmented outputs."""
        if y_pred.get_shape()[1] > self.particle_cnt_dst:
            # Prediction carries the appended truncation row; drop it first.
            return (emd_loss(y_true * zero_mask(y_true, self.pad_val), y_pred[:, :self.particle_cnt_dst]) if self.mask else emd_loss(y_true, y_pred[:, :self.particle_cnt_dst]))
        else:
            return emd_loss(y_true * zero_mask(y_true, self.pad_val), y_pred * zero_mask(y_true, self.pad_val)) if self.mask else emd_loss(y_true, y_pred)

    def particle_metric(self, y_true, y_pred):
        """Metric variant of :meth:`mask_loss`; falls back to MSE for the
        scalar truncation output."""
        if y_pred.get_shape()[1] > self.particle_cnt_dst:
            return (emd_loss(y_true * zero_mask(y_true, self.pad_val), y_pred[:, :self.particle_cnt_dst]) if self.mask else emd_loss(y_true, y_pred[:, :self.particle_cnt_dst]))
        elif y_pred.get_shape()[1] < self.particle_cnt_dst:
            return keras.losses.mse(y_true, y_pred)
        else:
            return emd_loss(y_true * zero_mask(y_true, self.pad_val), y_pred * zero_mask(y_true, self.pad_val)) if self.mask else emd_loss(y_true, y_pred)

    def compile_model(self):
        if self.truncate:
            self.train_model.compile(loss=[self.mask_loss, 'mse'], optimizer=keras.optimizers.adam(lr=self.learning_rate, decay=self.decay), metrics=[self.particle_metric], loss_weights=[1.0, 1.0])
        else:
            self.train_model.compile(loss=self.mask_loss, optimizer=keras.optimizers.adam(lr=self.learning_rate, decay=self.decay))

    def _train(self, epochs, **kwargs):
        """Train either from a generator ('generator' kwarg) or from in-memory
        arrays ('src'/'ref' kwargs); returns the Keras history."""
        callbacks = kwargs.get("callbacks", [])
        if "generator" in kwargs:
            return self.train_model.fit_generator(generator=kwargs['generator'], validation_data=kwargs.get('val_generator'), use_multiprocessing=True, workers=6, verbose=1, callbacks=callbacks, epochs=epochs)
        else:
            src_data = kwargs.get("src")
            ref_data = kwargs.get("ref")
            val_split = kwargs.get("val_split", 0.1)
            batch_size = kwargs.get("batch_size", 32)

            # Ground-truth valid-particle ratio, used as target for the
            # truncation head and appended to the coordinates as extra rows.
            trunc_ref = np.count_nonzero(ref_data[:, :, :1] != self.pad_val, axis=1) / self.particle_cnt_dst
            trunc_ref_exp = np.repeat(trunc_ref, 3, axis=-1)
            trunc_ref_exp = np.expand_dims(trunc_ref_exp, axis=1)

            return self.train_model.fit(x=src_data, y=[np.concatenate([ref_data, trunc_ref_exp], axis=1), trunc_ref] if self.truncate else ref_data, validation_split=val_split,
                                        epochs=epochs, batch_size=batch_size, verbose=1, callbacks=callbacks)

    def predict(self, x, batch_size=32):
        return self.model.predict(x, batch_size=batch_size)

    def save_model(self, path):
        self.model.save(path)

    def load_model(self, path):
        # BUGFIX: the custom-objects dict referenced the undefined names
        # 'Interpolate' and 'SampleAndGroup' (NameError on every call); only
        # objects actually defined/imported in this module are registered.
        self.model = load_model(path, custom_objects={'mask_loss': self.mask_loss, 'MultConst': MultConst})

        if self.truncate:
            out, trunc = self.model.outputs
            trunc_exp = stack([trunc, trunc, trunc], 2)
            out = concatenate([out, trunc_exp], 1, name='points')
            self.train_model = Model(inputs=self.model.inputs, outputs=[out, trunc])
        else:
            self.train_model = Model(self.model.inputs, self.model.outputs)
| [
"keras.models.load_model",
"neuralparticles.tensorflow.tools.zero_mask.soft_trunc_mask",
"keras.regularizers.l2",
"numpy.ones",
"keras.models.Model",
"keras.layers.Input",
"keras.layers.concatenate",
"keras.layers.Reshape",
"keras.optimizers.adam",
"math.pow",
"neuralparticles.tensorflow.tools.s... | [((821, 843), 'tensorflow.stack', 'tf.stack', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (829, 843), True, 'import tensorflow as tf\n'), ((854, 875), 'keras.layers.Lambda', 'Lambda', (['tmp'], {}), '(tmp, **kwargs)\n', (860, 875), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((975, 999), 'tensorflow.unstack', 'tf.unstack', (['X'], {'axis': 'axis'}), '(X, axis=axis)\n', (985, 999), True, 'import tensorflow as tf\n'), ((1010, 1031), 'keras.layers.Lambda', 'Lambda', (['tmp'], {}), '(tmp, **kwargs)\n', (1016, 1031), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((1797, 1832), 'math.pow', 'math.pow', (['self.factor', '(1 / self.dim)'], {}), '(self.factor, 1 / self.dim)\n', (1805, 1832), False, 'import math\n'), ((2038, 2100), 'keras.optimizers.adam', 'keras.optimizers.adam', ([], {'lr': 'self.learning_rate', 'decay': 'self.decay'}), '(lr=self.learning_rate, decay=self.decay)\n', (2059, 2100), False, 'import keras\n'), ((2162, 2214), 'keras.layers.Input', 'Input', (['(self.particle_cnt_src, 3)'], {'name': '"""main_input"""'}), "((self.particle_cnt_src, 3), name='main_input')\n", (2167, 2214), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((2407, 2440), 'keras.layers.Input', 'Input', (['(self.particle_cnt_src, 3)'], {}), '((self.particle_cnt_src, 3))\n', (2412, 2440), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((2459, 2559), 'neuralparticles.tensorflow.tools.spatial_transformer.SpatialTransformer', 'SpatialTransformer', (['stn_input', 'self.particle_cnt_src'], {'dropout': 'self.dropout', 'quat': '(True)', 'norm': '(True)'}), '(stn_input, self.particle_cnt_src, dropout=self.dropout,\n quat=True, norm=True)\n', (2477, 2559), False, 'from 
neuralparticles.tensorflow.tools.spatial_transformer import SpatialTransformer, stn_transform, stn_tranform_inv\n'), ((2572, 2625), 'keras.models.Model', 'Model', ([], {'inputs': 'stn_input', 'outputs': 'self.stn', 'name': '"""stn"""'}), "(inputs=stn_input, outputs=self.stn, name='stn')\n", (2577, 2625), False, 'from keras.models import Model, load_model\n'), ((2687, 2746), 'neuralparticles.tensorflow.tools.spatial_transformer.stn_transform', 'stn_transform', (['self.stn', 'input_xyz'], {'quat': '(True)', 'name': '"""trans"""'}), "(self.stn, input_xyz, quat=True, name='trans')\n", (2700, 2746), False, 'from neuralparticles.tensorflow.tools.spatial_transformer import SpatialTransformer, stn_transform, stn_tranform_inv\n'), ((3637, 3656), 'keras.layers.multiply', 'multiply', (['[x, mask]'], {}), '([x, mask])\n', (3645, 3656), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((3694, 3700), 'keras.layers.add', 'add', (['x'], {}), '(x)\n', (3697, 3700), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((8585, 8737), 'keras.models.load_model', 'load_model', (['path'], {'custom_objects': "{'mask_loss': self.mask_loss, 'Interpolate': Interpolate, 'SampleAndGroup':\n SampleAndGroup, 'MultConst': MultConst}"}), "(path, custom_objects={'mask_loss': self.mask_loss, 'Interpolate':\n Interpolate, 'SampleAndGroup': SampleAndGroup, 'MultConst': MultConst})\n", (8595, 8737), False, 'from keras.models import Model, load_model\n'), ((2286, 2333), 'neuralparticles.tensorflow.tools.zero_mask.zero_mask', 'zero_mask', (['input_xyz', 'self.pad_val'], {'name': '"""mask"""'}), "(input_xyz, self.pad_val, name='mask')\n", (2295, 2333), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((2358, 2385), 'keras.layers.multiply', 'multiply', (['[input_xyz, mask]'], {}), '([input_xyz, mask])\n', (2366, 2385), False, 'from 
keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((3287, 3359), 'keras.layers.concatenate', 'concatenate', (['[input_xyz, aux_input]'], {'axis': '(-1)', 'name': '"""input_concatenation"""'}), "([input_xyz, aux_input], axis=-1, name='input_concatenation')\n", (3298, 3359), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((3373, 3392), 'keras.layers.Conv1D', 'Conv1D', (['self.fac', '(1)'], {}), '(self.fac, 1)\n', (3379, 3392), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((3416, 3435), 'keras.layers.Conv1D', 'Conv1D', (['self.fac', '(1)'], {}), '(self.fac, 1)\n', (3422, 3435), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((3466, 3513), 'neuralparticles.tensorflow.tools.spatial_transformer.SpatialTransformer', 'SpatialTransformer', (['x', 'particle_cnt_src', 'fac', '(1)'], {}), '(x, particle_cnt_src, fac, 1)\n', (3484, 3513), False, 'from neuralparticles.tensorflow.tools.spatial_transformer import SpatialTransformer, stn_transform, stn_tranform_inv\n'), ((3527, 3546), 'keras.layers.Conv1D', 'Conv1D', (['self.fac', '(1)'], {}), '(self.fac, 1)\n', (3533, 3546), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((3562, 3585), 'keras.layers.Conv1D', 'Conv1D', (['(self.fac * 2)', '(1)'], {}), '(self.fac * 2, 1)\n', (3568, 3585), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((3599, 3622), 'keras.layers.Conv1D', 'Conv1D', (['(self.fac * 4)', '(1)'], {}), '(self.fac * 4, 1)\n', (3605, 3622), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((4070, 4111), 'keras.layers.concatenate', 'concatenate', (['l'], {'axis': '(1)', 'name': 
'"""pixel_conv"""'}), "(l, axis=1, name='pixel_conv')\n", (4081, 4111), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((4431, 4458), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': '"""float32"""'}), "(1, dtype='float32')\n", (4438, 4458), True, 'import numpy as np\n'), ((4475, 4515), 'numpy.zeros', 'np.zeros', (['(self.fac, 1)'], {'dtype': '"""float32"""'}), "((self.fac, 1), dtype='float32')\n", (4483, 4515), True, 'import numpy as np\n'), ((4666, 4735), 'neuralparticles.tensorflow.tools.zero_mask.soft_trunc_mask', 'soft_trunc_mask', (['trunc', 'self.particle_cnt_dst'], {'name': '"""truncation_mask"""'}), "(trunc, self.particle_cnt_dst, name='truncation_mask')\n", (4681, 4735), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((4849, 4870), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (4856, 4870), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((4978, 4999), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (4985, 4999), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((5107, 5128), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (5114, 5128), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((5247, 5282), 'keras.layers.Reshape', 'Reshape', (['(self.particle_cnt_dst, 3)'], {}), '((self.particle_cnt_dst, 3))\n', (5254, 5282), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((5493, 5535), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[out, trunc]'}), '(inputs=inputs, outputs=[out, trunc])\n', (5498, 5535), False, 'from keras.models import Model, load_model\n'), ((5609, 5656), 
'keras.layers.concatenate', 'concatenate', (['[out, trunc_exp]', '(1)'], {'name': '"""points"""'}), "([out, trunc_exp], 1, name='points')\n", (5620, 5656), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((5688, 5730), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[out, trunc]'}), '(inputs=inputs, outputs=[out, trunc])\n', (5693, 5730), False, 'from keras.models import Model, load_model\n'), ((5770, 5803), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'out'}), '(inputs=inputs, outputs=out)\n', (5775, 5803), False, 'from keras.models import Model, load_model\n'), ((5835, 5868), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'out'}), '(inputs=inputs, outputs=out)\n', (5840, 5868), False, 'from keras.models import Model, load_model\n'), ((7975, 8007), 'numpy.repeat', 'np.repeat', (['trunc_ref', '(3)'], {'axis': '(-1)'}), '(trunc_ref, 3, axis=-1)\n', (7984, 8007), True, 'import numpy as np\n'), ((8036, 8073), 'numpy.expand_dims', 'np.expand_dims', (['trunc_ref_exp'], {'axis': '(1)'}), '(trunc_ref_exp, axis=1)\n', (8050, 8073), True, 'import numpy as np\n'), ((8878, 8925), 'keras.layers.concatenate', 'concatenate', (['[out, trunc_exp]', '(1)'], {'name': '"""points"""'}), "([out, trunc_exp], 1, name='points')\n", (8889, 8925), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((8957, 9010), 'keras.models.Model', 'Model', ([], {'inputs': 'self.model.inputs', 'outputs': '[out, trunc]'}), '(inputs=self.model.inputs, outputs=[out, trunc])\n', (8962, 9010), False, 'from keras.models import Model, load_model\n'), ((9056, 9100), 'keras.models.Model', 'Model', (['self.model.inputs', 'self.model.outputs'], {}), '(self.model.inputs, self.model.outputs)\n', (9061, 9100), False, 'from keras.models import Model, load_model\n'), ((2943, 2976), 
'neuralparticles.tensorflow.layers.mult_const_layer.MultConst', 'MultConst', (['(1.0 / self.norm_factor)'], {}), '(1.0 / self.norm_factor)\n', (2952, 2976), False, 'from neuralparticles.tensorflow.layers.mult_const_layer import MultConst\n'), ((3039, 3066), 'keras.layers.multiply', 'multiply', (['[aux_input, mask]'], {}), '([aux_input, mask])\n', (3047, 3066), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((4219, 4240), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (4226, 4240), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((4388, 4409), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (4395, 4409), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((6096, 6147), 'neuralparticles.tensorflow.losses.tf_approxmatch.emd_loss', 'emd_loss', (['y_true', 'y_pred[:, :self.particle_cnt_dst]'], {}), '(y_true, y_pred[:, :self.particle_cnt_dst])\n', (6104, 6147), False, 'from neuralparticles.tensorflow.losses.tf_approxmatch import emd_loss\n'), ((6313, 6337), 'neuralparticles.tensorflow.losses.tf_approxmatch.emd_loss', 'emd_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (6321, 6337), False, 'from neuralparticles.tensorflow.losses.tf_approxmatch import emd_loss\n'), ((6571, 6622), 'neuralparticles.tensorflow.losses.tf_approxmatch.emd_loss', 'emd_loss', (['y_true', 'y_pred[:, :self.particle_cnt_dst]'], {}), '(y_true, y_pred[:, :self.particle_cnt_dst])\n', (6579, 6622), False, 'from neuralparticles.tensorflow.losses.tf_approxmatch import emd_loss\n'), ((6702, 6734), 'keras.losses.mse', 'keras.losses.mse', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (6718, 6734), False, 'import keras\n'), ((7865, 7925), 'numpy.count_nonzero', 'np.count_nonzero', (['(ref_data[:, :, :1] != self.pad_val)'], {'axis': '(1)'}), 
'(ref_data[:, :, :1] != self.pad_val, axis=1)\n', (7881, 7925), True, 'import numpy as np\n'), ((4934, 4961), 'keras.regularizers.l2', 'keras.regularizers.l2', (['(0.02)'], {}), '(0.02)\n', (4955, 4961), False, 'import keras\n'), ((5063, 5090), 'keras.regularizers.l2', 'keras.regularizers.l2', (['(0.02)'], {}), '(0.02)\n', (5084, 5090), False, 'import keras\n'), ((5194, 5221), 'keras.regularizers.l2', 'keras.regularizers.l2', (['(0.02)'], {}), '(0.02)\n', (5215, 5221), False, 'import keras\n'), ((6879, 6903), 'neuralparticles.tensorflow.losses.tf_approxmatch.emd_loss', 'emd_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (6887, 6903), False, 'from neuralparticles.tensorflow.losses.tf_approxmatch import emd_loss\n'), ((7037, 7099), 'keras.optimizers.adam', 'keras.optimizers.adam', ([], {'lr': 'self.learning_rate', 'decay': 'self.decay'}), '(lr=self.learning_rate, decay=self.decay)\n', (7058, 7099), False, 'import keras\n'), ((7239, 7301), 'keras.optimizers.adam', 'keras.optimizers.adam', ([], {'lr': 'self.learning_rate', 'decay': 'self.decay'}), '(lr=self.learning_rate, decay=self.decay)\n', (7260, 7301), False, 'import keras\n'), ((3862, 3896), 'keras.regularizers.l2', 'keras.regularizers.l2', (['self.l2_reg'], {}), '(self.l2_reg)\n', (3883, 3896), False, 'import keras\n'), ((3991, 4025), 'keras.regularizers.l2', 'keras.regularizers.l2', (['self.l2_reg'], {}), '(self.l2_reg)\n', (4012, 4025), False, 'import keras\n'), ((4315, 4342), 'keras.regularizers.l2', 'keras.regularizers.l2', (['(0.02)'], {}), '(0.02)\n', (4336, 4342), False, 'import keras\n'), ((4582, 4609), 'keras.regularizers.l2', 'keras.regularizers.l2', (['(0.02)'], {}), '(0.02)\n', (4603, 4609), False, 'import keras\n'), ((5399, 5434), 'keras.layers.Reshape', 'Reshape', (['(self.particle_cnt_dst, 1)'], {}), '((self.particle_cnt_dst, 1))\n', (5406, 5434), False, 'from keras.layers import Input, multiply, concatenate, Conv1D, Lambda, add, Dropout, Dense, Reshape\n'), ((6011, 6042), 
'neuralparticles.tensorflow.tools.zero_mask.zero_mask', 'zero_mask', (['y_true', 'self.pad_val'], {}), '(y_true, self.pad_val)\n', (6020, 6042), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((6220, 6251), 'neuralparticles.tensorflow.tools.zero_mask.zero_mask', 'zero_mask', (['y_true', 'self.pad_val'], {}), '(y_true, self.pad_val)\n', (6229, 6251), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((6262, 6293), 'neuralparticles.tensorflow.tools.zero_mask.zero_mask', 'zero_mask', (['y_true', 'self.pad_val'], {}), '(y_true, self.pad_val)\n', (6271, 6293), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((6486, 6517), 'neuralparticles.tensorflow.tools.zero_mask.zero_mask', 'zero_mask', (['y_true', 'self.pad_val'], {}), '(y_true, self.pad_val)\n', (6495, 6517), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((4797, 4816), 'keras.backend.sum', 'K.sum', (['v[1]'], {'axis': '(1)'}), '(v[1], axis=1)\n', (4802, 4816), True, 'import keras.backend as K\n'), ((6786, 6817), 'neuralparticles.tensorflow.tools.zero_mask.zero_mask', 'zero_mask', (['y_true', 'self.pad_val'], {}), '(y_true, self.pad_val)\n', (6795, 6817), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((6828, 6859), 'neuralparticles.tensorflow.tools.zero_mask.zero_mask', 'zero_mask', (['y_true', 'self.pad_val'], {}), '(y_true, self.pad_val)\n', (6837, 6859), False, 'from neuralparticles.tensorflow.tools.zero_mask import zero_mask, soft_trunc_mask\n'), ((8141, 8190), 'numpy.concatenate', 'np.concatenate', (['[ref_data, trunc_ref_exp]'], {'axis': '(1)'}), '([ref_data, trunc_ref_exp], axis=1)\n', (8155, 8190), True, 'import numpy as np\n'), ((3163, 3216), 'neuralparticles.tensorflow.tools.spatial_transformer.stn_transform', 'stn_transform', (['self.stn', '(a[:, :, :3] / 100)'], {'quat': 
'(True)'}), '(self.stn, a[:, :, :3] / 100, quat=True)\n', (3176, 3216), False, 'from neuralparticles.tensorflow.tools.spatial_transformer import SpatialTransformer, stn_transform, stn_tranform_inv\n')] |
from numba import njit, boolean, int64, float64, optional
from numba.experimental import jitclass
import numpy as np
@njit
def cluster(data, n_clusters, ncat, maxit=100):
    """
    Run KMeans on ``data`` and return the cluster label of each sample.

    Parameters
    ----------
    data: numpy array
        Rows are instances and columns variables.
    n_clusters: int
        Number of clusters.
    ncat:
        The number of categories of each variable.
    maxit: int
        The maximum number of iterations of the KMeans algorithm.
    """
    fitted = Kmeans(n_clusters, ncat, None, maxit).fit(data)
    labels = fitted.labels.ravel()
    assert labels.shape[0] == data.shape[0]
    return labels
@njit
def nan_to_num(data):
    """Replace every NaN in ``data`` with zero and return the array
    reshaped to its original shape (mutates in place when ``ravel``
    returns a view)."""
    original_shape = data.shape
    flat = data.ravel()
    flat[np.isnan(flat)] = 0
    return flat.reshape(original_shape)
@njit
def get_distance(a, b, ncat):
    """
    Distance between two samples under a mixed-variable metric: a NaN in
    either coordinate contributes 1, categorical variables (ncat > 1)
    contribute the absolute difference capped at 1, and continuous
    variables contribute the squared difference.
    """
    total = 0.
    for j in range(len(a)):
        if np.isnan(a[j]) or np.isnan(b[j]):
            total += 1
        elif ncat[j] > 1:
            total += min(abs(a[j] - b[j]), 1)
        else:
            total += (a[j] - b[j]) ** 2
    return total
@njit
def assign(point, centroids, ncat):
    """
    Return ``(label, distance)`` of the centroid closest to ``point``.

    Bugfixes: ``np.Inf`` (an alias removed in NumPy 2.0) replaced with
    ``np.inf``, and ``label`` is initialised so it can never be referenced
    unbound when ``centroids`` is empty.
    """
    label = 0
    minDist = np.inf
    for i in range(centroids.shape[0]):
        dist = get_distance(point, centroids[i, :], ncat)
        if dist < minDist:
            minDist = dist
            label = i
    return label, minDist
@njit
def assign_clusters(data, centroids, ncat):
    """Assign every row of ``data`` to its nearest centroid.

    Returns the per-row labels and the summed assignment distance.
    """
    n_samples = data.shape[0]
    labels = np.zeros(n_samples, dtype=np.int64)
    total_error = 0
    for row in range(n_samples):
        labels[row], dist = assign(data[row, :], centroids, ncat)
        total_error += dist
    return labels, total_error
@njit
def get_error(centroid, data, ncat):
    """Sum of :func:`get_distance` from ``centroid`` to every row of ``data``."""
    total = 0
    for row in range(data.shape[0]):
        total += get_distance(centroid, data[row, :], ncat)
    return total
# numba jitclass: every attribute must be declared with an explicit numba
# type in this spec before it can be used inside the compiled class.
@jitclass([
    ('k', int64),
    ('ncat', int64[:]),
    ('data', float64[:, :]),
    ('nvars', int64),
    ('centroids', float64[:, :]),
    ('labels', int64[:]),
    ('error', float64),
    ('error1', float64),
    ('error2', float64),
    ('it', int64),
    ('maxit', int64),
    ('thr', optional(float64)),
    ])
class Kmeans:
    def __init__(self, k, ncat, thr=None, maxit=100):
        """
        Minimal KMeans implementation in numba.

        Handles missing values (NaN) and mixed continuous/categorical
        variables through the module-level ``get_distance`` metric.

        Parameters
        ----------
        k: int
            Number of clusters.
        ncat: numpy array
            Number of categories of each variable.
        thr:
            Threshold at which the algorithm is considered to have converged.
            If None, a data-size dependent default is chosen in ``fit``.
        maxit: int
            Maximum number of iterations.
        """
        self.k = k
        self.nvars = 0
        self.ncat = ncat
        # Empty placeholders; the real arrays are installed by fit().
        self.data = np.empty((0, 0), dtype=np.float64)
        self.centroids = np.empty((0, 0), dtype=np.float64)
        self.labels = np.empty((0), dtype=np.int64)
        self.thr = thr
        self.error = 0
        self.error1 = 0  # total error after the centroid-update step
        self.error2 = 0  # total error after the assignment step
        self.it = 0
        self.maxit = maxit

    def init_centroids(self):
        """Seed the centroids with ``k`` distinct rows sampled from the data."""
        seed_idx = np.random.choice(self.data.shape[0], self.k, replace=False)
        self.centroids = self.data[seed_idx, :]
        self.error1 = 0

    def update_centroids(self):
        """Recompute each centroid as the NaN-aware mean of its cluster and
        accumulate the resulting error into ``error1``."""
        error = 0
        for i in range(self.k):
            cluster_data = self.data[self.labels == i, :]
            if cluster_data.shape[0] > 0:
                for j in range(self.nvars):
                    # nanmean of an all-NaN column is undefined; fall back to 0.
                    if np.all(np.isnan(cluster_data[:, j])):
                        self.centroids[i, j] = 0.
                    else:
                        self.centroids[i, j] = np.nanmean(cluster_data[:, j])
                error += get_error(self.centroids[i, :], cluster_data, self.ncat)
        self.error1 = error

    def assign_clusters(self):
        """Re-assign every sample to its nearest centroid via the
        module-level ``assign_clusters`` helper; updates ``error2``."""
        self.labels, self.error2 = assign_clusters(self.data, self.centroids, self.ncat)

    def fit(self, data):
        """Run Lloyd iterations on ``data`` until the error change drops
        below ``thr`` or ``maxit`` iterations are reached; returns self."""
        assert data.shape[0] >= self.k, "Too few data points."
        self.data = data
        self.nvars = data.shape[1]
        if self.thr is None:
            self.thr = data.size * 1e-6  # weird heuristic
        self.init_centroids()
        self.assign_clusters()
        self.it = 0
        while (abs(self.error1-self.error2) > self.thr) and (self.it < self.maxit):
            self.it += 1
            self.update_centroids()
            self.assign_clusters()
        return self
def makeRandomPoint(n, lower, upper, missing_perc=0.0):
points = np.random.normal(loc=upper, size=[lower, n])
missing_mask = np.full(points.size, False)
missing_mask[:int(missing_perc * points.size)] = True
np.random.shuffle(missing_mask)
missing_mask = missing_mask.astype(bool)
points.ravel()[missing_mask] = np.nan
return points
| [
"numpy.full",
"numpy.empty",
"numpy.zeros",
"numba.optional",
"numpy.isnan",
"numpy.random.normal",
"numpy.random.choice",
"numpy.random.shuffle",
"numpy.nanmean"
] | [((1557, 1596), 'numpy.zeros', 'np.zeros', (['data.shape[0]'], {'dtype': 'np.int64'}), '(data.shape[0], dtype=np.int64)\n', (1565, 1596), True, 'import numpy as np\n'), ((4638, 4682), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'upper', 'size': '[lower, n]'}), '(loc=upper, size=[lower, n])\n', (4654, 4682), True, 'import numpy as np\n'), ((4702, 4729), 'numpy.full', 'np.full', (['points.size', '(False)'], {}), '(points.size, False)\n', (4709, 4729), True, 'import numpy as np\n'), ((4792, 4823), 'numpy.random.shuffle', 'np.random.shuffle', (['missing_mask'], {}), '(missing_mask)\n', (4809, 4823), True, 'import numpy as np\n'), ((861, 875), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (869, 875), True, 'import numpy as np\n'), ((2911, 2945), 'numpy.empty', 'np.empty', (['(0, 0)'], {'dtype': 'np.float64'}), '((0, 0), dtype=np.float64)\n', (2919, 2945), True, 'import numpy as np\n'), ((2971, 3005), 'numpy.empty', 'np.empty', (['(0, 0)'], {'dtype': 'np.float64'}), '((0, 0), dtype=np.float64)\n', (2979, 3005), True, 'import numpy as np\n'), ((3028, 3055), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.int64'}), '(0, dtype=np.int64)\n', (3036, 3055), True, 'import numpy as np\n'), ((3249, 3308), 'numpy.random.choice', 'np.random.choice', (['self.data.shape[0]', 'self.k'], {'replace': '(False)'}), '(self.data.shape[0], self.k, replace=False)\n', (3265, 3308), True, 'import numpy as np\n'), ((1007, 1021), 'numpy.isnan', 'np.isnan', (['a[i]'], {}), '(a[i])\n', (1015, 1021), True, 'import numpy as np\n'), ((1025, 1039), 'numpy.isnan', 'np.isnan', (['b[i]'], {}), '(b[i])\n', (1033, 1039), True, 'import numpy as np\n'), ((2202, 2219), 'numba.optional', 'optional', (['float64'], {}), '(float64)\n', (2210, 2219), False, 'from numba import njit, boolean, int64, float64, optional\n'), ((3638, 3666), 'numpy.isnan', 'np.isnan', (['cluster_data[:, j]'], {}), '(cluster_data[:, j])\n', (3646, 3666), True, 'import numpy as np\n'), ((3792, 3822), 
'numpy.nanmean', 'np.nanmean', (['cluster_data[:, j]'], {}), '(cluster_data[:, j])\n', (3802, 3822), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Spectral Bandpass Dependence Correction
=======================================
Defines objects to perform spectral bandpass dependence correction.
The following correction methods are available:
- :func:`colour.colorimetry.bandpass_correction_Stearns1988`:
*Stearns and Stearns (1988)* spectral bandpass dependence correction
method.
- :attr:`colour.BANDPASS_CORRECTION_METHODS`: Supported spectral bandpass
dependence correction methods.
- :func:`colour.bandpass_correction`: Spectral bandpass dependence
correction using given method.
See Also
--------
`Spectral Bandpass Dependence Correction Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/colorimetry/correction.ipynb>`_
References
----------
- :cite:`Stearns1988a` : <NAME>., & <NAME>. (1988). An example
of a method for correcting radiance data for Bandpass error. Color Research
& Application, 13(4), 257-259. doi:10.1002/col.5080130410
- :cite:`Westland2012f` : <NAME>., <NAME>., & <NAME>. (2012).
Correction for Spectral Bandpass. In Computational Colour Science Using
MATLAB (2nd ed., p. 38). ISBN:978-0-470-66569-5
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.utilities import CaseInsensitiveMapping
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'bandpass_correction_Stearns1988', 'BANDPASS_CORRECTION_METHODS',
'bandpass_correction'
]
ALPHA_STEARNS = 0.083


def bandpass_correction_Stearns1988(sd):
    """
    Implements spectral bandpass dependence correction on given spectral
    distribution using *Stearns and Stearns (1988)* method.

    Parameters
    ----------
    sd : SpectralDistribution
        Spectral distribution.

    Returns
    -------
    SpectralDistribution
        Spectral bandpass dependence corrected spectral distribution
        (``sd`` is modified in place and returned).

    References
    ----------
    :cite:`Stearns1988a`, :cite:`Westland2012f`
    """

    a = ALPHA_STEARNS
    corrected = np.copy(sd.values)
    last = len(corrected) - 1

    # Endpoint corrections use the single adjacent sample.
    corrected[0] = (1 + a) * corrected[0] - a * corrected[1]
    corrected[last] = (1 + a) * corrected[last] - a * corrected[last - 1]

    # Interior samples are corrected sequentially: each step reads the
    # already-corrected neighbours, matching the reference implementation.
    for i in range(1, last):
        corrected[i] = (-a * corrected[i - 1] +
                        (1 + 2 * a) * corrected[i] -
                        a * corrected[i + 1])

    sd.values = corrected

    return sd
BANDPASS_CORRECTION_METHODS = CaseInsensitiveMapping({
'Stearns 1988': bandpass_correction_Stearns1988
})
BANDPASS_CORRECTION_METHODS.__doc__ = """
Supported spectral bandpass dependence correction methods.
BANDPASS_CORRECTION_METHODS : CaseInsensitiveMapping
**{'Stearns 1988', }**
"""
def bandpass_correction(sd, method='Stearns 1988'):
    """
    Implements spectral bandpass dependence correction on given spectral
    distribution using given method.

    Parameters
    ----------
    sd : SpectralDistribution
        Spectral distribution.
    method : unicode, optional
        {'Stearns 1988', }
        Correction method.

    Returns
    -------
    SpectralDistribution
        Spectral bandpass dependence corrected spectral distribution.

    Raises
    ------
    KeyError
        If the given method is not defined in
        :attr:`BANDPASS_CORRECTION_METHODS`.

    References
    ----------
    :cite:`Stearns1988a`, :cite:`Westland2012f`

    Examples
    --------
    >>> from colour import SpectralDistribution
    >>> from colour.utilities import numpy_print_options
    >>> data = {
    ...     500: 0.0651,
    ...     520: 0.0705,
    ...     540: 0.0772,
    ...     560: 0.0870,
    ...     580: 0.1128,
    ...     600: 0.1360
    ... }
    >>> with numpy_print_options(suppress=True):
    ...     bandpass_correction(SpectralDistribution(data))
    ... # doctest: +ELLIPSIS
    SpectralDistribution([[ 500.        ,    0.0646518...],
                          [ 520.        ,    0.0704293...],
                          [ 540.        ,    0.0769485...],
                          [ 560.        ,    0.0856928...],
                          [ 580.        ,    0.1129644...],
                          [ 600.        ,    0.1379256...]],
                         interpolator=SpragueInterpolator,
                         interpolator_args={},
                         extrapolator=Extrapolator,
                         extrapolator_args={...})
    """

    # Index the registry rather than using ``get``: an unknown method now
    # raises an informative ``KeyError`` naming the offending key instead of
    # the previous ``TypeError: 'NoneType' object is not callable``.
    function = BANDPASS_CORRECTION_METHODS[method]

    return function(sd)
| [
"colour.utilities.CaseInsensitiveMapping",
"numpy.copy"
] | [((3710, 3783), 'colour.utilities.CaseInsensitiveMapping', 'CaseInsensitiveMapping', (["{'Stearns 1988': bandpass_correction_Stearns1988}"], {}), "({'Stearns 1988': bandpass_correction_Stearns1988})\n", (3732, 3783), False, 'from colour.utilities import CaseInsensitiveMapping\n'), ((3260, 3278), 'numpy.copy', 'np.copy', (['sd.values'], {}), '(sd.values)\n', (3267, 3278), True, 'import numpy as np\n')] |
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import njit, prange
import numba_dppy as dppy
from numba_dppy import config
import unittest
from numba.tests.support import captured_stdout
import dpctl
from . import _helper
def prange_example():
    """Fill the first half of an array with elementwise sums using ``prange``.

    Returns an array of length 10 whose first five entries are ``2.0``
    (sum of two all-ones arrays) and whose remaining entries stay ``1.0``.
    """
    size = 10
    out = np.ones(size, dtype=np.float64)
    lhs = np.ones(size, dtype=np.float64)
    rhs = np.ones(size, dtype=np.float64)
    # Only the first half of the range is processed by the parallel loop.
    for idx in prange(size // 2):
        out[idx] = lhs[idx] + rhs[idx]
    return out
@unittest.skipUnless(_helper.has_gpu_queues(), "test only on GPU system")
class TestParforMessage(unittest.TestCase):
    """Checks that DEBUG mode reports parfor offloading to a GPU queue."""

    def test_parfor_message(self):
        with dpctl.device_context("opencl:gpu") as queue:
            # Enable debug output only around the compilation/execution so
            # the "Parfor offloaded" message is captured from stdout.
            config.DEBUG = 1
            compiled_fn = njit(prange_example)
            with captured_stdout() as stdout:
                compiled_fn()
            config.DEBUG = 0
            self.assertIn("Parfor offloaded ", stdout.getvalue())
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"unittest.main",
"dpctl.device_context",
"numba.njit",
"numpy.ones",
"numba.tests.support.captured_stdout",
"numba.prange"
] | [((831, 859), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (838, 859), True, 'import numpy as np\n'), ((870, 898), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (877, 898), True, 'import numpy as np\n'), ((909, 937), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (916, 937), True, 'import numpy as np\n'), ((953, 967), 'numba.prange', 'prange', (['(n // 2)'], {}), '(n // 2)\n', (959, 967), False, 'from numba import njit, prange\n'), ((1497, 1512), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1510, 1512), False, 'import unittest\n'), ((1178, 1212), 'dpctl.device_context', 'dpctl.device_context', (['"""opencl:gpu"""'], {}), "('opencl:gpu')\n", (1198, 1212), False, 'import dpctl\n'), ((1277, 1297), 'numba.njit', 'njit', (['prange_example'], {}), '(prange_example)\n', (1281, 1297), False, 'from numba import njit, prange\n'), ((1316, 1333), 'numba.tests.support.captured_stdout', 'captured_stdout', ([], {}), '()\n', (1331, 1333), False, 'from numba.tests.support import captured_stdout\n')] |
from os import listdir
import subprocess
import fire
import matplotlib.pyplot as plt
import random
import pandas as pd
import re
import json
import numpy as np
from functools import reduce
from tqdm import tqdm
import pickle
from src.data.loop_ast import *
from src.data.schedule import *
class Stats():
    """Collects, copies and summarizes execution statistics of Tiramisu-style
    generated programs and their schedules stored on disk.

    The on-disk layout is assumed to be
    ``<data_path>/<folder>/<program>/<schedule>/`` with per-program and
    per-schedule JSON descriptions plus an ``exec_times.txt`` file of raw
    timings (one measurement per line).
    """
    def __init__(self, data_path, programs_folder="programs", stats_folder="programs"):
        # data_path: root directory containing the programs/stats folders.
        # programs_folder / stats_folder: sub-directory names (trailing '/'
        # is appended so later concatenations form valid paths).
        #self.tiramisu_root = tiramisu_root +'/'
        self.data_path = data_path+'/'
        self.programs_folder = programs_folder + '/'
        self.stats_folder = stats_folder + '/'
        # If a 'stats' directory already exists under data_path, read from it
        # instead of the raw programs folder.
        self.stats_folder_created = ('stats' in listdir(self.data_path))
        # Maps the integer 'type' field of a program JSON to a readable label.
        self.computation_types_dict = {
            0: "arithmetic exp",
            1: "inputs",
            2: "stencil"
        }
    def get_all_programs_schedules(self):
        """Yield every ``(program, schedule)`` pair found on disk."""
        for program in self.get_all_programs():
            for schedule in self.get_all_schedules(program):
                yield (program, schedule)
    def get_all_programs(self):
        """Yield the name of every program directory."""
        folder = self.stats_folder if self.stats_folder_created else self.programs_folder
        programs = listdir(self.data_path + folder)
        for program in programs:
            yield program
    # def get_all_programs(self):
    #     folder = self.stats_folder if self.stats_folder_created else self.programs_folder
    #     programs = listdir(self.data_path + folder)
    #     programs = []
    #     batches = listdir(self.data_path + folder)
    #     batches = filter(lambda x: not x.startswith('-'), batches)
    #     for b in batches:
    #         batch_programs = listdir(self.data_path + folder + '/' + b + '/')
    #         for p in batch_programs:
    #             p = b+'/'+p
    #             programs.append(p)
    #     for program in programs:
    #         yield program
    def get_all_schedules(self, program):
        """Yield every schedule directory of *program* (JSON files excluded)."""
        folder = self.stats_folder if self.stats_folder_created else self.programs_folder
        schedules = listdir(self.data_path + folder +'/'+ program)
        schedules = filter(lambda x:not x.endswith('.json'), schedules)
        for schedule in schedules:
            yield schedule
    def create_stats_dir(self):
        """Create an empty stats directory tree mirroring the programs tree."""
        full_path = self.data_path + self.stats_folder + '/'
        for program,schedule in self.get_all_programs_schedules():
            self.exec_command("mkdir -p " + full_path + program + "/" + schedule)
        self.stats_folder_created = True
    def copy_results(self):
        """Copy the JSON descriptions and timing files from the programs
        folder into the stats folder, creating empty timing files for
        schedules whose measurements are missing."""
        for program,schedule in self.get_all_programs_schedules():
            program_json = self.data_path + self.programs_folder + "/" + program + "/" + program + '.json'
            schedule_json = self.data_path + self.programs_folder + "/" + program \
                + "/" + schedule + '/' + schedule + '.json'
            exec_times = self.data_path + self.programs_folder + "/" + program \
                + "/" + schedule + '/' + 'exec_times.txt'
            self.exec_command("cp " + program_json + ' ' \
                + self.data_path + self.stats_folder + "/" + program + "/" + program + '.json')
            self.exec_command("cp " + schedule_json + ' ' \
                + self.data_path + self.stats_folder + "/" + program \
                +"/" + schedule + '/' + schedule + '.json')
            try:
                self.exec_command("cp " + exec_times + ' ' \
                    + self.data_path + self.stats_folder + "/" + program \
                    + "/" + schedule + '/exec_times.txt')
            except FileNotFoundError:
                print("didnt find exec times of : " + program + '/' + schedule)
                self.exec_command("touch " +self.data_path + self.stats_folder \
                    + '/' + program + '/' + schedule + '/exec_times.txt')
    def read_times(self):
        """Return a DataFrame of summary statistics (min/max/mean/median/std)
        of the raw execution times of every (program, schedule) pair."""
        full_path = self.data_path + self.stats_folder + '/'
        results = {}
        # NOTE(review): total=1277934 is a hard-coded dataset size used only
        # for the progress bar — confirm it matches the current dataset.
        for program,schedule in tqdm(self.get_all_programs_schedules(), total=1277934):
            exec_times = np.array(self.read_exec_times(program, schedule), dtype='float64')
            results[(program, schedule)] = {
                'Min': min(exec_times),
                'Max': max(exec_times),
                'Mean': np.mean(exec_times),
                'Median': np.median(exec_times),
                'Std': np.std(exec_times),
                'N_samples':len(exec_times),
                'Times':exec_times
            }
        keys, vals = list(zip(*results.items()))
        index = pd.MultiIndex.from_tuples(keys, names=("program", "schedule"))
        return pd.DataFrame(list(vals), index=index)
    def read_results(self):
        """Build (and store in ``self.results``) a DataFrame describing every
        (program, schedule) pair: execution time, program/schedule types,
        computation size and the transformation parameters, plus derived
        speedup columns from :meth:`calculate_stats`."""
        results = {}
        full_path = self.data_path + self.stats_folder + '/'
        for program,schedule in self.get_all_programs_schedules():
            #Getting representations of the program and the schedule
            program_json = self.read_program_json(program)
            schedule_json = self.read_schedule_json(program, schedule)
            exec_time = self.read_exec_time(program, schedule)
            type_program = self.type_of_program(program_json)
            type_schedule = self.type_of_schedule(schedule_json)
            comp_size = self.computation_size(program_json)
            interchange_params = self.interchange_params(schedule_json)
            tiling_params = self.tiling_params(schedule_json)
            unrolling_params = self.unrolling_params(schedule_json)
            results[program, schedule] = {
                "exec_time": exec_time,
                "program_type": type_program,
                "schedule_type": type_schedule,
                "comp_size": comp_size,
                "interchange": interchange_params,
                "tiling":tiling_params,
                "unrolling":unrolling_params
            }
        keys, vals = list(zip(*results.items()))
        index = pd.MultiIndex.from_tuples(keys, names=("program", "schedule"))
        order_of_cols = ['exec_time', 'no_schedule', 'speedup',
                         'program_type', 'schedule_type',
                         'interchange', 'tiling', 'unrolling']
        self.results = pd.DataFrame(list(vals), index=index)
        self.results = self.calculate_stats()[order_of_cols]
        return self.results
    def computation_size(self, program_repr):
        """Return the product of the extents of the loops used by the
        program, i.e. the total number of iteration-space points."""
        loops = program_repr['loops']['loops_array']
        iterators = [loop['loop_it'] for loop in loops]
        iterators = [it for it in program_repr['iterators']['iterators_array'] if it['it_id'] in iterators]
        iterators = map(lambda x: x['upper_bound'] - x['lower_bound'], iterators)
        return reduce(lambda x, y: x*y, iterators)
    def type_of_program(self, program_representation):
        """Map the program's integer 'type' field to a readable label."""
        return self.computation_types_dict[program_representation['type']]
    def type_of_schedule(self, schedule_representation):
        """Return a string '(i, t, u)' of 0/1 flags for whether the schedule
        applies interchange, tiling and unrolling respectively."""
        interchange = int((len(schedule_representation['interchange_dims']) > 0))
        tiling = int((schedule_representation['tiling'] is not None))
        unrolling = int((schedule_representation['unrolling_factor'] is not None))
        return str((interchange, tiling, unrolling))
    def read_program_json(self, program):
        """Load and return the program's JSON description as a dict."""
        full_path = self.data_path + self.stats_folder + '/' + program + '/' + program + '.json'
        # NOTE(review): the file is parsed twice, and the first, unguarded
        # load defeats the try/except below — the first line will raise
        # before the guarded load ever runs.
        json_dict = json.load(open(full_path, 'r'))
        try:
            json_dict = json.load(open(full_path, 'r'))
        except Exception:
            print(program)
            exit(1)
        return json_dict
    def read_schedule_json(self, program, schedule):
        """Load and return the schedule's JSON description as a dict."""
        full_path = self.data_path + self.stats_folder + '/' + program + '/' + schedule \
            + '/' + schedule + '.json'
        json_dict = json.load(open(full_path, 'r'))
        return json_dict
    # def patch(self):
    #     full_path = self.data_path + self.programs_folder
    #     for program,schedule in self.get_all_programs_schedules():
    #         if not self.check(program, schedule):
    #             self.exec_command("rm -rf " + full_path + program +'/' +schedule +'/')
    # def check(self, program, schedule):
    #     full_path = self.data_path + self.programs_folder + '/' + program + '/' + schedule \
    #                 + '/' + schedule + '.json'
    #     json_dict = json.load(open(full_path, 'r'))
    #     if 0 in json_dict['interchange_dims']:
    #         if not json_dict['tiling']:
    #             return True
    #         if json_dict['tiling']['tiling_dims'][0] != json_dict['interchange_dims'][1]:
    #             return True
    #     return False
    def calculate_stats(self):
        """Add 'no_schedule' (baseline time) and 'speedup' columns to
        ``self.results`` and sort each program's schedules by speedup."""
        df = self.results
        df['no_schedule'] = 0.0
        for program in df.index.levels[0]:
            #get no_schedule exec time
            no_sched = float(df.loc[program] [df.loc[program].index.str.endswith('no_schedule')].exec_time)
            df.loc[program, 'no_schedule'] = no_sched
        # df.loc[program, 'speedup'] = (df.loc[program, 'exec_time'] / no_sched).values
        df['speedup'] = df.no_schedule / df.exec_time
        df = df.sort_values(by=["program", "speedup"], ascending=[True, False])
        self.results = df
        return df
    def unrolling_params(self, schedule_repr):
        """Return the unrolling factor as a string, or None if not unrolled."""
        unrolling = schedule_repr['unrolling_factor']
        result = None
        if unrolling is not None:
            result = str(unrolling)
        return result
    def interchange_params(self, schedule_repr):
        """Return the interchanged dims as a tuple string, or None."""
        interchange = schedule_repr['interchange_dims']
        result = None
        if len(interchange) > 0:
            result = str(tuple(interchange))
        return result
    def tiling_params(self, schedule_repr):
        """Return a '{dim: factor}' string describing the tiling, or None."""
        tiling = schedule_repr['tiling']
        result = None
        if tiling is not None:
            dims = tiling['tiling_dims']
            factors = tiling['tiling_factors']
            result = str(dict(zip(dims, factors)))
        return result
    def show_random_func(self):
        """Plot the execution times of a randomly chosen program, highlighting
        the unscheduled baseline bar in red.

        NOTE(review): this assumes ``self.results`` is a dict of per-program
        dicts (``.values()`` is called and ``.items()`` is used on an entry);
        with the DataFrame stored by :meth:`read_results`, ``.values`` is a
        non-callable property and this would raise — confirm intended input.
        """
        func = random.choice(list(self.results.values()))
        x, y = zip(*func.items())
        index = [i for i in range(len(x)) if x[i].endswith("no_schedule")][0]
        bar_list = plt.bar(x, y)
        bar_list[index].set_color('r')
        plt.xticks(x, rotation=20)
        plt.ylabel("Execution time (ms)")
        plt.show()
    def exec_command(self, command):
        """Run *command* via subprocess; exit the process on return code 1,
        print a diagnostic for any other non-zero return code."""
        ret_code = subprocess.call(command.split())
        if ret_code == 1:
            print(command)
            exit(1)
        if ret_code != 0:
            print(f"Return code {ret_code}")
            print(f"Error executing command {command} \n")
    def program_to_ast(self, program):
        """Return the Loop_AST representation built from the program's JSON."""
        #read json in dictionary
        program_dict = self.read_program_json(program)
        #transform to ast
        program = Loop_AST(program, program_dict)
        return program
    def schedule_repr(self, program, schedule):
        """Return the Schedule representation built from the schedule's JSON."""
        #read json in dict
        schedule_dict = self.read_schedule_json(program, schedule)
        #get representation
        schedule = Schedule(schedule, schedule_dict)
        return schedule
    def read_exec_time(self, program, schedule):
        """Return the median of the schedule's measured times divided by 1000
        (presumably a ms-to-s conversion — TODO confirm units), or NaN when
        the timing file is empty."""
        full_path = self.data_path + self.stats_folder + '/'
        exec_time = np.NaN
        #Getting the execution time of the schedule
        with open(full_path + '/'+program + '/' + schedule +'/exec_times.txt', 'r') as f:
            exec_times = f.readlines()
            if len(exec_times) > 0:
                # NOTE(review): the '.' before the decimals is unescaped and
                # matches any character — likely intended r"\.".
                r = re.compile(r"[0-9]+(.[0-9]+)?(e\+[0-9]+)?")
                exec_times = [r.match(value) for value in exec_times]
                exec_times = np.array([val.group(0) for val in exec_times
                                       if val is not None], dtype='float64')
                exec_time = np.median(exec_times)/1000
            f.close()
        return exec_time
    def read_exec_times(self, program, schedule):
        """Return all measured times of the schedule as a float64 array
        (unparseable lines are dropped)."""
        full_path = self.data_path + self.stats_folder + '/'
        exec_times = []
        #Getting the execution time of the schedule
        with open(full_path + '/'+program + '/' + schedule +'/exec_times.txt', 'r') as f:
            exec_times = f.readlines()
            # NOTE(review): same unescaped '.' as in read_exec_time.
            r = re.compile(r"[0-9]+(.[0-9]+)?(e\+[0-9]+)?")
            exec_times = [r.match(value) for value in exec_times]
            exec_times = np.array([val.group(0) for val in exec_times
                                   if val is not None], dtype='float64')
            f.close()
        return exec_times
    def load_data(self):
        '''
        Returns (programs, schedules, times)
        '''
        # programs are sorted so the ordering is deterministic across runs.
        progs_array = []
        schedules_array = []
        exec_times_array = []
        programs = sorted(self.get_all_programs())
        for program in programs:
            print(program)
            prog_ast = self.program_to_ast(program)
            progs_array.append(prog_ast)
            progs_schedules = []
            progs_times = []
            schedules = sorted(list(self.get_all_schedules(program)))
            for schedule in schedules:
                #get schedule representation
                schedule_repr = self.schedule_repr(program, schedule)
                progs_schedules.append(schedule_repr)
                t = self.read_exec_time(program, schedule)
                progs_times.append(t)
            schedules_array.append(progs_schedules)
            exec_times_array.append(progs_times)
        return (progs_array, schedules_array, exec_times_array)
    def load_data_separate_exec_times(self,execTimesFile):
        '''
        Loads data exec times from a separate .pickle file
        Returns (programs, schedules, times)
        '''
        exec_times_tuples=[] # contains a list of tuples (func_id, sched_id, e, median, speedup)
        # for fileName in execTimesFilesList:
        with open(execTimesFile, "rb") as f:
            exec_times_tuples.extend(pickle.load(f))
        progs_array = []
        schedules_array = []
        exec_times_array = []
        # Sort the pickled tuples by schedule id so they line up with the
        # sorted directory traversal below; the assert checks the alignment.
        exec_times_tuples=sorted(exec_times_tuples, key = lambda x: x[1])
        programs = sorted(self.get_all_programs())
        i = 0
        for program in programs:
            print(program)
            prog_ast = self.program_to_ast(program)
            progs_array.append(prog_ast)
            progs_schedules = []
            progs_times = []
            schedules = sorted(list(self.get_all_schedules(program)))
            for schedule in schedules:
                #get schedule representation
                schedule_repr = self.schedule_repr(program, schedule)
                progs_schedules.append(schedule_repr)
                # t = self.read_exec_time(program, schedule)
                assert exec_times_tuples[i][1]==schedule,'error on appending exec times at '+schedule
                # if (exec_times_tuples[i][1]!=schedule):
                #     print("error")
                #     print((exec_times_tuples[i][1],schedule))
                #     raise Exception('error on appending exec times')
                # print((exec_times_tuples[i][1],exec_times_tuples[i][3]/1000,schedule))
                t=exec_times_tuples[i][3]/1000
                progs_times.append(t)
                i+=1
            schedules_array.append(progs_schedules)
            exec_times_array.append(progs_times)
        return (progs_array, schedules_array, exec_times_array)
if __name__=="__main__":
    # Expose the Stats class as a command-line interface via python-fire.
    fire.Fire(Stats)
| [
"pandas.MultiIndex.from_tuples",
"fire.Fire",
"matplotlib.pyplot.show",
"numpy.median",
"numpy.std",
"matplotlib.pyplot.bar",
"numpy.mean",
"pickle.load",
"functools.reduce",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"os.listdir",
"re.compile"
] | [((16494, 16510), 'fire.Fire', 'fire.Fire', (['Stats'], {}), '(Stats)\n', (16503, 16510), False, 'import fire\n'), ((1249, 1281), 'os.listdir', 'listdir', (['(self.data_path + folder)'], {}), '(self.data_path + folder)\n', (1256, 1281), False, 'from os import listdir\n'), ((2117, 2165), 'os.listdir', 'listdir', (["(self.data_path + folder + '/' + program)"], {}), "(self.data_path + folder + '/' + program)\n", (2124, 2165), False, 'from os import listdir\n'), ((4993, 5055), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['keys'], {'names': "('program', 'schedule')"}), "(keys, names=('program', 'schedule'))\n", (5018, 5055), True, 'import pandas as pd\n'), ((6686, 6748), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['keys'], {'names': "('program', 'schedule')"}), "(keys, names=('program', 'schedule'))\n", (6711, 6748), True, 'import pandas as pd\n'), ((7471, 7508), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'iterators'], {}), '(lambda x, y: x * y, iterators)\n', (7477, 7508), False, 'from functools import reduce\n'), ((11087, 11100), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {}), '(x, y)\n', (11094, 11100), True, 'import matplotlib.pyplot as plt\n'), ((11148, 11174), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x'], {'rotation': '(20)'}), '(x, rotation=20)\n', (11158, 11174), True, 'import matplotlib.pyplot as plt\n'), ((11183, 11216), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Execution time (ms)"""'], {}), "('Execution time (ms)')\n", (11193, 11216), True, 'import matplotlib.pyplot as plt\n'), ((11225, 11235), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11233, 11235), True, 'import matplotlib.pyplot as plt\n'), ((634, 657), 'os.listdir', 'listdir', (['self.data_path'], {}), '(self.data_path)\n', (641, 657), False, 'from os import listdir\n'), ((13159, 13202), 're.compile', 're.compile', (['"""[0-9]+(.[0-9]+)?(e\\\\+[0-9]+)?"""'], {}), "('[0-9]+(.[0-9]+)?(e\\\\+[0-9]+)?')\n", (13169, 13202), 
False, 'import re\n'), ((4558, 4577), 'numpy.mean', 'np.mean', (['exec_times'], {}), '(exec_times)\n', (4565, 4577), True, 'import numpy as np\n'), ((4637, 4658), 'numpy.median', 'np.median', (['exec_times'], {}), '(exec_times)\n', (4646, 4658), True, 'import numpy as np\n'), ((4715, 4733), 'numpy.std', 'np.std', (['exec_times'], {}), '(exec_times)\n', (4721, 4733), True, 'import numpy as np\n'), ((12410, 12453), 're.compile', 're.compile', (['"""[0-9]+(.[0-9]+)?(e\\\\+[0-9]+)?"""'], {}), "('[0-9]+(.[0-9]+)?(e\\\\+[0-9]+)?')\n", (12420, 12453), False, 'import re\n'), ((14922, 14936), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14933, 14936), False, 'import pickle\n'), ((12732, 12753), 'numpy.median', 'np.median', (['exec_times'], {}), '(exec_times)\n', (12741, 12753), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rectv
import numpy as np
import dxchange
import tomopy
def rec_tv(data,m,nsp,rot_center,
           lambda0,lambda1,niters,ngpus):
    """
    Reconstruct. Time-domain decomposition + regularization.

    Parameters: data is a 4-D array indexed as
    [nframes, nproj, ns, n]; m is the number of temporal basis functions,
    nsp the number of slices processed together (unused here — TODO confirm),
    lambda0/lambda1 the regularization weights, niters the iteration count
    and ngpus the number of GPUs handed to the rectv solver.
    """
    [nframes, nproj, ns, n] = data.shape
    # Crop the detector axis so the rotation center ends up at n//2.
    if (rot_center<n//2):
        data = data[:,:,:,:n//2+rot_center-1]
    if (rot_center>n//2):
        data = data[:,:,:,rot_center-n//2:]
    n = data.shape[3]
    # reorder input data for compatibility (flatten frames x projections,
    # then move the slice axis first as the solver expects)
    data = np.reshape(data,[nframes*nproj,ns,n])
    data = np.ndarray.flatten(data.swapaxes(0, 1))
    # memory for result
    rec = np.zeros([n*n*ns*m], dtype='float32')
    # Make a class for tv
    cl = rectv.rectv(n, nframes*nproj, m, nframes, ns,
                     ns, ngpus, lambda0, lambda1)
    # Run iterations
    cl.itertvR_wrap(rec, data, niters)
    # reorder result for compatibility with tomopy
    # (the /nproj*2 normalization matches tomopy's scaling — TODO confirm)
    rec = np.rot90(np.reshape(rec, [ns, m, n, n]).swapaxes(0, 1), axes=(
        2, 3))/nproj*2
    # take slices corresponding to angles k\pi
    rec = rec[::m//nframes]
    return rec
def rec(data, rot_center):
    """
    Reconstruct each time frame with the Gridrec algorithm.

    data is indexed as [nframes, nproj, ns, n]; returns a float32 array of
    shape [nframes, ns, n, n] of circle-masked reconstructed slices.
    """
    nframes, nproj, ns, n = data.shape
    angles = np.linspace(0, np.pi * nframes, nproj * nframes, endpoint=False)
    result = np.zeros([nframes, ns, n, n], dtype='float32')
    for frame in range(nframes):
        frame_angles = angles[frame * nproj:(frame + 1) * nproj]
        # Even and odd frames use rotation centers differing by one pixel.
        center = rot_center - np.mod(frame, 2)
        slices = tomopy.recon(data[frame], frame_angles, center=center,
                             algorithm='gridrec')
        # Mask each reconstructed slice with a circle.
        result[frame] = tomopy.circ_mask(slices, axis=0, ratio=0.95)
    return result
if __name__ == "__main__":
    data = np.load("data.npy")  # load continuous data
    rot_center = 252
    nsp = 4  # number of slices to process simultaniously by gpus
    m = 8  # number of basis functions, must be a multiple of nframes
    lambda0 = pow(2, -9)  # regularization parameter 1
    lambda1 = pow(2, 2)  # regularization parameter 2
    niters = 1024  # number of iterations
    ngpus = 1  # number of gpus
    # Regularized time-domain reconstruction, one tiff stack per frame.
    rtv = rec_tv(data,m,nsp,rot_center,lambda0,lambda1,niters,ngpus)
    for k in range(rtv.shape[0]):
        dxchange.write_tiff_stack(rtv[k],'rec_tv/rec_'+str(k))
    # Plain Gridrec reconstruction for comparison.
    r = rec(data,rot_center)
    for k in range(r.shape[0]):
        dxchange.write_tiff_stack(r[k],'rec/rec_'+str(k))
| [
"numpy.load",
"numpy.zeros",
"rectv.rectv",
"numpy.mod",
"numpy.reshape",
"numpy.linspace",
"tomopy.circ_mask"
] | [((525, 567), 'numpy.reshape', 'np.reshape', (['data', '[nframes * nproj, ns, n]'], {}), '(data, [nframes * nproj, ns, n])\n', (535, 567), True, 'import numpy as np\n'), ((653, 696), 'numpy.zeros', 'np.zeros', (['[n * n * ns * m]'], {'dtype': '"""float32"""'}), "([n * n * ns * m], dtype='float32')\n", (661, 696), True, 'import numpy as np\n'), ((729, 805), 'rectv.rectv', 'rectv.rectv', (['n', '(nframes * nproj)', 'm', 'nframes', 'ns', 'ns', 'ngpus', 'lambda0', 'lambda1'], {}), '(n, nframes * nproj, m, nframes, ns, ns, ngpus, lambda0, lambda1)\n', (740, 805), False, 'import rectv\n'), ((1261, 1325), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * nframes)', '(nproj * nframes)'], {'endpoint': '(False)'}), '(0, np.pi * nframes, nproj * nframes, endpoint=False)\n', (1272, 1325), True, 'import numpy as np\n'), ((1363, 1409), 'numpy.zeros', 'np.zeros', (['[nframes, ns, n, n]'], {'dtype': '"""float32"""'}), "([nframes, ns, n, n], dtype='float32')\n", (1371, 1409), True, 'import numpy as np\n'), ((1790, 1809), 'numpy.load', 'np.load', (['"""data.npy"""'], {}), "('data.npy')\n", (1797, 1809), True, 'import numpy as np\n'), ((1686, 1728), 'tomopy.circ_mask', 'tomopy.circ_mask', (['rec0'], {'axis': '(0)', 'ratio': '(0.95)'}), '(rec0, axis=0, ratio=0.95)\n', (1702, 1728), False, 'import tomopy\n'), ((1561, 1582), 'numpy.mod', 'np.mod', (['time_frame', '(2)'], {}), '(time_frame, 2)\n', (1567, 1582), True, 'import numpy as np\n'), ((956, 986), 'numpy.reshape', 'np.reshape', (['rec', '[ns, m, n, n]'], {}), '(rec, [ns, m, n, n])\n', (966, 986), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# This file was generated
import array # noqa: F401
import ctypes
import datetime # noqa: F401
# Used by @ivi_synchronized
from functools import wraps
import nifgen._attributes as _attributes
import nifgen._converters as _converters
import nifgen._library_singleton as _library_singleton
import nifgen._visatype as _visatype
import nifgen.enums as enums
import nifgen.errors as errors
import nitclk
# Used for __repr__
import pprint
pp = pprint.PrettyPrinter(indent=4)


# Helper functions for creating ctypes needed for calling into the driver DLL
def get_ctypes_pointer_for_buffer(value=None, library_type=None, size=None):
    """Return a ctypes object suitable for handing *value* to the driver DLL.

    Supported inputs: ``array.array`` (zero-copy pointer to its buffer),
    ``numpy.ndarray`` (via ``numpy.ctypeslib``), ``bytes``, ``list`` (copied
    into a new ctypes array), or ``None`` with *library_type* and *size* to
    allocate an uninitialized output buffer. Returns ``None`` otherwise.
    """
    if isinstance(value, array.array):
        assert library_type is not None, 'library_type is required for array.array'
        address = value.buffer_info()[0]
        return ctypes.cast(address, ctypes.POINTER(library_type))
    # String-compare the type name so numpy stays an optional dependency;
    # it is only imported when an ndarray is actually passed in.
    if str(type(value)).find("'numpy.ndarray'") != -1:
        import numpy
        return numpy.ctypeslib.as_ctypes(value)
    if isinstance(value, bytes):
        return ctypes.cast(value, ctypes.POINTER(library_type))
    if isinstance(value, list):
        assert library_type is not None, 'library_type is required for list'
        return (library_type * len(value))(*value)
    # No buffer supplied: allocate an empty array when type and size are known.
    if library_type is not None and size is not None:
        return (library_type * size)()
    return None
def get_ctypes_and_array(value, array_type):
    """Coerce *value* into an ``array.array`` of typecode *array_type*.

    Existing arrays are passed through unchanged; ``None`` yields ``None``;
    any other iterable is copied into a new ``array.array``.
    """
    if value is None:
        return None
    if isinstance(value, array.array):
        return value
    return array.array(array_type, value)
return value_array
class _Generation(object):
def __init__(self, session):
self._session = session
self._session._initiate_generation()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._session.abort()
# From https://stackoverflow.com/questions/5929107/decorators-with-parameters
def ivi_synchronized(f):
@wraps(f)
def aux(*xs, **kws):
session = xs[0] # parameter 0 is 'self' which is the session object
with session.lock():
return f(*xs, **kws)
return aux
class _Lock(object):
    """Context manager that releases the session lock on exit.

    Acquisition is asymmetric by design: the lock is taken in the session's
    ``lock()`` method before this object is entered; this object only
    guarantees the matching ``unlock()``.
    """
    def __init__(self, session):
        self._session = session
    def __enter__(self):
        # _lock_session is called from the lock() function, not here
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self._session.unlock()
class _RepeatedCapabilities(object):
    """Indexable helper that scopes a session to repeated capabilities
    (e.g. channels): ``session.channels['0,1']`` returns a _SessionBase
    bound to those capabilities."""
    def __init__(self, session, prefix, current_repeated_capability_list):
        self._session = session
        self._prefix = prefix
        # We need at least one element. If we get an empty list, make the one element an empty string
        self._current_repeated_capability_list = current_repeated_capability_list if len(current_repeated_capability_list) > 0 else ['']
        # Now we know there is at least one entry, so we look if it is an empty string or not
        self._separator = '/' if len(self._current_repeated_capability_list[0]) > 0 else ''
    def __getitem__(self, repeated_capability):
        '''Set/get properties or call methods with a repeated capability (i.e. channels)'''
        # Expand ranges/lists (e.g. '0-2') into individual capability strings,
        # then combine with any capabilities already bound to this object.
        rep_caps_list = _converters.convert_repeated_capabilities(repeated_capability, self._prefix)
        complete_rep_cap_list = [current_rep_cap + self._separator + rep_cap for current_rep_cap in self._current_repeated_capability_list for rep_cap in rep_caps_list]
        return _SessionBase(vi=self._session._vi, repeated_capability_list=complete_rep_cap_list, library=self._session._library, encoding=self._session._encoding, freeze_it=True)
# This is a very simple context manager we can use when we need to set/get attributes
# or call functions from _SessionBase that require no channels. It is tied to the specific
# implementation of _SessionBase and how repeated capabilities are handled.
class _NoChannel(object):
    """Context manager that temporarily clears the session's repeated
    capability so channel-less attributes/functions can be used, restoring
    the previous value on exit."""
    def __init__(self, session):
        self._session = session
    def __enter__(self):
        self._repeated_capability_cache = self._session._repeated_capability
        self._session._repeated_capability = ''
    def __exit__(self, exc_type, exc_value, traceback):
        self._session._repeated_capability = self._repeated_capability_cache
class _SessionBase(object):
'''Base class for all NI-FGEN sessions.'''
# This is needed during __init__. Without it, __setattr__ raises an exception
_is_frozen = False
absolute_delay = _attributes.AttributeViReal64(1150413)
'''Type: float
Specifies the sub-Sample Clock delay, in seconds, to apply to the
waveform. Use this property to reduce the trigger jitter when
synchronizing multiple devices with NI-TClk. This property can also help
maintain synchronization repeatability by writing the absolute delay
value of a previous measurement to the current session.
To set this property, the waveform generator must be in the Idle
(Configuration) state.
**Units**: seconds (s)
**Valid Values**: Plus or minus half of one Sample Clock period
**Default Value**: 0.0
**Supported Waveform Generators**: PXIe-5413/5423/5433
Note:
If this property is set, NI-TClk cannot perform any sub-Sample Clock
adjustment.
'''
all_marker_events_latched_status = _attributes.AttributeViInt32(1150349)
'''Type: int
Returns a bit field of the latched status of all Marker Events. Write 0 to this property to clear the latched status of all Marker Events.
'''
all_marker_events_live_status = _attributes.AttributeViInt32(1150344)
'''Type: int
Returns a bit field of the live status of all Marker Events.
'''
analog_data_mask = _attributes.AttributeViInt32(1150234)
'''Type: int
Specifies the mask to apply to the analog output. The masked data is replaced with the data in analog_static_value.
'''
analog_filter_enabled = _attributes.AttributeViBoolean(1150103)
'''Type: bool
Controls whether the signal generator applies to an analog filter to the output signal. This property is valid in arbitrary waveform, arbitrary sequence, and script modes. This property can also be used in standard method and frequency list modes for user-defined waveforms.
'''
analog_path = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.AnalogPath, 1150222)
'''Type: enums.AnalogPath
Specifies the analog signal path that should be used. The main path allows you to configure gain, offset, analog filter status, output impedance, and output enable. The main path has two amplifier options, high- and low-gain.
The direct path presents a much smaller gain range, and you cannot adjust offset or the filter status. The direct path also provides a smaller output range but also lower distortion. NI-FGEN normally chooses the amplifier based on the user-specified gain.
'''
analog_static_value = _attributes.AttributeViInt32(1150235)
'''Type: int
Specifies the static value that replaces data masked by analog_data_mask.
'''
arb_gain = _attributes.AttributeViReal64(1250202)
'''Type: float
Specifies the factor by which the signal generator scales the arbitrary waveform data. When you create arbitrary waveforms, you must first normalize the data points to the range -1.0 to +1.0. Use this property to scale the arbitrary waveform to other ranges.
For example, when you set this property to 2.0, the output signal ranges from -2.0 V to +2.0 V.
Use this property when output_mode is set to OutputMode.ARB or OutputMode.SEQ.
'''
arb_marker_position = _attributes.AttributeViInt32(1150327)
'''Type: int
Specifies the position for a marker to be asserted in the arbitrary waveform. This property defaults to -1 when no marker position is specified. Use this property when output_mode is set to OutputMode.ARB.
Use ExportSignal to export the marker signal.
Note:
One or more of the referenced methods are not in the Python API for this driver.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
arb_offset = _attributes.AttributeViReal64(1250203)
'''Type: float
Specifies the value that the signal generator adds to the arbitrary waveform data. When you create arbitrary waveforms, you must first normalize the data points to the range -1.0 to +1.0. Use this property to shift the arbitrary waveform range.
For example, when you set this property to 1.0, the output signal ranges from 2.0 V to 0.0 V.
Use this property when output_mode is set to OutputMode.ARB or OutputMode.SEQ.
Units: Volts
'''
arb_repeat_count = _attributes.AttributeViInt32(1150328)
'''Type: int
Specifies number of times to repeat the arbitrary waveform when the triggerMode parameter of ConfigureTriggerMode is set to TriggerMode.SINGLE or TriggerMode.STEPPED. This property is ignored if the triggerMode parameter is set to TriggerMode.CONTINUOUS or TriggerMode.BURST. Use this property when output_mode is set to OutputMode.ARB.
When used during streaming, this property specifies the number of times to repeat the streaming waveform (the onboard memory allocated for streaming). For more information about streaming, refer to the Streaming topic.
'''
arb_sample_rate = _attributes.AttributeViReal64(1250204)
'''Type: float
Specifies the rate at which the signal generator outputs the points in arbitrary waveforms. Use this property when output_mode is set to OutputMode.ARB or OutputMode.SEQ.
Units: Samples/s
'''
arb_sequence_handle = _attributes.AttributeViInt32(1250211)
'''Type: int
This channel-based property identifies which sequence the signal generator produces. You can create multiple sequences using create_arb_sequence. create_arb_sequence returns a handle that you can use to identify the particular sequence. To configure the signal generator to produce a particular sequence, set this property to the sequence handle.
Use this property only when output_mode is set to OutputMode.SEQ.
'''
arb_waveform_handle = _attributes.AttributeViInt32(1250201)
'''Type: int
Selects which arbitrary waveform the signal generator produces. You can create multiple arbitrary waveforms using one of the following niFgen Create Waveform methods:
create_waveform
create_waveform
create_waveform_from_file_i16
create_waveform_from_file_f64
CreateWaveformFromFileHWS
These methods return a handle that you can use to identify the particular waveform. To configure the signal generator to produce a particular waveform, set this property to the waveform handle.
Use this property only when output_mode is set to OutputMode.ARB.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
aux_power_enabled = _attributes.AttributeViBoolean(1150411)
'''Type: bool
Controls the specified auxiliary power pin. Setting this property to TRUE energizes the auxiliary power when the session is committed. When this property is FALSE, the power pin of the connector outputs no power.
'''
bus_type = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.BusType, 1150215)
'''Type: enums.BusType
The bus type of the signal generator.
'''
channel_delay = _attributes.AttributeViReal64(1150369)
'''Type: float
Specifies, in seconds, the delay to apply to the analog output of the channel specified by the channel string. You can use the channel delay to configure the timing relationship between channels on a multichannel device. Values for this property can be zero or positive. A value of zero indicates that the channels are aligned. A positive value delays the analog output by the specified number of seconds.
'''
clock_mode = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.ClockMode, 1150110)
'''Type: enums.ClockMode
Controls which clock mode is used for the signal generator.
For signal generators that support it, this property allows switching the sample clock to High-Resolution mode. When in Divide-Down mode, the sample rate can only be set to certain frequences, based on dividing down the update clock. However, in High-Resolution mode, the sample rate may be set to any value.
'''
common_mode_offset = _attributes.AttributeViReal64(1150366)
'''Type: float
Specifies, in volts, the value the signal generator adds to or subtracts from the arbitrary waveform data. This property applies only when you set the terminal_configuration property to TerminalConfiguration.DIFFERENTIAL. Common mode offset is applied to the signals generated at each differential output terminal.
'''
data_marker_events_count = _attributes.AttributeViInt32(1150273)
'''Type: int
Returns the number of Data Marker Events supported by the device.
'''
data_marker_event_data_bit_number = _attributes.AttributeViInt32(1150337)
'''Type: int
Specifies the bit number to assign to the Data Marker Event.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
data_marker_event_level_polarity = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.DataMarkerEventLevelPolarity, 1150338)
'''Type: enums.DataMarkerEventLevelPolarity
Specifies the output polarity of the Data marker event.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
data_marker_event_output_terminal = _attributes.AttributeViString(1150339)
'''Type: str
Specifies the destination terminal for the Data Marker Event.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
data_transfer_block_size = _attributes.AttributeViInt32(1150241)
'''Type: int
The number of samples at a time to download to onboard memory. Useful when the total data to be transferred to onboard memory is large.
'''
data_transfer_maximum_bandwidth = _attributes.AttributeViReal64(1150373)
'''Type: float
Specifies the maximum amount of bus bandwidth (in bytes per second) to use for data transfers. The signal generator limits data transfer speeds on the PCIe bus to the value you specify for this property. Set this property to optimize bus bandwidth usage for multi-device streaming applications by preventing the signal generator from consuming all of the available bandwidth on a PCI express link when waveforms are being written to the onboard memory of the device.
'''
data_transfer_maximum_in_flight_reads = _attributes.AttributeViInt32(1150375)
'''Type: int
Specifies the maximum number of concurrent PCI Express read requests the signal generator can issue.
When transferring data from computer memory to device onboard memory across the PCI Express bus, the signal generator can issue multiple memory reads at the same time. In general, the larger the number of read requests, the more efficiently the device uses the bus because the multiple read requests keep the data flowing, even in a PCI Express topology that has high latency due to PCI Express switches in the data path. Most NI devices can issue a large number of read requests (typically 8 or 16). By default, this property is set to the highest value the signal generator supports.
If other devices in your system cannot tolerate long data latencies, it may be helpful to decrease the number of in-flight read requests the NI signal generator issues. This helps to reduce the amount of data the signal generator reads at one time.
'''
data_transfer_preferred_packet_size = _attributes.AttributeViInt32(1150374)
'''Type: int
Specifies the preferred size of the data field in a PCI Express read request packet. In general, the larger the packet size, the more efficiently the device uses the bus. By default, NI signal generators use the largest packet size allowed by the system. However, due to different system implementations, some systems may perform better with smaller packet sizes.
Recommended values for this property are powers of two between 64 and 512.
In some cases, the signal generator generates packets smaller than the preferred size you set with this property.
You cannot change this property while the device is generating a waveform. If you want to change the device configuration, call the abort method or wait for the generation to complete.
Note:
:
'''
digital_data_mask = _attributes.AttributeViInt32(1150236)
'''Type: int
Specifies the mask to apply to the output on the digital connector. The masked data is replaced with the data in digital_static_value.
'''
digital_edge_script_trigger_edge = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.ScriptTriggerDigitalEdgeEdge, 1150292)
'''Type: enums.ScriptTriggerDigitalEdgeEdge
Specifies the active edge for the Script trigger. This property is used when script_trigger_type is set to Digital Edge.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
digital_edge_script_trigger_source = _attributes.AttributeViString(1150291)
'''Type: str
Specifies the source terminal for the Script trigger. This property is used when script_trigger_type is set to Digital Edge.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
digital_edge_start_trigger_edge = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.StartTriggerDigitalEdgeEdge, 1150282)
'''Type: enums.StartTriggerDigitalEdgeEdge
Specifies the active edge for the Start trigger. This property is used only when start_trigger_type is set to Digital Edge.
'''
digital_edge_start_trigger_source = _attributes.AttributeViString(1150281)
'''Type: str
Specifies the source terminal for the Start trigger. This property is used only when start_trigger_type is set to Digital Edge.
'''
digital_filter_enabled = _attributes.AttributeViBoolean(1150102)
'''Type: bool
Controls whether the signal generator applies a digital filter to the output signal. This property is valid in arbitrary waveform, arbitrary sequence, and script modes. This property can also be used in standard method and frequency list modes for user-defined waveforms.
'''
digital_filter_interpolation_factor = _attributes.AttributeViReal64(1150218)
'''Type: float
This property only affects the device when digital_filter_enabled is set to True. If you do not set this property directly, NI-FGEN automatically selects the maximum interpolation factor allowed for the current sample rate. Valid values are 2, 4, and 8.
'''
digital_gain = _attributes.AttributeViReal64(1150254)
'''Type: float
Specifies a factor by which the signal generator digitally multiplies generated data before converting it to an analog signal in the DAC. For a digital gain greater than 1.0, the product of digital gain times the generated data must be inside the range plus or minus 1.0 (assuming floating point data). If the product exceeds these limits, the signal generator clips the output signal, and an error results.
Some signal generators support both digital gain and an analog gain (analog gain is specified with the func_amplitude property or the arb_gain property). Digital gain can be changed during generation without the glitches that may occur when changing analog gains, due to relay switching. However, the DAC output resolution is a method of analog gain, so only analog gain makes full use of the resolution of the DAC.
'''
digital_pattern_enabled = _attributes.AttributeViBoolean(1150101)
'''Type: bool
Controls whether the signal generator generates a digital pattern of the output signal.
'''
digital_static_value = _attributes.AttributeViInt32(1150237)
'''Type: int
Specifies the static value that replaces data masked by digital_data_mask.
'''
done_event_output_terminal = _attributes.AttributeViString(1150315)
'''Type: str
Specifies the destination terminal for the Done Event.
'''
driver_setup = _attributes.AttributeViString(1050007)
'''Type: str
Specifies the driver setup portion of the option string that was passed into the InitWithOptions method.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
exported_onboard_reference_clock_output_terminal = _attributes.AttributeViString(1150322)
'''Type: str
Specifies the terminal to which to export the Onboard Reference Clock.
'''
exported_reference_clock_output_terminal = _attributes.AttributeViString(1150321)
'''Type: str
Specifies the terminal to which to export the Reference Clock.
'''
exported_sample_clock_divisor = _attributes.AttributeViInt32(1150219)
'''Type: int
Specifies the factor by which to divide the Sample clock, also known as the Update clock, before it is exported. To export the Sample clock, use the ExportSignal method or the exported_sample_clock_output_terminal property.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
exported_sample_clock_output_terminal = _attributes.AttributeViString(1150320)
'''Type: str
Specifies the terminal to which to export the Sample Clock.
'''
exported_sample_clock_timebase_divisor = _attributes.AttributeViInt32(1150230)
'''Type: int
Specifies the factor by which to divide the sample clock timebase (board clock) before it is exported. To export the Sample clock timebase, use the ExportSignal method or the exported_sample_clock_timebase_output_terminal property.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
exported_sample_clock_timebase_output_terminal = _attributes.AttributeViString(1150329)
'''Type: str
Specifies the terminal to which to export the Sample clock timebase. If you specify a divisor with the exported_sample_clock_timebase_divisor property, the Sample clock exported with the exported_sample_clock_timebase_output_terminal property is the value of the Sample clock timebase after it is divided-down. For a list of the terminals available on your device, refer to the Device Routes tab in MAX.
To change the device configuration, call abort or wait for the generation to complete.
Note: The signal generator must not be in the Generating state when you change this property.
'''
exported_script_trigger_output_terminal = _attributes.AttributeViString(1150295)
'''Type: str
Specifies the output terminal for the exported Script trigger.
Setting this property to an empty string means that when you commit the session, the signal is removed from that terminal and, if possible, the terminal is tristated.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
exported_start_trigger_output_terminal = _attributes.AttributeViString(1150283)
'''Type: str
Specifies the destination terminal for exporting the Start trigger.
'''
external_clock_delay_binary_value = _attributes.AttributeViInt32(1150233)
'''Type: int
Binary value of the external clock delay.
'''
external_sample_clock_multiplier = _attributes.AttributeViReal64(1150376)
'''Type: float
Specifies a multiplication factor to use to obtain a desired sample rate from an external Sample clock. The resulting sample rate is equal to this factor multiplied by the external Sample clock rate. You can use this property to generate samples at a rate higher than your external clock rate. When using this property, you do not need to explicitly set the external clock rate.
'''
file_transfer_block_size = _attributes.AttributeViInt32(1150240)
'''Type: int
The number of samples at a time to read from the file and download to onboard memory. Used in conjunction with the Create From File and Write From File methods.
'''
filter_correction_frequency = _attributes.AttributeViReal64(1150104)
'''Type: float
Controls the filter correction frequency of the analog filter. This property corrects for the ripples in the analog filter frequency response at the frequency specified. For standard waveform output, the filter correction frequency should be set to be the same as the frequency of the standard waveform. To have no filter correction, set this property to 0 Hz.
'''
flatness_correction_enabled = _attributes.AttributeViBoolean(1150323)
'''Type: bool
When True, the signal generator applies a flatness correction factor to the generated sine wave in order to ensure the same output power level at all frequencies.
This property should be set to False when performing Flatness Calibration.
'''
fpga_bitfile_path = _attributes.AttributeViString(1150412)
'''Type: str
Gets the absolute file path to the bitfile loaded on the FPGA.
'''
freq_list_duration_quantum = _attributes.AttributeViReal64(1150214)
'''Type: float
Returns the quantum of which all durations must be a multiple in a frequency list.
'''
freq_list_handle = _attributes.AttributeViInt32(1150208)
'''Type: int
Sets which frequency list the signal generator produces. Create a frequency list using create_freq_list. create_freq_list returns a handle that you can use to identify the list.
'''
func_amplitude = _attributes.AttributeViReal64(1250102)
'''Type: float
Controls the amplitude of the standard waveform that the signal generator produces. This value is the amplitude at the output terminal.
For example, to produce a waveform ranging from -5.00 V to +5.00 V, set the amplitude to 10.00 V.
set the Waveform parameter to Waveform.DC.
Units: Vpk-pk
Note: This parameter does not affect signal generator behavior when you
'''
func_buffer_size = _attributes.AttributeViInt32(1150238)
'''Type: int
This property contains the number of samples used in the standard method waveform buffer. This property is only valid on devices that implement standard method mode in software, and is read-only for all other devices.
implementation of Standard Method Mode on your device.
Note: Refer to the Standard Method Mode topic for more information on the
'''
func_dc_offset = _attributes.AttributeViReal64(1250103)
'''Type: float
Controls the DC offset of the standard waveform that the signal generator produces. This value is the offset at the output terminal. The value is the offset from ground to the center of the waveform that you specify with the Waveform parameter.
For example, to configure a waveform with an amplitude of 10.00 V to range from 0.00 V to +10.00 V, set DC Offset to 5.00 V.
Units: volts
'''
func_duty_cycle_high = _attributes.AttributeViReal64(1250106)
'''Type: float
Controls the duty cycle of the square wave the signal generator produces. Specify this property as a percentage of the time the square wave is high in a cycle.
set the Waveform parameter to Waveform.SQUARE.
Units: Percentage of time the waveform is high
Note: This parameter only affects signal generator behavior when you
'''
func_frequency = _attributes.AttributeViReal64(1250104)
'''Type: float
Controls the frequency of the standard waveform that the signal generator produces.
Units: hertz
(1) This parameter does not affect signal generator behavior when you set the Waveform parameter of the configure_standard_waveform method to Waveform.DC.
(2) For Waveform.SINE, the range is between 0 MHz and 16 MHz, but the range is between 0 MHz and 1 MHz for all other waveforms.
Note:
:
'''
func_max_buffer_size = _attributes.AttributeViInt32(1150239)
'''Type: int
This property sets the maximum number of samples that can be used in the standard method waveform buffer. Increasing this value may increase the quality of the waveform. This property is only valid on devices that implement standard method mode in software, and is read-only for all other devices.
implementation of Standard Method Mode on your device.
Note: Refer to the Standard Method Mode topic for more information on the
'''
func_start_phase = _attributes.AttributeViReal64(1250105)
'''Type: float
Controls horizontal offset of the standard waveform the signal generator produces. Specify this property in degrees of one waveform cycle.
A start phase of 180 degrees means output generation begins halfway through the waveform. A start phase of 360 degrees offsets the output by an entire waveform cycle, which is identical to a start phase of 0 degrees.
set the Waveform parameter to Waveform.DC.
Units: Degrees of one cycle
Note: This parameter does not affect signal generator behavior when you
'''
func_waveform = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.Waveform, 1250101)
'''Type: enums.Waveform
This channel-based property specifies which standard waveform the signal generator produces.
Use this property only when output_mode is set to OutputMode.FUNC.
Waveform.SINE - Sinusoid waveform
Waveform.SQUARE - Square waveform
Waveform.TRIANGLE - Triangle waveform
Waveform.RAMP_UP - Positive ramp waveform
Waveform.RAMP_DOWN - Negative ramp waveform
Waveform.DC - Constant voltage
Waveform.NOISE - White noise
Waveform.USER - User-defined waveform as defined with
define_user_standard_waveform
'''
idle_behavior = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.IdleBehavior, 1150377)
'''Type: enums.IdleBehavior
Specifies the behavior of the output during the Idle state. The output can be configured to hold the last generated voltage before entering the Idle state or jump to the Idle Value.
'''
idle_value = _attributes.AttributeViInt32(1150378)
'''Type: int
Specifies the value to generate in the Idle state. The Idle Behavior must be configured to jump to this value.
'''
instrument_firmware_revision = _attributes.AttributeViString(1050510)
'''Type: str
A string that contains the firmware revision information for the device that you are currently using.
'''
instrument_manufacturer = _attributes.AttributeViString(1050511)
'''Type: str
A string that contains the name of the device manufacturer you are currently using.
'''
instrument_model = _attributes.AttributeViString(1050512)
'''Type: str
A string that contains the model number or name of the device that you are currently using.
'''
io_resource_descriptor = _attributes.AttributeViString(1050304)
'''Type: str
Indicates the resource descriptor that NI-FGEN uses to identify the physical device.
If you initialize NI-FGEN with a logical name, this property contains the resource descriptor that corresponds to the entry in the IVI Configuration Utility.
If you initialize NI-FGEN with the resource descriptor, this property contains that value.
'''
load_impedance = _attributes.AttributeViReal64(1150220)
'''Type: float
This channel-based property specifies the load impedance connected to the analog output of the channel. If you set this property to NIFGEN_VAL_MATCHED_LOAD_IMPEDANCE (-1.0), NI-FGEN assumes that the load impedance matches the output impedance. NI-FGEN compensates to give the desired peak-to-peak voltage amplitude or arbitrary gain (relative to 1 V).
Note:
One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
'''
logical_name = _attributes.AttributeViString(1050305)
'''Type: str
A string containing the logical name that you specified when opening the current IVI session.
You may pass a logical name to init or InitWithOptions. The IVI Configuration Utility must contain an entry for the logical name. The logical name entry refers to a virtual instrument section in the IVI Configuration file. The virtual instrument section specifies a physical device and initial user options.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
marker_events_count = _attributes.AttributeViInt32(1150271)
'''Type: int
Returns the number of markers supported by the device. Use this property when output_mode is set to OutputMode.SCRIPT.
'''
marker_event_output_terminal = _attributes.AttributeViString(1150312)
'''Type: str
Specifies the destination terminal for the Marker Event.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
max_freq_list_duration = _attributes.AttributeViReal64(1150213)
'''Type: float
Returns the maximum duration of any one step in the frequency list.
'''
max_freq_list_length = _attributes.AttributeViInt32(1150211)
'''Type: int
Returns the maximum number of steps that can be in a frequency list.
'''
max_loop_count = _attributes.AttributeViInt32(1250215)
'''Type: int
Returns the maximum number of times that the signal generator can repeat a waveform in a sequence. Typically, this value is constant for the signal generator.
'''
max_num_freq_lists = _attributes.AttributeViInt32(1150209)
'''Type: int
Returns the maximum number of frequency lists the signal generator allows.
'''
max_num_sequences = _attributes.AttributeViInt32(1250212)
'''Type: int
Returns the maximum number of arbitrary sequences that the signal generator allows. Typically, this value is constant for the signal generator.
'''
max_num_waveforms = _attributes.AttributeViInt32(1250205)
'''Type: int
Returns the maximum number of arbitrary waveforms that the signal generator allows. Typically, this value is constant for the signal generator.
'''
# --- Code-generated driver attribute descriptors ---------------------------
# Each assignment binds an NI-FGEN property ID to a typed descriptor from the
# project's _attributes module; the bare string literal that follows each
# assignment is the generated per-attribute documentation and is kept verbatim.
max_sequence_length = _attributes.AttributeViInt32(1250214)
'''Type: int
Returns the maximum number of arbitrary waveforms that the signal generator allows in a sequence. Typically, this value is constant for the signal generator.
'''
max_waveform_size = _attributes.AttributeViInt32(1250208)
'''Type: int
Returns the size, in samples, of the largest waveform that can be created. This property reflects the space currently available, taking into account previously allocated waveforms and instructions.
'''
memory_size = _attributes.AttributeViInt32(1150242)
'''Type: int
The total amount of memory, in bytes, on the signal generator.
'''
min_freq_list_duration = _attributes.AttributeViReal64(1150212)
'''Type: float
Returns the minimum number of steps that can be in a frequency list.
'''
min_freq_list_length = _attributes.AttributeViInt32(1150210)
'''Type: int
Returns the minimum number of frequency lists that the signal generator allows.
'''
min_sequence_length = _attributes.AttributeViInt32(1250213)
'''Type: int
Returns the minimum number of arbitrary waveforms that the signal generator allows in a sequence. Typically, this value is constant for the signal generator.
'''
min_waveform_size = _attributes.AttributeViInt32(1250207)
'''Type: int
Returns the minimum number of points that the signal generator allows in an arbitrary waveform. Typically, this value is constant for the signal generator.
'''
module_revision = _attributes.AttributeViString(1150390)
'''Type: str
A string that contains the module revision for the device that you are currently using.
'''
channel_count = _attributes.AttributeViInt32(1050203)
'''Type: int
Indicates the number of channels that the specific instrument driver supports.
For each property for which IVI_VAL_MULTI_CHANNEL is set, the IVI Engine maintains a separate cache value for each channel.
'''
output_enabled = _attributes.AttributeViBoolean(1250003)
'''Type: bool
This channel-based property specifies whether the signal that the signal generator produces appears at the output connector.
'''
output_impedance = _attributes.AttributeViReal64(1250004)
'''Type: float
This channel-based property specifies the signal generator output impedance at the output connector. NI signal sources modules have an output impedance of 50 ohms and an optional 75 ohms on select modules. If the load impedance matches the output impedance, then the voltage at the signal output connector is at the needed level. The voltage at the signal output connector varies with load output impedance, up to doubling the voltage for a high-impedance load.
'''
output_mode = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.OutputMode, 1250001)
'''Type: enums.OutputMode
Sets which output mode the signal generator will use. The value you specify determines which methods and properties you use to configure the waveform the signal generator produces.
Note: The signal generator must not be in the Generating state when you change this property. To change the device configuration, call abort or wait for the generation to complete.
'''
ready_for_start_event_output_terminal = _attributes.AttributeViString(1150310)
'''Type: str
Specifies the destination terminal for the Ready for Start Event.
'''
reference_clock_source = _attributes.AttributeEnum(_attributes.AttributeViString, enums.ReferenceClockSource, 1150113)
'''Type: enums.ReferenceClockSource
Specifies the reference clock source used by the signal generator.
The signal generator derives the frequencies and sample rates that it uses to generate waveforms from the source you specify. For example, when you set this property to ClkIn, the signal generator uses the signal it receives at the CLK IN front panel connector as the Reference clock.
To change the device configuration, call abort or wait for the generation to complete.
Note: The signal generator must not be in the Generating state when you change this property.
'''
ref_clock_frequency = _attributes.AttributeViReal64(1150107)
'''Type: float
Sets the frequency of the signal generator reference clock. The signal generator uses the reference clock to derive frequencies and sample rates when generating output.
'''
sample_clock_source = _attributes.AttributeEnum(_attributes.AttributeViString, enums.SampleClockSource, 1150112)
'''Type: enums.SampleClockSource
Specifies the Sample clock source. If you specify a divisor with the exported_sample_clock_divisor property, the Sample clock exported with the exported_sample_clock_output_terminal property is the value of the Sample clock after it is divided-down. For a list of the terminals available on your device, refer to the Device Routes tab in MAX.
To change the device configuration, call abort or wait for the generation to complete.
Note: The signal generator must not be in the Generating state when you change this property.
'''
sample_clock_timebase_rate = _attributes.AttributeViReal64(1150368)
'''Type: float
Specifies the Sample clock timebase rate. This property applies only to external Sample clock timebases.
To change the device configuration, call abort or wait for the generation to complete.
Note: The signal generator must not be in the Generating state when you change this property.
'''
sample_clock_timebase_source = _attributes.AttributeEnum(_attributes.AttributeViString, enums.SampleClockTimebaseSource, 1150367)
'''Type: enums.SampleClockTimebaseSource
Specifies the Sample Clock Timebase source.
To change the device configuration, call the abort method or wait for the generation to complete.
Note: The signal generator must not be in the Generating state when you change this property.
'''
script_to_generate = _attributes.AttributeViString(1150270)
'''Type: str
Specifies which script the generator produces. To configure the generator to run a particular script, set this property to the name of the script. Use write_script to create multiple scripts. Use this property when output_mode is set to OutputMode.SCRIPT.
Note: The signal generator must not be in the Generating state when you change this property. To change the device configuration, call abort or wait for the generation to complete.
'''
script_triggers_count = _attributes.AttributeViInt32(1150272)
'''Type: int
Specifies the number of Script triggers supported by the device. Use this property when output_mode is set to OutputMode.SCRIPT.
'''
script_trigger_type = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.ScriptTriggerType, 1150290)
'''Type: enums.ScriptTriggerType
Specifies the Script trigger type. Depending upon the value of this property, additional properties may need to be configured to fully configure the trigger.
Tip:
This property can use repeated capabilities. If set or get directly on the
nifgen.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling set/get value on the result.
'''
serial_number = _attributes.AttributeViString(1150243)
'''Type: str
The signal generator's serial number.
'''
simulate = _attributes.AttributeViBoolean(1050005)
'''Type: bool
Specifies whether to simulate NI-FGEN I/O operations. If simulation is enabled, NI-FGEN methods perform range checking and call Ivi_GetAttribute and Ivi_SetAttribute, but they do not perform device I/O. For output parameters that represent device data, NI-FGEN methods return calculated values.
Default Value: False
Use InitWithOptions to override default value.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
specific_driver_description = _attributes.AttributeViString(1050514)
'''Type: str
Returns a brief description of NI-FGEN.
'''
major_version = _attributes.AttributeViInt32(1050503)
'''Type: int
Returns the major version number of NI-FGEN.
'''
minor_version = _attributes.AttributeViInt32(1050504)
'''Type: int
Returns the minor version number of NI-FGEN.
'''
specific_driver_revision = _attributes.AttributeViString(1050551)
'''Type: str
A string that contains additional version information about NI-FGEN.
'''
specific_driver_vendor = _attributes.AttributeViString(1050513)
'''Type: str
A string that contains the name of the vendor that supplies NI-FGEN.
'''
started_event_output_terminal = _attributes.AttributeViString(1150314)
'''Type: str
Specifies the destination terminal for the Started Event.
'''
start_trigger_type = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.StartTriggerType, 1150280)
'''Type: enums.StartTriggerType
Specifies whether you want the Start trigger to be a Digital Edge, or Software trigger. You can also choose None as the value for this property.
'''
streaming_space_available_in_waveform = _attributes.AttributeViInt32(1150325)
'''Type: int
Indicates the space available (in samples) in the streaming waveform for writing new data. During generation, this available space may be in multiple locations with, for example, part of the available space at the end of the streaming waveform and the rest at the beginning. In this situation, writing a block of waveform data the size of the total space available in the streaming waveform causes NI-FGEN to return an error, as NI-FGEN will not wrap the data from the end of the waveform to the beginning and cannot write data past the end of the waveform buffer.
To avoid writing data past the end of the waveform, write new data to the waveform in a fixed size that is an integer divisor of the total size of the streaming waveform.
Used in conjunction with the streaming_waveform_handle or streaming_waveform_name properties.
'''
streaming_waveform_handle = _attributes.AttributeViInt32(1150324)
'''Type: int
Specifies the waveform handle of the waveform used to continuously stream data during generation. This property defaults to -1 when no streaming waveform is specified.
Used in conjunction with streaming_space_available_in_waveform.
'''
streaming_waveform_name = _attributes.AttributeViString(1150326)
'''Type: str
Specifies the name of the waveform used to continuously stream data during generation. This property defaults to // when no streaming waveform is specified.
Use in conjunction with streaming_space_available_in_waveform.
'''
streaming_write_timeout = _attributes.AttributeViReal64TimeDeltaSeconds(1150409)
'''Type: float in seconds or datetime.timedelta
Specifies the maximum amount of time allowed to complete a streaming write operation.
'''
supported_instrument_models = _attributes.AttributeViString(1050327)
'''Type: str
Returns a model code of the device. For NI-FGEN versions that support more than one device, this property contains a comma-separated list of supported device models.
'''
terminal_configuration = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.TerminalConfiguration, 1150365)
'''Type: enums.TerminalConfiguration
Specifies whether gain and offset values will be analyzed based on single-ended or differential operation.
'''
trigger_mode = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.TriggerMode, 1150108)
'''Type: enums.TriggerMode
Controls the trigger mode.
'''
wait_behavior = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.WaitBehavior, 1150379)
'''Type: enums.WaitBehavior
Specifies the behavior of the output while waiting for a script trigger or during a wait instruction. The output can be configured to hold the last generated voltage before waiting or jump to the Wait Value.
'''
wait_value = _attributes.AttributeViInt32(1150380)
'''Type: int
Specifies the value to generate while waiting. The Wait Behavior must be configured to jump to this value.
'''
waveform_quantum = _attributes.AttributeViInt32(1250206)
'''Type: int
The size of each arbitrary waveform must be a multiple of a quantum value. This property returns the quantum value that the signal generator allows.
For example, when this property returns a value of 8, all waveform sizes must be a multiple of 8. Typically, this value is constant for the signal generator.
'''
def __init__(self, repeated_capability_list, vi, library, encoding, freeze_it=False):
    '''Wrap an already-open driver handle *vi* in a session object.'''
    self._repeated_capability_list = repeated_capability_list
    self._repeated_capability = ','.join(repeated_capability_list)
    self._vi = vi
    self._library = library
    self._encoding = encoding
    # Remember the constructor arguments so __repr__ can reproduce them later.
    shown_args = [
        "repeated_capability_list=" + pp.pformat(repeated_capability_list),
        "vi=" + pp.pformat(vi),
        "library=" + pp.pformat(library),
        "encoding=" + pp.pformat(encoding),
    ]
    self._param_list = ', '.join(shown_args)
    # Repeated-capability containers used for channel/trigger/marker indexing.
    self.channels = _RepeatedCapabilities(self, '', repeated_capability_list)
    self.script_triggers = _RepeatedCapabilities(self, 'ScriptTrigger', repeated_capability_list)
    self.markers = _RepeatedCapabilities(self, 'Marker', repeated_capability_list)
    # Must be assigned last: once True, __setattr__ rejects new attributes.
    self._is_frozen = freeze_it
def __repr__(self):
    '''Return ``nifgen.<ClassName>(<original constructor arguments>)``.'''
    return 'nifgen.' + self.__class__.__name__ + '(' + self._param_list + ')'
def __setattr__(self, key, value):
    '''Set *key*, but reject unknown attributes once the session is frozen.'''
    if not self._is_frozen or key in dir(self):
        # Either still constructing, or the attribute already exists.
        object.__setattr__(self, key, value)
        return
    raise AttributeError("'{0}' object has no attribute '{1}'".format(type(self).__name__, key))
def _get_error_description(self, error_code):
    '''Return a human-readable description for *error_code*.
    Tries the session's queued error string first, then falls back to
    _error_message, and finally to a generic failure message.
    '''
    try:
        _, description = self._get_error()
        return description
    except errors.Error:
        # _get_error is expected to raise when the session is invalid
        # (the IVI spec requires GetError to fail in that case).
        pass
    try:
        # _error_message does not require a valid session.
        return self._error_message(error_code)
    except errors.Error:
        return "Failed to retrieve error description."
''' These are code-generated '''
@ivi_synchronized
def allocate_named_waveform(self, waveform_name, waveform_size):
    r'''allocate_named_waveform
    Reserves onboard memory for a named waveform of the given size so that
    the associated data can later be loaded in smaller blocks with the
    niFgen Write (Binary16) Waveform methods.
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    Args:
        waveform_name (str): Name to associate with the allocated waveform.
        waveform_size (int): Size of the waveform to allocate, in samples.
            **Default Value**: "4096"
    '''
    # Marshal each argument into its C-compatible type before the FFI call.
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    name_buf = ctypes.create_string_buffer(waveform_name.encode(self._encoding))  # case C020
    size_vi = _visatype.ViInt32(waveform_size)  # case S150
    status = self._library.niFgen_AllocateNamedWaveform(session_handle, channel_buf, name_buf, size_vi)
    # Raises on error; warnings are surfaced, not ignored.
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def allocate_waveform(self, waveform_size):
    r'''allocate_waveform
    Reserves onboard memory for a waveform so that data can later be loaded
    in smaller blocks with the Write Binary 16 Waveform methods.
    Note:
    The signal generator must not be in the Generating state when you call
    this method.
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    Args:
        waveform_size (int): Size of the waveform to allocate, in samples.
    Returns:
        waveform_handle (int): Handle identifying the new waveform; pass it
            to later calls that refer to this waveform.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    size_vi = _visatype.ViInt32(waveform_size)  # case S150
    handle_out = _visatype.ViInt32()  # case S220 -- filled in by the driver
    status = self._library.niFgen_AllocateWaveform(session_handle, channel_buf, size_vi, ctypes.pointer(handle_out))
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return int(handle_out.value)
@ivi_synchronized
def clear_user_standard_waveform(self):
    r'''clear_user_standard_waveform
    Clears the user-defined waveform created by the
    define_user_standard_waveform method.
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    status = self._library.niFgen_ClearUserStandardWaveform(session_handle, channel_buf)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def configure_arb_sequence(self, sequence_handle, gain, offset):
    r'''configure_arb_sequence
    Configures arbitrary-sequence generation by setting the
    arb_sequence_handle, arb_gain, and arb_offset properties.
    Note:
    The signal generator must not be in the Generating state when you call
    this method.
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    Args:
        sequence_handle (int): Handle of the arbitrary sequence to produce,
            as returned by create_arb_sequence or
            create_advanced_arb_sequence.
        gain (float): Unitless factor by which the normalized (-1.00 to
            +1.00) waveform data is scaled; applied before the offset.
        offset (float): Volts added to the scaled waveform data; shifts the
            output range (e.g. offset 1.00 turns -1.00..1.00 V into
            0.00..2.00 V).
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    sequence_vi = _visatype.ViInt32(sequence_handle)  # case S150
    gain_vi = _visatype.ViReal64(gain)  # case S150
    offset_vi = _visatype.ViReal64(offset)  # case S150
    status = self._library.niFgen_ConfigureArbSequence(session_handle, channel_buf, sequence_vi, gain_vi, offset_vi)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def configure_arb_waveform(self, waveform_handle, gain, offset):
    r'''configure_arb_waveform
    Configures arbitrary-waveform generation by setting the
    arb_waveform_handle, arb_gain, and arb_offset properties.
    Note:
    The signal generator must not be in the Generating state when you call
    this method.
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    Args:
        waveform_handle (int): Handle of the arbitrary waveform to produce,
            as returned by one of the niFgen Create Waveform methods
            (e.g. create_waveform, create_waveform_from_file_i16,
            create_waveform_from_file_f64).
        gain (float): Unitless factor by which the normalized (-1.00 to
            +1.00) waveform data is scaled; applied before the offset.
        offset (float): Volts added to the scaled waveform data; shifts the
            output range (e.g. offset 1.00 turns -1.00..1.00 V into
            0.00..2.00 V).
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    handle_vi = _visatype.ViInt32(waveform_handle)  # case S150
    gain_vi = _visatype.ViReal64(gain)  # case S150
    offset_vi = _visatype.ViReal64(offset)  # case S150
    status = self._library.niFgen_ConfigureArbWaveform(session_handle, channel_buf, handle_vi, gain_vi, offset_vi)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def configure_freq_list(self, frequency_list_handle, amplitude, dc_offset=0.0, start_phase=0.0):
    r'''configure_freq_list
    Configures frequency-list generation by setting the freq_list_handle,
    func_amplitude, func_dc_offset, and func_start_phase properties.
    Note:
    The signal generator must not be in the Generating state when you call
    this method.
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    Args:
        frequency_list_handle (int): Handle of the frequency list to
            produce, as returned by create_freq_list.
        amplitude (float): Peak-to-peak amplitude, in volts, at the output
            terminal (e.g. 10.00 V for a -5.00 V to +5.00 V waveform).
            Ignored when the waveform is Waveform.DC.
        dc_offset (float): Offset, in volts, from ground to the center of
            the waveform at the output terminal (e.g. 5.00 V centers a
            10.00 V waveform on 0.00..10.00 V).
        start_phase (float): Horizontal offset, in degrees of one waveform
            cycle (180 starts halfway through; 360 is equivalent to 0).
            Ignored when the waveform is Waveform.DC.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    list_vi = _visatype.ViInt32(frequency_list_handle)  # case S150
    amplitude_vi = _visatype.ViReal64(amplitude)  # case S150
    dc_offset_vi = _visatype.ViReal64(dc_offset)  # case S150
    start_phase_vi = _visatype.ViReal64(start_phase)  # case S150
    status = self._library.niFgen_ConfigureFreqList(session_handle, channel_buf, list_vi, amplitude_vi, dc_offset_vi, start_phase_vi)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def configure_standard_waveform(self, waveform, amplitude, frequency, dc_offset=0.0, start_phase=0.0):
    r'''configure_standard_waveform
    Configures standard-waveform generation by setting the func_waveform,
    func_amplitude, func_dc_offset, func_frequency, and func_start_phase
    properties.
    Note:
    You must call the ConfigureOutputMode method with **outputMode** set to
    OutputMode.FUNC before calling this method. (One or more of the
    referenced methods are not in the Python API for this driver.)
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    Args:
        waveform (enums.Waveform): Standard waveform to produce. One of
            SINE, SQUARE, TRIANGLE, RAMP_UP, RAMP_DOWN, DC (constant
            voltage), NOISE (white noise), or USER (the waveform defined
            with define_user_standard_waveform).
            **Default Value**: Waveform.SINE
        amplitude (float): Peak-to-peak amplitude, in volts, at the output
            terminal (e.g. 10.00 V for a -5.00 V to +5.00 V waveform).
            Ignored when **waveform** is Waveform.DC.
        frequency (float): Frequency of the waveform, in hertz. Ignored
            when **waveform** is Waveform.DC.
        dc_offset (float): Offset, in volts, from ground to the center of
            the waveform at the output terminal (e.g. 5.00 V centers a
            10.00 V waveform on 0.00..10.00 V).
        start_phase (float): Horizontal offset, in degrees of one waveform
            cycle (180 starts halfway through; 360 is equivalent to 0).
            Ignored when **waveform** is Waveform.DC. Default: 0.00.
    '''
    # Exact-type check (not isinstance) mirrors the generated driver contract.
    if type(waveform) is not enums.Waveform:
        raise TypeError('Parameter waveform must be of type ' + str(enums.Waveform))
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    waveform_vi = _visatype.ViInt32(waveform.value)  # case S130
    amplitude_vi = _visatype.ViReal64(amplitude)  # case S150
    dc_offset_vi = _visatype.ViReal64(dc_offset)  # case S150
    frequency_vi = _visatype.ViReal64(frequency)  # case S150
    start_phase_vi = _visatype.ViReal64(start_phase)  # case S150
    # Note the C-call argument order: dc_offset precedes frequency.
    status = self._library.niFgen_ConfigureStandardWaveform(session_handle, channel_buf, waveform_vi, amplitude_vi, dc_offset_vi, frequency_vi, start_phase_vi)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def create_waveform(self, waveform_data_array):
    '''create_waveform
    Creates an onboard waveform for use in Arbitrary Waveform output mode or
    Arbitrary Sequence output mode, dispatching on the input container type.
    Note: You must set output_mode to OutputMode.ARB or OutputMode.SEQ
    before calling this method.
    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; index a repeated-capabilities container to target a subset.
    Args:
        waveform_data_array (iterable of float or int16): Data for the new
            arbitrary waveform; for best performance pass a numpy.ndarray
            of dtype int16 or float64.
    Returns:
        waveform_handle (int): Handle identifying the new waveform.
    '''
    # The type is matched by string so numpy is only imported when the
    # caller actually passed an ndarray.
    if "'numpy.ndarray'" in str(type(waveform_data_array)):
        import numpy
        dtype = waveform_data_array.dtype
        if dtype == numpy.float64:
            return self._create_waveform_f64_numpy(waveform_data_array)
        if dtype == numpy.int16:
            return self._create_waveform_i16_numpy(waveform_data_array)
        raise TypeError("Unsupported dtype. Is {0}, expected {1} or {2}".format(waveform_data_array.dtype, numpy.float64, numpy.int16))
    if isinstance(waveform_data_array, array.array):
        typecode = waveform_data_array.typecode
        if typecode == 'd':
            return self._create_waveform_f64(waveform_data_array)
        if typecode == 'h':
            return self._create_waveform_i16(waveform_data_array)
        raise TypeError("Unsupported dtype. Is {0}, expected {1} or {2}".format(waveform_data_array.typecode, 'd (double)', 'h (16 bit int)'))
    # Any other iterable of float falls through to the F64 path.
    return self._create_waveform_f64(waveform_data_array)
@ivi_synchronized
def _create_waveform_f64(self, waveform_data_array):
    r'''_create_waveform_f64

    Creates an onboard waveform from binary F64 (floating point double) data
    for use in Arbitrary Waveform output mode or Arbitrary Sequence output
    mode. The returned handle can later be used to select the active
    waveform, rewrite the waveform data, build sequences of waveforms, or
    delete the waveform when it is no longer needed.

    Note:
    Call the ConfigureOutputMode method to set **outputMode** to
    OutputMode.ARB or OutputMode.SEQ before calling this method.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        waveform_data_array (array.array("d")): Data for the new arbitrary
            waveform, normalized to [-1.00, +1.00]. Must contain at least as
            many elements as the waveform size. **Default Value**: None

    Returns:
        waveform_handle (int): Handle that identifies the new waveform in
            later calls.
    '''
    # Marshal every argument into its ctypes/VISA representation.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    size_raw = _visatype.ViInt32(0 if waveform_data_array is None else len(waveform_data_array))
    # Copy the caller's data into a ctypes-compatible double array first.
    data_copy = get_ctypes_and_array(value=waveform_data_array, array_type="d")
    data_ptr = get_ctypes_pointer_for_buffer(value=data_copy, library_type=_visatype.ViReal64)
    handle_out = _visatype.ViInt32()
    error_code = self._library.niFgen_CreateWaveformF64(session_handle, channel_buf, size_raw, data_ptr, ctypes.pointer(handle_out))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(handle_out.value)
@ivi_synchronized
def _create_waveform_f64_numpy(self, waveform_data_array):
r'''_create_waveform_f64
Creates an onboard waveform from binary F64 (floating point double) data
for use in Arbitrary Waveform output mode or Arbitrary Sequence output
mode. The **waveformHandle** returned can later be used for setting the
active waveform, changing the data in the waveform, building sequences
of waveforms, or deleting the waveform when it is no longer needed.
Note:
You must call the ConfigureOutputMode method to set the
**outputMode** parameter to OutputMode.ARB or
OutputMode.SEQ before calling this method.
Tip:
This method requires repeated capabilities. If called directly on the
nifgen.Session object, then the method will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling this method on the result.
Args:
waveform_data_array (numpy.array(dtype=numpy.float64)): Specifies the array of data you want to use for the new arbitrary
waveform. The array must have at least as many elements as the value
that you specify in **waveformSize**.
You must normalize the data points in the array to be between –1.00 and
+1.00.
**Default Value**: None
Returns:
waveform_handle (int): The handle that identifies the new waveform. This handle is used later
when referring to this waveform.
'''
import numpy
if type(waveform_data_array) is not numpy.ndarray:
raise TypeError('waveform_data_array must be {0}, is {1}'.format(numpy.ndarray, type(waveform_data_array)))
if numpy.isfortran(waveform_data_array) is True:
raise TypeError('waveform_data_array must be in C-order')
if waveform_data_array.dtype is not numpy.dtype('float64'):
raise TypeError('waveform_data_array must be numpy.ndarray of dtype=float64, is ' + str(waveform_data_array.dtype))
vi_ctype = _visatype.ViSession(self._vi) # case S110
channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010
waveform_size_ctype = _visatype.ViInt32(0 if waveform_data_array is None else len(waveform_data_array)) # case S160
waveform_data_array_ctype = get_ctypes_pointer_for_buffer(value=waveform_data_array) # case B510
waveform_handle_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_CreateWaveformF64(vi_ctype, channel_name_ctype, waveform_size_ctype, waveform_data_array_ctype, None if waveform_handle_ctype is None else (ctypes.pointer(waveform_handle_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(waveform_handle_ctype.value)
@ivi_synchronized
def create_waveform_from_file_f64(self, file_name, byte_order):
    r'''create_waveform_from_file_f64

    Reads the floating point double (F64) data from the specified file and
    creates an onboard waveform for use in Arbitrary Waveform or Arbitrary
    Sequence output mode. The returned handle can later be used to select
    the active waveform, rewrite the waveform data, build sequences of
    waveforms, or delete the waveform when it is no longer needed.

    Note:
    The F64 data must be between -1.0 and +1.0 V. Use the digital_gain
    property to generate different voltage outputs.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        file_name (str): Full path and name of the file where the waveform
            data resides.

        byte_order (enums.ByteOrder): Byte order of the data in the file.
            ByteOrder.LITTLE (the default) means the least significant bit
            is stored at the lowest address; ByteOrder.BIG means the most
            significant bit is stored at the lowest address. Data written by
            most Windows applications (including LabWindows(TM)/CVI(TM)) is
            Little Endian; data written from LabVIEW is Big Endian by
            default on all platforms.

    Returns:
        waveform_handle (int): Handle that identifies the new waveform in
            later calls.
    '''
    if type(byte_order) is not enums.ByteOrder:
        raise TypeError('Parameter byte_order must be of type ' + str(enums.ByteOrder))
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    file_name_buf = ctypes.create_string_buffer(file_name.encode(self._encoding))
    byte_order_raw = _visatype.ViInt32(byte_order.value)
    handle_out = _visatype.ViInt32()
    error_code = self._library.niFgen_CreateWaveformFromFileF64(session_handle, channel_buf, file_name_buf, byte_order_raw, ctypes.pointer(handle_out))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(handle_out.value)
@ivi_synchronized
def create_waveform_from_file_i16(self, file_name, byte_order):
    r'''create_waveform_from_file_i16

    Reads the binary 16-bit signed integer (I16) data from the specified
    file and creates an onboard waveform for use in Arbitrary Waveform or
    Arbitrary Sequence output mode. The returned handle can later be used to
    select the active waveform, rewrite the waveform data, build sequences
    of waveforms, or delete the waveform when it is no longer needed.

    Note:
    The I16 data (values between -32768 and +32767) is assumed to represent
    -1 to +1 V. Use the digital_gain property to generate different voltage
    outputs.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        file_name (str): Full path and name of the file where the waveform
            data resides.

        byte_order (enums.ByteOrder): Byte order of the data in the file.
            ByteOrder.LITTLE (the default) means the least significant bit
            is stored at the lowest address; ByteOrder.BIG means the most
            significant bit is stored at the lowest address. Data written by
            most Windows applications (including LabWindows(TM)/CVI(TM)) is
            Little Endian; data written from LabVIEW is Big Endian by
            default on all platforms.

    Returns:
        waveform_handle (int): Handle that identifies the new waveform in
            later calls.
    '''
    if type(byte_order) is not enums.ByteOrder:
        raise TypeError('Parameter byte_order must be of type ' + str(enums.ByteOrder))
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    file_name_buf = ctypes.create_string_buffer(file_name.encode(self._encoding))
    byte_order_raw = _visatype.ViInt32(byte_order.value)
    handle_out = _visatype.ViInt32()
    error_code = self._library.niFgen_CreateWaveformFromFileI16(session_handle, channel_buf, file_name_buf, byte_order_raw, ctypes.pointer(handle_out))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(handle_out.value)
@ivi_synchronized
def _create_waveform_i16_numpy(self, waveform_data_array):
r'''_create_waveform_i16
Creates an onboard waveform from binary 16-bit signed integer (I16) data
for use in Arbitrary Waveform or Arbitrary Sequence output mode. The
**waveformHandle** returned can later be used for setting the active
waveform, changing the data in the waveform, building sequences of
waveforms, or deleting the waveform when it is no longer needed.
Note:
You must call the ConfigureOutputMode method to set the
**outputMode** parameter to OutputMode.ARB or
OutputMode.SEQ before calling this method.
Tip:
This method requires repeated capabilities. If called directly on the
nifgen.Session object, then the method will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling this method on the result.
Args:
waveform_data_array (numpy.array(dtype=numpy.int16)): Specify the array of data that you want to use for the new arbitrary
waveform. The array must have at least as many elements as the value
that you specify in the Waveform Size parameter.
You must normalize the data points in the array to be between -32768 and
+32767.
****Default Value**:** None
Returns:
waveform_handle (int): The handle that identifies the new waveform. This handle is used later
when referring to this waveform.
'''
import numpy
if type(waveform_data_array) is not numpy.ndarray:
raise TypeError('waveform_data_array must be {0}, is {1}'.format(numpy.ndarray, type(waveform_data_array)))
if numpy.isfortran(waveform_data_array) is True:
raise TypeError('waveform_data_array must be in C-order')
if waveform_data_array.dtype is not numpy.dtype('int16'):
raise TypeError('waveform_data_array must be numpy.ndarray of dtype=int16, is ' + str(waveform_data_array.dtype))
vi_ctype = _visatype.ViSession(self._vi) # case S110
channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010
waveform_size_ctype = _visatype.ViInt32(0 if waveform_data_array is None else len(waveform_data_array)) # case S160
waveform_data_array_ctype = get_ctypes_pointer_for_buffer(value=waveform_data_array) # case B510
waveform_handle_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_CreateWaveformI16(vi_ctype, channel_name_ctype, waveform_size_ctype, waveform_data_array_ctype, None if waveform_handle_ctype is None else (ctypes.pointer(waveform_handle_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(waveform_handle_ctype.value)
@ivi_synchronized
def define_user_standard_waveform(self, waveform_data_array):
    r'''define_user_standard_waveform

    Defines a user waveform for use in either Standard Method or Frequency
    List output mode. Select the waveform by passing Waveform.USER to either
    the configure_standard_waveform or the create_freq_list method. The
    waveform data must be scaled between -1.0 and 1.0; use the **amplitude**
    parameter of configure_standard_waveform to generate different output
    voltages.

    Note:
    You must call the ConfigureOutputMode method to set the **outputMode**
    parameter to OutputMode.FUNC or OutputMode.FREQ_LIST before calling this
    method.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        waveform_data_array (list of float): Data for the new arbitrary
            waveform, normalized to [-1.00, +1.00]. Must contain at least as
            many elements as the waveform size. **Default Value**: None
    '''
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    size_raw = _visatype.ViInt32(0 if waveform_data_array is None else len(waveform_data_array))
    data_ptr = get_ctypes_pointer_for_buffer(value=waveform_data_array, library_type=_visatype.ViReal64)
    error_code = self._library.niFgen_DefineUserStandardWaveform(session_handle, channel_buf, size_raw, data_ptr)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def _delete_named_waveform(self, waveform_name):
    r'''_delete_named_waveform

    Removes a previously created arbitrary waveform from the signal
    generator memory and invalidates the waveform handle.

    Note:
    The signal generator must not be in the Generating state when you call
    this method.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        waveform_name (str): Name associated with the allocated waveform.
    '''
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    name_buf = ctypes.create_string_buffer(waveform_name.encode(self._encoding))
    error_code = self._library.niFgen_DeleteNamedWaveform(session_handle, channel_buf, name_buf)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def delete_script(self, script_name):
    r'''delete_script

    Deletes the specified script from onboard memory.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        script_name (str): Name of the script to delete. The script name
            appears in the text of the script following the script keyword.
    '''
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    script_buf = ctypes.create_string_buffer(script_name.encode(self._encoding))
    error_code = self._library.niFgen_DeleteScript(session_handle, channel_buf, script_buf)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def delete_waveform(self, waveform_name_or_handle):
    '''delete_waveform

    Removes a previously created arbitrary waveform from the signal
    generator memory.

    Note: The signal generator must not be in the Generating state when you
    call this method.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        waveform_name_or_handle (str or int): The name (str) or handle (int)
            of an arbitrary waveform previously allocated with
            allocate_named_waveform, allocate_waveform or create_waveform.
    '''
    # Strings identify named waveforms; anything else is treated as a handle.
    if isinstance(waveform_name_or_handle, str):
        return self._delete_named_waveform(waveform_name_or_handle)
    return self._clear_arb_waveform(waveform_name_or_handle)
@ivi_synchronized
def _get_attribute_vi_boolean(self, attribute_id):
    r'''_get_attribute_vi_boolean

    Queries the value of a ViBoolean property, either instrument-specific or
    inherent IVI. If the property represents an instrument state, this may
    perform instrument I/O when state caching is disabled (for the session
    or the property) or when the cached value is invalid.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        attribute_id (int): ID of the property to query.

    Returns:
        attribute_value (bool): Current value of the property.
    '''
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    attr_id_raw = _visatype.ViAttr(attribute_id)
    value_out = _visatype.ViBoolean()
    error_code = self._library.niFgen_GetAttributeViBoolean(session_handle, channel_buf, attr_id_raw, ctypes.pointer(value_out))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return bool(value_out.value)
@ivi_synchronized
def _get_attribute_vi_int32(self, attribute_id):
    r'''_get_attribute_vi_int32

    Queries the value of a ViInt32 property, either instrument-specific or
    inherent IVI. If the property represents an instrument state, this may
    perform instrument I/O when state caching is disabled (for the session
    or the property) or when the cached value is invalid.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        attribute_id (int): ID of the property to query.

    Returns:
        attribute_value (int): Current value of the property.
    '''
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    attr_id_raw = _visatype.ViAttr(attribute_id)
    value_out = _visatype.ViInt32()
    error_code = self._library.niFgen_GetAttributeViInt32(session_handle, channel_buf, attr_id_raw, ctypes.pointer(value_out))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(value_out.value)
@ivi_synchronized
def _get_attribute_vi_real64(self, attribute_id):
    r'''_get_attribute_vi_real64

    Queries the value of a ViReal64 property, either instrument-specific or
    inherent IVI. If the property represents an instrument state, this may
    perform instrument I/O when state caching is disabled (for the session
    or the property) or when the cached value is invalid.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, the method uses all repeated capabilities in the
    session; use Python index notation on a repeated capabilities container
    to target a subset.

    Args:
        attribute_id (int): ID of the property to query.

    Returns:
        attribute_value (float): Current value of the property.
    '''
    # Marshal arguments into their ctypes/VISA representations.
    session_handle = _visatype.ViSession(self._vi)
    channel_buf = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))
    attr_id_raw = _visatype.ViAttr(attribute_id)
    value_out = _visatype.ViReal64()
    error_code = self._library.niFgen_GetAttributeViReal64(session_handle, channel_buf, attr_id_raw, ctypes.pointer(value_out))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return float(value_out.value)
@ivi_synchronized
def _get_attribute_vi_string(self, attribute_id):
    r'''_get_attribute_vi_string

    Queries the value of a ViString property.

    You can use this method to get the values of instrument-specific
    properties and inherent IVI properties. If the property represents an
    instrument state, this method performs instrument I/O in the following
    cases:

    - State caching is disabled for the entire session or for the
      particular property.
    - State caching is enabled and the currently cached value is invalid.

    You must provide a ViChar array to serve as a buffer for the value. You
    pass the number of bytes in the buffer as the **arraySize** parameter.
    If the current value of the property, including the terminating NUL
    byte, is larger than the size you indicate in the **arraySize**
    parameter, the method copies **arraySize** – 1 bytes into the buffer,
    places an ASCII NUL byte at the end of the buffer, and returns the array
    size you must pass to get the entire value. For example, if the value is
    123456 and **arraySize** is 4, the method places 123 into the buffer
    and returns 7.

    If you want to call this method just to get the required array size,
    you can pass 0 for **arraySize** and VI_NULL for the **attributeValue**
    buffer.

    If you want the method to fill in the buffer regardless of the number
    of bytes in the value, pass a negative number for the **arraySize**
    parameter.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        attribute_id (int): Specifies the ID of a property.

    Returns:
        attribute_value (str): The buffer in which the method returns the current value of the
            property. The buffer must be a ViChar data type and have at least as
            many bytes as indicated in the **arraySize** parameter.
    '''
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attribute_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
    array_size_ctype = _visatype.ViInt32()  # case S170
    attribute_value_ctype = None  # case C050
    # First pass: call with size 0 and a NULL buffer. The driver returns the
    # required buffer size as a positive "error" code, so warnings must be
    # ignored here rather than raised.
    error_code = self._library.niFgen_GetAttributeViString(vi_ctype, channel_name_ctype, attribute_id_ctype, array_size_ctype, attribute_value_ctype)
    errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
    # Second pass: allocate a buffer of the size the driver asked for and
    # fetch the actual string value.
    array_size_ctype = _visatype.ViInt32(error_code)  # case S180
    attribute_value_ctype = (_visatype.ViChar * array_size_ctype.value)()  # case C060
    error_code = self._library.niFgen_GetAttributeViString(vi_ctype, channel_name_ctype, attribute_id_ctype, array_size_ctype, attribute_value_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    # Decode the NUL-terminated C string with the session's configured encoding.
    return attribute_value_ctype.value.decode(self._encoding)
def _get_error(self):
    r'''_get_error

    Returns the error information associated with an IVI session or with the
    current execution thread. If you specify a valid IVI session for the
    **vi** parameter, this method retrieves and then clears the error
    information for the session. If you pass VI_NULL for the **vi**
    parameter, this method retrieves and then clears the error information
    for the current execution thread.

    The IVI Engine also maintains this error information separately for each
    thread. This feature is useful if you do not have a session handle to
    pass to the _get_error or ClearError methods. This
    situation occurs when a call to the init or
    InitWithOptions method fails.

    Returns:
        error_code (int): The error code for the session or execution thread.
            A value of VI_SUCCESS (0) indicates that no error occurred. A positive
            value indicates a warning. A negative value indicates an error.
            You can call _error_message to get a text description of the
            value.

        error_description (str): The error description string for the session
            or execution thread. If the error code is nonzero, the description
            string can further describe the error or warning condition.
    '''
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    error_code_ctype = _visatype.ViStatus()  # case S220
    error_description_buffer_size_ctype = _visatype.ViInt32()  # case S170
    error_description_ctype = None  # case C050
    # First pass: size 0 / NULL buffer. The driver returns the required
    # description-buffer size as a positive "error" code, so warnings are
    # ignored. is_error_handling=True prevents recursive error lookups.
    error_code = self._library.niFgen_GetError(vi_ctype, None if error_code_ctype is None else (ctypes.pointer(error_code_ctype)), error_description_buffer_size_ctype, error_description_ctype)
    errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=True)
    # Second pass: allocate the buffer at the size the driver asked for and
    # fetch the actual error code and description.
    error_description_buffer_size_ctype = _visatype.ViInt32(error_code)  # case S180
    error_description_ctype = (_visatype.ViChar * error_description_buffer_size_ctype.value)()  # case C060
    error_code = self._library.niFgen_GetError(vi_ctype, None if error_code_ctype is None else (ctypes.pointer(error_code_ctype)), error_description_buffer_size_ctype, error_description_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
    # Decode the NUL-terminated C string with the session's configured encoding.
    return int(error_code_ctype.value), error_description_ctype.value.decode(self._encoding)
def lock(self):
    '''lock

    Obtains a multithread lock on the device session, first waiting until
    all other execution threads have released their locks on it.

    Other threads may hold a lock on this session because the application
    called lock, or because a call into NI-FGEN locked the session. Once
    lock returns successfully, no other thread can access the device
    session until you call unlock (or exit the with block when using the
    returned context manager).

    Calls to lock may be nested within the same thread; to fully release
    the session, balance every lock call with an unlock call. Wrap a
    sequence of driver calls in lock/unlock when the device must retain its
    settings through the whole sequence.

    Returns:
        lock (context manager): When used in a with statement,
            nifgen.Session.lock acts as a context manager and unlock is
            called when the with block is exited.
    '''
    # Acquire here rather than inside the context manager so that lock()
    # also works standalone, with the caller invoking unlock() explicitly.
    # When used in a with statement, _Lock performs the unlock on exit.
    self._lock_session()
    return _Lock(self)
def _lock_session(self):
    '''_lock_session

    Low-level driver call backing lock(): invokes niFgen_LockSession on
    this session handle and raises on error.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    status = self._library.niFgen_LockSession(session_ctype, None)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=True)
@ivi_synchronized
def send_software_edge_trigger(self, trigger=None, trigger_id=None):
    '''send_software_edge_trigger

    Sends a command to trigger the signal generator. This VI can act as an
    override for an external edge trigger.

    Note:
    This VI does not override external digital edge triggers of the
    NI 5401/5411/5431.

    Args:
        trigger (enums.Trigger): Trigger specifies the type of software trigger to send

            +----------------+
            | Defined Values |
            +================+
            | Trigger.START  |
            +----------------+
            | Trigger.SCRIPT |
            +----------------+

            Note:
            One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.

        trigger_id (str): Trigger ID specifies the Script Trigger to use for triggering.
    '''
    if trigger is None or trigger_id is None:
        # Deprecated call signature: infer trigger/trigger_id from how the
        # method was invoked (on a repeated-capability container or on the
        # session itself).
        import warnings
        warnings.warn('trigger and trigger_id should now always be passed in to the method', category=DeprecationWarning)
        if len(self._repeated_capability) > 0:
            trigger_id = self._repeated_capability
            trigger = enums.Trigger.SCRIPT
        else:
            trigger_id = "None"
            trigger = enums.Trigger.START
    # NOTE(review): the original code had a trailing
    # `else: raise ValueError(...)` after an
    # `elif trigger is not None and trigger_id is not None: pass` branch.
    # That elif is the exact logical complement of the `if` above, so the
    # ValueError branch could never execute; it has been removed as dead
    # code. Behavior is unchanged.
    if type(trigger) is not enums.Trigger:
        raise TypeError('Parameter trigger must be of type ' + str(enums.Trigger))
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    trigger_ctype = _visatype.ViInt32(trigger.value)  # case S130
    trigger_id_ctype = ctypes.create_string_buffer(trigger_id.encode(self._encoding))  # case C020
    error_code = self._library.niFgen_SendSoftwareEdgeTrigger(vi_ctype, trigger_ctype, trigger_id_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def _set_attribute_vi_boolean(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_boolean

    Sets the value of a ViBoolean property.

    This is a low-level setter for instrument-specific and inherent IVI
    properties. When the property represents an instrument state it
    performs instrument I/O if state caching is disabled (for the session
    or the property), or if the cached value is invalid or differs from
    the value supplied. Prefer the high-level driver methods: they handle
    order dependencies and multithread locking, check status once after
    setting all properties, and (with state caching) touch the hardware
    only for values that actually change.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        attribute_id (int): Specifies the ID of a property.

        attribute_value (bool): Specifies the value to which you want to set the property.
            **Default Value**: None. Some values might not be valid
            depending on the current settings of the instrument session.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
    value_ctype = _visatype.ViBoolean(attribute_value)  # case S150
    status = self._library.niFgen_SetAttributeViBoolean(session_ctype, channel_ctype, attr_id_ctype, value_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _set_attribute_vi_int32(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_int32

    Sets the value of a ViInt32 property.

    This is a low-level setter for instrument-specific and inherent IVI
    properties. When the property represents an instrument state it
    performs instrument I/O if state caching is disabled (for the session
    or the property), or if the cached value is invalid or differs from
    the value supplied. Prefer the high-level driver methods: they handle
    order dependencies and multithread locking, check status once after
    setting all properties, and (with state caching) touch the hardware
    only for values that actually change.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        attribute_id (int): Specifies the ID of a property.

        attribute_value (int): Specifies the value to which you want to set the property.
            **Default Value**: None. Some values might not be valid
            depending on the current settings of the instrument session.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
    value_ctype = _visatype.ViInt32(attribute_value)  # case S150
    status = self._library.niFgen_SetAttributeViInt32(session_ctype, channel_ctype, attr_id_ctype, value_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _set_attribute_vi_real64(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_real64

    Sets the value of a ViReal64 property.

    This is a low-level setter for instrument-specific and inherent IVI
    properties. When the property represents an instrument state it
    performs instrument I/O if state caching is disabled (for the session
    or the property), or if the cached value is invalid or differs from
    the value supplied. Prefer the high-level driver methods: they handle
    order dependencies and multithread locking, check status once after
    setting all properties, and (with state caching) touch the hardware
    only for values that actually change.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        attribute_id (int): Specifies the ID of a property.

        attribute_value (float): Specifies the value to which you want to set the property.
            **Default Value**: None. Some values might not be valid
            depending on the current settings of the instrument session.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
    value_ctype = _visatype.ViReal64(attribute_value)  # case S150
    status = self._library.niFgen_SetAttributeViReal64(session_ctype, channel_ctype, attr_id_ctype, value_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _set_attribute_vi_string(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_string

    Sets the value of a ViString property.

    This is a low-level setter for instrument-specific and inherent IVI
    properties. When the property represents an instrument state it
    performs instrument I/O if state caching is disabled (for the session
    or the property), or if the cached value is invalid or differs from
    the value supplied. Prefer the high-level driver methods: they handle
    order dependencies and multithread locking, check status once after
    setting all properties, and (with state caching) touch the hardware
    only for values that actually change.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        attribute_id (int): Specifies the ID of a property.

        attribute_value (str): Specifies the value to which you want to set the property.
            **Default Value**: None. Some values might not be valid
            depending on the current settings of the instrument session.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
    value_ctype = ctypes.create_string_buffer(attribute_value.encode(self._encoding))  # case C020
    status = self._library.niFgen_SetAttributeViString(session_ctype, channel_ctype, attr_id_ctype, value_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _set_named_waveform_next_write_position(self, waveform_name, relative_to, offset):
    r'''_set_named_waveform_next_write_position

    Sets the position in the named waveform at which the next write begins,
    allowing writes to arbitrary locations within the waveform. The setting
    applies only to the next write to this waveform; subsequent writes
    resume where the previous one left off unless this method is called
    again. The waveform must have been created by allocate_waveform,
    create_waveform, create_waveform_from_file_i16,
    create_waveform_from_file_f64, or CreateWaveformFromFileHWS.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_name (str): Specifies the name to associate with the allocated waveform.

        relative_to (enums.RelativeTo): Reference position in the waveform;
            combined with **offset** it determines where loading starts.
            RelativeTo.START (0) = start of waveform,
            RelativeTo.CURRENT (1) = current position within the waveform.

        offset (int): Offset from **relativeTo** at which to start loading
            the data into the waveform.
    '''
    if type(relative_to) is not enums.RelativeTo:
        raise TypeError('Parameter relative_to must be of type ' + str(enums.RelativeTo))
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    name_ctype = ctypes.create_string_buffer(waveform_name.encode(self._encoding))  # case C020
    relative_ctype = _visatype.ViInt32(relative_to.value)  # case S130
    offset_ctype = _visatype.ViInt32(offset)  # case S150
    status = self._library.niFgen_SetNamedWaveformNextWritePosition(session_ctype, channel_ctype, name_ctype, relative_ctype, offset_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def set_next_write_position(self, waveform_name_or_handle, relative_to, offset):
    '''set_next_write_position

    Sets the position in the waveform at which the next waveform data is
    written. The setting applies only to the next write to the specified
    waveform; subsequent writes resume where the previous one left off
    unless this method is called again. The waveform must have been
    created by allocate_waveform or one of the create_waveform methods.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_name_or_handle (str or int): The name (str) or handle (int) of an arbitrary waveform previously allocated with allocate_named_waveform, allocate_waveform or create_waveform.

        relative_to (enums.RelativeTo): Reference position in the waveform;
            combined with **offset** it determines where loading starts.
            RelativeTo.START (0) = start of waveform,
            RelativeTo.CURRENT (1) = current position within the waveform.

        offset (int): Offset from **relativeTo** at which to start loading
            the data into the waveform.
    '''
    # Dispatch on identifier type: names go to the named-waveform entry
    # point, handles to the handle-based one.
    setter = (
        self._set_named_waveform_next_write_position
        if isinstance(waveform_name_or_handle, str)
        else self._set_waveform_next_write_position
    )
    return setter(waveform_name_or_handle, relative_to, offset)
@ivi_synchronized
def _set_waveform_next_write_position(self, waveform_handle, relative_to, offset):
    r'''_set_waveform_next_write_position

    Sets the position in the waveform (identified by handle) at which the
    next waveform data is written, allowing writes to arbitrary locations.
    The setting applies only to the next write to this waveform;
    subsequent writes resume where the previous one left off unless this
    method is called again. The handle must come from allocate_waveform,
    create_waveform, create_waveform_from_file_i16,
    create_waveform_from_file_f64, or CreateWaveformFromFileHWS.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_handle (int): Specifies the handle of the arbitrary waveform previously allocated with
            the allocate_waveform method.

        relative_to (enums.RelativeTo): Reference position in the waveform;
            combined with **offset** it determines where loading starts.
            RelativeTo.START (0) = start of waveform,
            RelativeTo.CURRENT (1) = current position within the waveform.

        offset (int): Offset from **relativeTo** at which to start loading
            the data into the waveform.
    '''
    if type(relative_to) is not enums.RelativeTo:
        raise TypeError('Parameter relative_to must be of type ' + str(enums.RelativeTo))
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    handle_ctype = _visatype.ViInt32(waveform_handle)  # case S150
    relative_ctype = _visatype.ViInt32(relative_to.value)  # case S130
    offset_ctype = _visatype.ViInt32(offset)  # case S150
    status = self._library.niFgen_SetWaveformNextWritePosition(session_ctype, channel_ctype, handle_ctype, relative_ctype, offset_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
def unlock(self):
    '''unlock

    Releases a lock previously acquired on the device session with lock.
    Refer to lock for additional information on session locks.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    status = self._library.niFgen_UnlockSession(session_ctype, None)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=True)
@ivi_synchronized
def _write_binary16_waveform_numpy(self, waveform_handle, data):
    r'''_write_binary16_waveform

    Writes binary data to the waveform in onboard memory. The waveform
    handle passed must have been created by a call to the
    allocate_waveform or the create_waveform method.

    By default, the subsequent call to the write_waveform
    method continues writing data from the position of the last sample
    written. You can set the write position and offset by calling the
    set_next_write_position method. If streaming is enabled,
    you can write more data than the allocated waveform size in onboard
    memory. Refer to the
    `Streaming <REPLACE_DRIVER_SPECIFIC_URL_2(streaming)>`__ topic for more
    information about streaming data.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_handle (int): Specifies the handle of the arbitrary waveform previously allocated with
            the allocate_waveform method.

        data (numpy.array(dtype=numpy.int16)): Specifies the array of data to load into the waveform. The array must
            have at least as many elements as the value in **size**. The binary data
            is left-justified.
    '''
    import numpy
    if type(data) is not numpy.ndarray:
        raise TypeError('data must be {0}, is {1}'.format(numpy.ndarray, type(data)))
    # Fix: use truthiness/equality rather than identity. `is True` would
    # misfire if isfortran ever returned numpy.bool_, and dtype identity
    # only works because numpy caches builtin dtype objects; `!=` is the
    # documented comparison and also accepts equal-but-distinct dtypes.
    if numpy.isfortran(data):
        raise TypeError('data must be in C-order')
    if data.dtype != numpy.dtype('int16'):
        raise TypeError('data must be numpy.ndarray of dtype=int16, is ' + str(data.dtype))
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    waveform_handle_ctype = _visatype.ViInt32(waveform_handle)  # case S150
    size_ctype = _visatype.ViInt32(0 if data is None else len(data))  # case S160
    data_ctype = get_ctypes_pointer_for_buffer(value=data)  # case B510
    error_code = self._library.niFgen_WriteBinary16Waveform(vi_ctype, channel_name_ctype, waveform_handle_ctype, size_ctype, data_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def _write_named_waveform_f64(self, waveform_name, data):
    r'''_write_named_waveform_f64

    Writes floating-point data to the named waveform in onboard memory.
    The waveform must have been created by allocate_waveform,
    create_waveform, create_waveform_from_file_i16,
    create_waveform_from_file_f64, or CreateWaveformFromFileHWS.

    By default the next write_waveform call continues from the position of
    the last sample written; use set_next_write_position to change the
    write position and offset. With streaming enabled you can write more
    data than the allocated waveform size in onboard memory — see the
    `Streaming <REPLACE_DRIVER_SPECIFIC_URL_2(streaming)>`__ topic.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_name (str): Specifies the name to associate with the allocated waveform.

        data (array.array("d")): Specifies the array of data to load into the waveform. The array must
            have at least as many elements as the value in **size**.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    name_ctype = ctypes.create_string_buffer(waveform_name.encode(self._encoding))  # case C020
    size_ctype = _visatype.ViInt32(0 if data is None else len(data))  # case S160
    buffer_array = get_ctypes_and_array(value=data, array_type="d")  # case B550
    buffer_ctype = get_ctypes_pointer_for_buffer(value=buffer_array, library_type=_visatype.ViReal64)  # case B550
    status = self._library.niFgen_WriteNamedWaveformF64(session_ctype, channel_ctype, name_ctype, size_ctype, buffer_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _write_named_waveform_f64_numpy(self, waveform_name, data):
    r'''_write_named_waveform_f64

    Writes floating-point data to the waveform in onboard memory. The
    waveform handle passed in must have been created by a call to the
    allocate_waveform method or to one of the following niFgen
    Create Waveform methods:

    -  create_waveform
    -  create_waveform_from_file_i16
    -  create_waveform_from_file_f64
    -  CreateWaveformFromFileHWS

    By default, the subsequent call to the write_waveform
    method continues writing data from the position of the last sample
    written. You can set the write position and offset by calling the
    set_next_write_position method. If streaming is
    enabled, you can write more data than the allocated waveform size in
    onboard memory. Refer to the
    `Streaming <REPLACE_DRIVER_SPECIFIC_URL_2(streaming)>`__ topic for more
    information about streaming data.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_name (str): Specifies the name to associate with the allocated waveform.

        data (numpy.array(dtype=numpy.float64)): Specifies the array of data to load into the waveform. The array must
            have at least as many elements as the value in **size**.
    '''
    import numpy
    if type(data) is not numpy.ndarray:
        raise TypeError('data must be {0}, is {1}'.format(numpy.ndarray, type(data)))
    # Fix: truthiness/equality instead of identity checks — `is True` and
    # dtype `is` comparisons depend on implementation details (Python bool
    # return, numpy's builtin-dtype caching); `!=` is the documented way
    # to compare dtypes.
    if numpy.isfortran(data):
        raise TypeError('data must be in C-order')
    if data.dtype != numpy.dtype('float64'):
        raise TypeError('data must be numpy.ndarray of dtype=float64, is ' + str(data.dtype))
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    waveform_name_ctype = ctypes.create_string_buffer(waveform_name.encode(self._encoding))  # case C020
    size_ctype = _visatype.ViInt32(0 if data is None else len(data))  # case S160
    data_ctype = get_ctypes_pointer_for_buffer(value=data)  # case B510
    error_code = self._library.niFgen_WriteNamedWaveformF64(vi_ctype, channel_name_ctype, waveform_name_ctype, size_ctype, data_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def _write_named_waveform_i16_numpy(self, waveform_name, data):
    r'''_write_named_waveform_i16

    Writes binary data to the named waveform in onboard memory.

    By default, the subsequent call to the write_waveform
    method continues writing data from the position of the last sample
    written. You can set the write position and offset by calling the
    set_next_write_position method. If streaming is
    enabled, you can write more data than the allocated waveform size in
    onboard memory. Refer to the
    `Streaming <REPLACE_DRIVER_SPECIFIC_URL_2(streaming)>`__ topic for more
    information about streaming data.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_name (str): Specifies the name to associate with the allocated waveform.

        data (numpy.array(dtype=numpy.int16)): Specifies the array of data to load into the waveform. The array must
            have at least as many elements as the value in **size**.
    '''
    import numpy
    if type(data) is not numpy.ndarray:
        raise TypeError('data must be {0}, is {1}'.format(numpy.ndarray, type(data)))
    # Fix: truthiness/equality instead of identity checks — `is True` and
    # dtype `is` comparisons depend on implementation details (Python bool
    # return, numpy's builtin-dtype caching); `!=` is the documented way
    # to compare dtypes.
    if numpy.isfortran(data):
        raise TypeError('data must be in C-order')
    if data.dtype != numpy.dtype('int16'):
        raise TypeError('data must be numpy.ndarray of dtype=int16, is ' + str(data.dtype))
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    waveform_name_ctype = ctypes.create_string_buffer(waveform_name.encode(self._encoding))  # case C020
    size_ctype = _visatype.ViInt32(0 if data is None else len(data))  # case S160
    data_ctype = get_ctypes_pointer_for_buffer(value=data)  # case B510
    error_code = self._library.niFgen_WriteNamedWaveformI16(vi_ctype, channel_name_ctype, waveform_name_ctype, size_ctype, data_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def write_script(self, script):
    r'''write_script

    Writes a string containing one or more scripts that govern the
    generation of waveforms.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        script (str): Contains the text of the script you want to use for your generation
            operation. Refer to `scripting
            Instructions <REPLACE_DRIVER_SPECIFIC_URL_2(niscripted.chm',%20'scripting_instructions)>`__
            for more information about writing scripts.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    script_ctype = ctypes.create_string_buffer(script.encode(self._encoding))  # case C020
    status = self._library.niFgen_WriteScript(session_ctype, channel_ctype, script_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _write_waveform(self, waveform_handle, data):
    r'''_write_waveform

    Writes floating-point data to the waveform (identified by handle) in
    onboard memory. The handle must have been created by
    allocate_waveform, create_waveform, create_waveform_from_file_i16,
    create_waveform_from_file_f64, or CreateWaveformFromFileHWS.

    By default the next write_waveform call continues from the position of
    the last sample written; use set_next_write_position to change the
    write position and offset. With streaming enabled you can write more
    data than the allocated waveform size in onboard memory — see the
    `Streaming <REPLACE_DRIVER_SPECIFIC_URL_2(streaming)>`__ topic.

    Tip:
    This method requires repeated capabilities. If called directly on the
    nifgen.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    nifgen.Session repeated capabilities container, and calling this method on the result.

    Args:
        waveform_handle (int): Specifies the handle of the arbitrary waveform previously allocated with
            the allocate_waveform method.

        data (array.array("d")): Specifies the array of data to load into the waveform. The array must
            have at least as many elements as the value in **size**.
    '''
    session_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    handle_ctype = _visatype.ViInt32(waveform_handle)  # case S150
    size_ctype = _visatype.ViInt32(0 if data is None else len(data))  # case S160
    buffer_array = get_ctypes_and_array(value=data, array_type="d")  # case B550
    buffer_ctype = get_ctypes_pointer_for_buffer(value=buffer_array, library_type=_visatype.ViReal64)  # case B550
    status = self._library.niFgen_WriteWaveform(session_ctype, channel_ctype, handle_ctype, size_ctype, buffer_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _write_waveform_numpy(self, waveform_handle, data):
r'''_write_waveform
Writes floating-point data to the waveform in onboard memory. The
waveform handle passed in must have been created by a call to the
allocate_waveform method or one of the following niFgen
CreateWaveform methods:
- create_waveform
- create_waveform
- create_waveform_from_file_i16
- create_waveform_from_file_f64
- CreateWaveformFromFileHWS
By default, the subsequent call to the write_waveform method
continues writing data from the position of the last sample written. You
can set the write position and offset by calling the
set_next_write_position method. If streaming is enabled,
you can write more data than the allocated waveform size in onboard
memory. Refer to the
`Streaming <REPLACE_DRIVER_SPECIFIC_URL_2(streaming)>`__ topic for more
information about streaming data.
Tip:
This method requires repeated capabilities. If called directly on the
nifgen.Session object, then the method will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling this method on the result.
Args:
waveform_handle (int): Specifies the handle of the arbitrary waveform previously allocated with
the allocate_waveform method.
data (numpy.array(dtype=numpy.float64)): Specifies the array of data to load into the waveform. The array must
have at least as many elements as the value in **size**.
'''
import numpy
if type(data) is not numpy.ndarray:
raise TypeError('data must be {0}, is {1}'.format(numpy.ndarray, type(data)))
if numpy.isfortran(data) is True:
raise TypeError('data must be in C-order')
if data.dtype is not numpy.dtype('float64'):
raise TypeError('data must be numpy.ndarray of dtype=float64, is ' + str(data.dtype))
vi_ctype = _visatype.ViSession(self._vi) # case S110
channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010
waveform_handle_ctype = _visatype.ViInt32(waveform_handle) # case S150
size_ctype = _visatype.ViInt32(0 if data is None else len(data)) # case S160
data_ctype = get_ctypes_pointer_for_buffer(value=data) # case B510
error_code = self._library.niFgen_WriteWaveform(vi_ctype, channel_name_ctype, waveform_handle_ctype, size_ctype, data_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def write_waveform(self, waveform_name_or_handle, data):
'''write_waveform
Writes data to the waveform in onboard memory.
By default, subsequent calls to this method
continue writing data from the position of the last sample written. You
can set the write position and offset by calling the set_next_write_position
set_next_write_position method.
Tip:
This method requires repeated capabilities. If called directly on the
nifgen.Session object, then the method will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
nifgen.Session repeated capabilities container, and calling this method on the result.
Args:
waveform_name_or_handle (str or int): The name (str) or handle (int) of an arbitrary waveform previously allocated with allocate_named_waveform, allocate_waveform or create_waveform.
data (list of float): Array of data to load into the waveform. This may be an iterable of float, or for best performance a numpy.ndarray of dtype int16 or float64.
'''
use_named = isinstance(waveform_name_or_handle, str)
# Check the type by using string comparison so that we don't import numpy unecessarilly.
if str(type(data)).find("'numpy.ndarray'") != -1:
import numpy
if data.dtype == numpy.float64:
return self._write_named_waveform_f64_numpy(waveform_name_or_handle, data) if use_named else self._write_waveform_numpy(waveform_name_or_handle, data)
elif data.dtype == numpy.int16:
return self._write_named_waveform_i16_numpy(waveform_name_or_handle, data) if use_named else self._write_binary16_waveform_numpy(waveform_name_or_handle, data)
else:
raise TypeError("Unsupported dtype. Is {0}, expected {1} or {2}".format(data.dtype, numpy.float64, numpy.int16))
elif isinstance(data, array.array):
if data.typecode == 'd':
return self._write_named_waveform_f64(waveform_name_or_handle, data) if use_named else self._write_waveform(waveform_name_or_handle, data)
elif data.typecode == 'h':
return self._write_named_waveform_i16(waveform_name_or_handle, data) if use_named else self._write_binary16_waveform(waveform_name_or_handle, data)
else:
raise TypeError("Unsupported dtype. Is {0}, expected {1} or {2}".format(data.typecode, 'd (double)', 'h (16 bit int)'))
return self._write_named_waveform_f64(waveform_name_or_handle, data) if use_named else self._write_waveform(waveform_name_or_handle, data)
def _error_message(self, error_code):
r'''_error_message
Converts a status code returned by an NI-FGEN method into a
user-readable string.
Args:
error_code (int): Specifies the **status** parameter that is returned from any of the
NI-FGEN methods.
**Default Value**: 0 (VI_SUCCESS)
Returns:
error_message (str): Returns the error message string read from the instrument error message
queue.
You must pass a ViChar array with at least 256 bytes.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code_ctype = _visatype.ViStatus(error_code) # case S150
error_message_ctype = (_visatype.ViChar * 256)() # case C070
error_code = self._library.niFgen_error_message(vi_ctype, error_code_ctype, error_message_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
return error_message_ctype.value.decode(self._encoding)
class Session(_SessionBase):
'''An NI-FGEN session to a National Instruments Signal Generator.'''
def __init__(self, resource_name, channel_name=None, reset_device=False, options={}):
r'''An NI-FGEN session to a National Instruments Signal Generator.
Creates and returns a new NI-FGEN session to the specified channel of a
waveform generator that is used in all subsequent NI-FGEN method
calls.
Args:
resource_name (str): Caution:
Traditional NI-DAQ and NI-DAQmx device names are not case-sensitive.
However, all IVI names, such as logical names, are case-sensitive. If
you use logical names, driver session names, or virtual names in your
program, you must ensure that the name you use matches the name in the
IVI Configuration Store file exactly, without any variations in the case
of the characters.
| Specifies the resource name of the device to initialize.
For Traditional NI-DAQ devices, the syntax is DAQ::\ *n*, where *n* is
the device number assigned by MAX, as shown in Example 1.
For NI-DAQmx devices, the syntax is just the device name specified in
MAX, as shown in Example 2. Typical default names for NI-DAQmx devices
in MAX are Dev1 or PXI1Slot1. You can rename an NI-DAQmx device by
right-clicking on the name in MAX and entering a new name.
An alternate syntax for NI-DAQmx devices consists of DAQ::\ *NI-DAQmx
device name*, as shown in Example 3. This naming convention allows for
the use of an NI-DAQmx device in an application that was originally
designed for a Traditional NI-DAQ device. For example, if the
application expects DAQ::1, you can rename the NI-DAQmx device to 1 in
MAX and pass in DAQ::1 for the resource name, as shown in Example 4.
If you use the DAQ::\ *n* syntax and an NI-DAQmx device name already
exists with that same name, the NI-DAQmx device is matched first.
You can also pass in the name of an IVI logical name or an IVI virtual
name configured with the IVI Configuration utility, as shown in Example
5. A logical name identifies a particular virtual instrument. A virtual
name identifies a specific device and specifies the initial settings for
the session.
+-----------+--------------------------------------+------------------------+---------------------------------+
| Example # | Device Type | Syntax | Variable |
+===========+======================================+========================+=================================+
| 1 | Traditional NI-DAQ device | DAQ::\ *1* | (*1* = device number) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 2 | NI-DAQmx device | *myDAQmxDevice* | (*myDAQmxDevice* = device name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 3 | NI-DAQmx device | DAQ::\ *myDAQmxDevice* | (*myDAQmxDevice* = device name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 4 | NI-DAQmx device | DAQ::\ *2* | (*2* = device name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 5 | IVI logical name or IVI virtual name | *myLogicalName* | (*myLogicalName* = name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
channel_name (str, list, range, tuple): Specifies the channel that this VI uses.
**Default Value**: "0"
reset_device (bool): Specifies whether you want to reset the device during the initialization
procedure. True specifies that the device is reset and performs the
same method as the Reset method.
****Defined Values****
**Default Value**: False
+-------+---------------------+
| True | Reset device |
+-------+---------------------+
| False | Do not reset device |
+-------+---------------------+
options (dict): Specifies the initial value of certain properties for the session. The
syntax for **options** is a dictionary of properties with an assigned
value. For example:
{ 'simulate': False }
You do not have to specify a value for all the properties. If you do not
specify a value for a property, the default value is used.
Advanced Example:
{ 'simulate': True, 'driver_setup': { 'Model': '<model number>', 'BoardType': '<type>' } }
+-------------------------+---------+
| Property | Default |
+=========================+=========+
| range_check | True |
+-------------------------+---------+
| query_instrument_status | False |
+-------------------------+---------+
| cache | True |
+-------------------------+---------+
| simulate | False |
+-------------------------+---------+
| record_value_coersions | False |
+-------------------------+---------+
| driver_setup | {} |
+-------------------------+---------+
Returns:
session (nifgen.Session): A session object representing the device.
'''
super(Session, self).__init__(repeated_capability_list=[], vi=None, library=None, encoding=None, freeze_it=False)
channel_name = _converters.convert_repeated_capabilities_from_init(channel_name)
options = _converters.convert_init_with_options_dictionary(options)
self._library = _library_singleton.get()
self._encoding = 'windows-1251'
# Call specified init function
self._vi = 0 # This must be set before calling _initialize_with_channels().
self._vi = self._initialize_with_channels(resource_name, channel_name, reset_device, options)
self.tclk = nitclk.SessionReference(self._vi)
# Store the parameter list for later printing in __repr__
param_list = []
param_list.append("resource_name=" + pp.pformat(resource_name))
param_list.append("channel_name=" + pp.pformat(channel_name))
param_list.append("reset_device=" + pp.pformat(reset_device))
param_list.append("options=" + pp.pformat(options))
self._param_list = ', '.join(param_list)
self._is_frozen = True
    def __enter__(self):
        # Context-manager entry: the session itself is the `with` target.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Context-manager exit: always close the session, even when an
        # exception is propagating (exception is not suppressed).
        self.close()
    def initiate(self):
        '''initiate

        Initiates signal generation. If you want to abort signal generation,
        call the abort method. After the signal generation
        is aborted, you can call the initiate method to
        cause the signal generator to produce a signal again.

        Note:
        This method will return a Python context manager that will initiate on entering and abort on exit.
        '''
        # _Generation initiates generation on __enter__ and aborts on __exit__.
        return _Generation(self)
    def close(self):
        '''close

        Performs the following operations:

        - Closes the instrument I/O session.
        - Destroys the NI-FGEN session and all of its properties.
        - Deallocates any memory resources NI-FGEN uses.

        Not all signal routes established by calling the ExportSignal
        and RouteSignalOut methods are released when the NI-FGEN
        session is closed. The following table shows what happens to a signal
        route on your device when you call the _close method.

        +--------------------+-------------------+------------------+
        | Routes To          | NI 5401/5411/5431 | Other Devices    |
        +====================+===================+==================+
        | Front Panel        | Remain connected  | Remain connected |
        +--------------------+-------------------+------------------+
        | RTSI/PXI Backplane | Remain connected  | Disconnected     |
        +--------------------+-------------------+------------------+

        Note:
        After calling _close, you cannot use NI-FGEN again until you
        call the init or InitWithOptions methods.

        Note:
        This method is not needed when using the session context manager
        '''
        try:
            self._close()
        except errors.DriverError:
            # Driver-level failure: still invalidate the session handle,
            # then let the caller see the original error.
            self._vi = 0
            raise
        self._vi = 0  # the session handle is invalid from here on
''' These are code-generated '''
@ivi_synchronized
def abort(self):
r'''abort
Aborts any previously initiated signal generation. Call the
initiate method to cause the signal generator to
produce a signal again.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code = self._library.niFgen_AbortGeneration(vi_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def clear_arb_memory(self):
r'''clear_arb_memory
Removes all previously created arbitrary waveforms, sequences, and
scripts from the signal generator memory and invalidates all waveform
handles, sequence handles, and waveform names.
Note:
The signal generator must not be in the Generating state when you
call this method.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code = self._library.niFgen_ClearArbMemory(vi_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def clear_arb_sequence(self, sequence_handle):
r'''clear_arb_sequence
Removes a previously created arbitrary sequence from the signal
generator memory and invalidates the sequence handle.
Note:
The signal generator must not be in the Generating state when you
call this method.
Args:
sequence_handle (int): Specifies the handle of the arbitrary sequence that you want the signal
generator to remove. You can create an arbitrary sequence using the
create_arb_sequence or create_advanced_arb_sequence method.
These methods return a handle that you use to identify the sequence.
| **Defined Value**:
| NIFGEN_VAL_ALL_SEQUENCES—Remove all sequences from the signal
generator
**Default Value**: None
Note:
One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
sequence_handle_ctype = _visatype.ViInt32(sequence_handle) # case S150
error_code = self._library.niFgen_ClearArbSequence(vi_ctype, sequence_handle_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def _clear_arb_waveform(self, waveform_handle):
r'''_clear_arb_waveform
Removes a previously created arbitrary waveform from the signal
generator memory and invalidates the waveform handle.
Note:
The signal generator must not be in the Generating state when you
call this method.
Args:
waveform_handle (int): Specifies the handle of the arbitrary waveform that you want the signal
generator to remove.
You can create multiple arbitrary waveforms using one of the following
niFgen Create Waveform methods:
- create_waveform
- create_waveform
- create_waveform_from_file_i16
- create_waveform_from_file_f64
- CreateWaveformFromFileHWS
**Defined Value**:
NIFGEN_VAL_ALL_WAVEFORMS—Remove all waveforms from the signal
generator.
**Default Value**: None
Note:
One or more of the referenced methods are not in the Python API for this driver.
Note:
One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
waveform_handle_ctype = _visatype.ViInt32(waveform_handle) # case S150
error_code = self._library.niFgen_ClearArbWaveform(vi_ctype, waveform_handle_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def clear_freq_list(self, frequency_list_handle):
r'''clear_freq_list
Removes a previously created frequency list from the signal generator
memory and invalidates the frequency list handle.
Note:
The signal generator must not be in the Generating state when you
call this method.
Args:
frequency_list_handle (int): Specifies the handle of the frequency list you want the signal generator
to remove. You create multiple frequency lists using
create_freq_list. create_freq_list returns a handle that you
use to identify each list. Specify a value of -1 to clear all frequency
lists.
**Defined Value**
NIFGEN_VAL_ALL_FLISTS—Remove all frequency lists from the signal
generator.
**Default Value**: None
Note:
One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
frequency_list_handle_ctype = _visatype.ViInt32(frequency_list_handle) # case S150
error_code = self._library.niFgen_ClearFreqList(vi_ctype, frequency_list_handle_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def commit(self):
r'''commit
Causes a transition to the Committed state. This method verifies
property values, reserves the device, and commits the property values
to the device. If the property values are all valid, NI-FGEN sets the
device hardware configuration to match the session configuration. This
method does not support the NI 5401/5404/5411/5431 signal generators.
In the Committed state, you can load waveforms, scripts, and sequences
into memory. If any properties are changed, NI-FGEN implicitly
transitions back to the Idle state, where you can program all session
properties before applying them to the device. This method has no
effect if the device is already in the Committed or Generating state and
returns a successful status value.
Calling this VI before the niFgen Initiate Generation VI is optional but
has the following benefits:
- Routes are committed, so signals are exported or imported.
- Any Reference Clock and external clock circuits are phase-locked.
- A subsequent initiate method can run faster
because the device is already configured.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code = self._library.niFgen_Commit(vi_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def create_advanced_arb_sequence(self, waveform_handles_array, loop_counts_array, sample_counts_array=None, marker_location_array=None):
r'''create_advanced_arb_sequence
Creates an arbitrary sequence from an array of waveform handles and an
array of corresponding loop counts. This method returns a handle that
identifies the sequence. You pass this handle to the
configure_arb_sequence method to specify what arbitrary sequence
you want the signal generator to produce.
The create_advanced_arb_sequence method extends on the
create_arb_sequence method by adding the ability to set the
number of samples in each sequence step and to set marker locations.
An arbitrary sequence consists of multiple waveforms. For each waveform,
you specify the number of times the signal generator produces the
waveform before proceeding to the next waveform. The number of times to
repeat a specific waveform is called the loop count.
Note:
The signal generator must not be in the Generating state when you call
this method.
You must call the ConfigureOutputMode method to set the
**outputMode** parameter to OutputMode.SEQ before calling this
method.
Args:
waveform_handles_array (list of int): Specifies the array of waveform handles from which you want to create a
new arbitrary sequence. The array must have at least as many elements as
the value that you specify in **sequenceLength**. Each
**waveformHandlesArray** element has a corresponding **loopCountsArray**
element that indicates how many times that waveform is repeated. You
obtain waveform handles when you create arbitrary waveforms with the
allocate_waveform method or one of the following niFgen
CreateWaveform methods:
- create_waveform
- create_waveform
- create_waveform_from_file_i16
- create_waveform_from_file_f64
- CreateWaveformFromFileHWS
**Default Value**: None
loop_counts_array (list of int): Specifies the array of loop counts you want to use to create a new
arbitrary sequence. The array must have at least as many elements as the
value that you specify in the **sequenceLength** parameter. Each
**loopCountsArray** element corresponds to a **waveformHandlesArray**
element and indicates how many times to repeat that waveform. Each
element of the **loopCountsArray** must be less than or equal to the
maximum number of loop counts that the signal generator allows. You can
obtain the maximum loop count from **maximumLoopCount** in the
query_arb_seq_capabilities method.
**Default Value**: None
sample_counts_array (list of int): Specifies the array of sample counts that you want to use to create a
new arbitrary sequence. The array must have at least as many elements as
the value you specify in the **sequenceLength** parameter. Each
**sampleCountsArray** element corresponds to a **waveformHandlesArray**
element and indicates the subset, in samples, of the given waveform to
generate. Each element of the **sampleCountsArray** must be larger than
the minimum waveform size, a multiple of the waveform quantum and no
larger than the number of samples in the corresponding waveform. You can
obtain these values by calling the query_arb_wfm_capabilities
method.
**Default Value**: None
marker_location_array (list of int): Specifies the array of marker locations to where you want a marker to be
generated in the sequence. The array must have at least as many elements
as the value you specify in the **sequenceLength** parameter. Each
**markerLocationArray** element corresponds to a
**waveformHandlesArray** element and indicates where in the waveform a
marker is to generate. The marker location must be less than the size of
the waveform the marker is in. The markers are coerced to the nearest
marker quantum and the coerced values are returned in the
**coercedMarkersArray** parameter.
If you do not want a marker generated for a particular sequence stage,
set this parameter to NIFGEN_VAL_NO_MARKER.
**Defined Value**: NIFGEN_VAL_NO_MARKER
**Default Value**: None
Note:
One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
Returns:
coerced_markers_array (list of int): Returns an array of all given markers that are coerced (rounded) to the
nearest marker quantum. Not all devices coerce markers.
**Default Value**: None
sequence_handle (int): Returns the handle that identifies the new arbitrary sequence. You can
pass this handle to configure_arb_sequence to generate the
arbitrary sequence.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
sequence_length_ctype = _visatype.ViInt32(0 if waveform_handles_array is None else len(waveform_handles_array)) # case S160
if loop_counts_array is not None and len(loop_counts_array) != len(waveform_handles_array): # case S160
raise ValueError("Length of loop_counts_array and waveform_handles_array parameters do not match.") # case S160
if sample_counts_array is not None and len(sample_counts_array) != len(waveform_handles_array): # case S160
raise ValueError("Length of sample_counts_array and waveform_handles_array parameters do not match.") # case S160
if marker_location_array is not None and len(marker_location_array) != len(waveform_handles_array): # case S160
raise ValueError("Length of marker_location_array and waveform_handles_array parameters do not match.") # case S160
waveform_handles_array_ctype = get_ctypes_pointer_for_buffer(value=waveform_handles_array, library_type=_visatype.ViInt32) # case B550
loop_counts_array_ctype = get_ctypes_pointer_for_buffer(value=loop_counts_array, library_type=_visatype.ViInt32) # case B550
sample_counts_array_ctype = get_ctypes_pointer_for_buffer(value=sample_counts_array, library_type=_visatype.ViInt32) # case B550
marker_location_array_ctype = get_ctypes_pointer_for_buffer(value=marker_location_array, library_type=_visatype.ViInt32) # case B550
coerced_markers_array_size = (0 if marker_location_array is None else len(marker_location_array)) # case B560
coerced_markers_array_ctype = get_ctypes_pointer_for_buffer(library_type=_visatype.ViInt32, size=coerced_markers_array_size) # case B560
sequence_handle_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_CreateAdvancedArbSequence(vi_ctype, sequence_length_ctype, waveform_handles_array_ctype, loop_counts_array_ctype, sample_counts_array_ctype, marker_location_array_ctype, coerced_markers_array_ctype, None if sequence_handle_ctype is None else (ctypes.pointer(sequence_handle_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return [int(coerced_markers_array_ctype[i]) for i in range((0 if marker_location_array is None else len(marker_location_array)))], int(sequence_handle_ctype.value)
@ivi_synchronized
def create_arb_sequence(self, waveform_handles_array, loop_counts_array):
r'''create_arb_sequence
Creates an arbitrary sequence from an array of waveform handles and an
array of corresponding loop counts. This method returns a handle that
identifies the sequence. You pass this handle to the
configure_arb_sequence method to specify what arbitrary sequence
you want the signal generator to produce.
An arbitrary sequence consists of multiple waveforms. For each waveform,
you can specify the number of times that the signal generator produces
the waveform before proceeding to the next waveform. The number of times
to repeat a specific waveform is called the loop count.
Note:
You must call the ConfigureOutputMode method to set the
**outputMode** parameter to OutputMode.SEQ before calling this
method.
Args:
waveform_handles_array (list of int): Specifies the array of waveform handles from which you want to create a
new arbitrary sequence. The array must have at least as many elements as
the value that you specify in **sequenceLength**. Each
**waveformHandlesArray** element has a corresponding **loopCountsArray**
element that indicates how many times that waveform is repeated. You
obtain waveform handles when you create arbitrary waveforms with the
allocate_waveform method or one of the following niFgen
CreateWaveform methods:
- create_waveform
- create_waveform
- create_waveform_from_file_i16
- create_waveform_from_file_f64
- CreateWaveformFromFileHWS
**Default Value**: None
loop_counts_array (list of int): Specifies the array of loop counts you want to use to create a new
arbitrary sequence. The array must have at least as many elements as the
value that you specify in the **sequenceLength** parameter. Each
**loopCountsArray** element corresponds to a **waveformHandlesArray**
element and indicates how many times to repeat that waveform. Each
element of the **loopCountsArray** must be less than or equal to the
maximum number of loop counts that the signal generator allows. You can
obtain the maximum loop count from **maximumLoopCount** in the
query_arb_seq_capabilities method.
**Default Value**: None
Returns:
sequence_handle (int): Returns the handle that identifies the new arbitrary sequence. You can
pass this handle to configure_arb_sequence to generate the
arbitrary sequence.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
sequence_length_ctype = _visatype.ViInt32(0 if waveform_handles_array is None else len(waveform_handles_array)) # case S160
if loop_counts_array is not None and len(loop_counts_array) != len(waveform_handles_array): # case S160
raise ValueError("Length of loop_counts_array and waveform_handles_array parameters do not match.") # case S160
waveform_handles_array_ctype = get_ctypes_pointer_for_buffer(value=waveform_handles_array, library_type=_visatype.ViInt32) # case B550
loop_counts_array_ctype = get_ctypes_pointer_for_buffer(value=loop_counts_array, library_type=_visatype.ViInt32) # case B550
sequence_handle_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_CreateArbSequence(vi_ctype, sequence_length_ctype, waveform_handles_array_ctype, loop_counts_array_ctype, None if sequence_handle_ctype is None else (ctypes.pointer(sequence_handle_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(sequence_handle_ctype.value)
@ivi_synchronized
def create_freq_list(self, waveform, frequency_array, duration_array):
r'''create_freq_list
Creates a frequency list from an array of frequencies
(**frequencyArray**) and an array of durations (**durationArray**). The
two arrays should have the same number of elements, and this value must
also be the size of the **frequencyListLength**. The method returns a
handle that identifies the frequency list (the **frequencyListHandle**).
You can pass this handle to configure_freq_list to specify what
frequency list you want the signal generator to produce.
A frequency list consists of a list of frequencies and durations. The
signal generator generates each frequency for the given amount of time
and then proceeds to the next frequency. When the end of the list is
reached, the signal generator starts over at the beginning of the list.
Note:
The signal generator must not be in the Generating state when you call
this method.
Args:
waveform (enums.Waveform): Specifies the standard waveform that you want the signal generator to
produce. NI-FGEN sets the func_waveform property to this
value.
****Defined Values****
**Default Value**: Waveform.SINE
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.SINE | Specifies that the signal generator produces a sinusoid waveform. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.SQUARE | Specifies that the signal generator produces a square waveform. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.TRIANGLE | Specifies that the signal generator produces a triangle waveform. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.RAMP_UP | Specifies that the signal generator produces a positive ramp waveform. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.RAMP_DOWN | Specifies that the signal generator produces a negative ramp waveform. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.DC | Specifies that the signal generator produces a constant voltage. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.NOISE | Specifies that the signal generator produces white noise. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
| Waveform.USER | Specifies that the signal generator produces a user-defined waveform as defined with the define_user_standard_waveform method. |
+--------------------+--------------------------------------------------------------------------------------------------------------------------------+
frequency_array (list of float): Specifies the array of frequencies to form the frequency list. The array
must have at least as many elements as the value you specify in
**frequencyListLength**. Each **frequencyArray** element has a
corresponding **durationArray** element that indicates how long that
frequency is repeated.
**Units**: hertz
**Default Value**: None
duration_array (list of float): Specifies the array of durations to form the frequency list. The array
must have at least as many elements as the value that you specify in
**frequencyListLength**. Each **durationArray** element has a
corresponding **frequencyArray** element and indicates how long in
seconds to generate the corresponding frequency.
**Units**: seconds
**Default Value**: None
Returns:
frequency_list_handle (int): Returns the handle that identifies the new frequency list. You can pass
this handle to configure_freq_list to generate the arbitrary
sequence.
'''
if type(waveform) is not enums.Waveform:
raise TypeError('Parameter waveform must be of type ' + str(enums.Waveform))
vi_ctype = _visatype.ViSession(self._vi) # case S110
waveform_ctype = _visatype.ViInt32(waveform.value) # case S130
frequency_list_length_ctype = _visatype.ViInt32(0 if frequency_array is None else len(frequency_array)) # case S160
if duration_array is not None and len(duration_array) != len(frequency_array): # case S160
raise ValueError("Length of duration_array and frequency_array parameters do not match.") # case S160
frequency_array_ctype = get_ctypes_pointer_for_buffer(value=frequency_array, library_type=_visatype.ViReal64) # case B550
duration_array_ctype = get_ctypes_pointer_for_buffer(value=duration_array, library_type=_visatype.ViReal64) # case B550
frequency_list_handle_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_CreateFreqList(vi_ctype, waveform_ctype, frequency_list_length_ctype, frequency_array_ctype, duration_array_ctype, None if frequency_list_handle_ctype is None else (ctypes.pointer(frequency_list_handle_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(frequency_list_handle_ctype.value)
@ivi_synchronized
def disable(self):
r'''disable
Places the instrument in a quiescent state where it has minimal or no
impact on the system to which it is connected. The analog output and all
exported signals are disabled.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code = self._library.niFgen_Disable(vi_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def export_attribute_configuration_buffer(self):
r'''export_attribute_configuration_buffer
Exports the property configuration of the session to a configuration
buffer.
You can export and import session property configurations only between
devices with identical model numbers, channel counts, and onboard memory
sizes.
This method verifies that the properties you have configured for the
session are valid. If the configuration is invalid, NI‑FGEN returns an
error.
Returns:
configuration (bytes): Specifies the byte array buffer to be populated with the exported
property configuration.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
size_in_bytes_ctype = _visatype.ViInt32() # case S170
configuration_ctype = None # case B580
error_code = self._library.niFgen_ExportAttributeConfigurationBuffer(vi_ctype, size_in_bytes_ctype, configuration_ctype)
errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
size_in_bytes_ctype = _visatype.ViInt32(error_code) # case S180
configuration_size = size_in_bytes_ctype.value # case B590
configuration_array = array.array("b", [0] * configuration_size) # case B590
configuration_ctype = get_ctypes_pointer_for_buffer(value=configuration_array, library_type=_visatype.ViInt8) # case B590
error_code = self._library.niFgen_ExportAttributeConfigurationBuffer(vi_ctype, size_in_bytes_ctype, configuration_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return _converters.convert_to_bytes(configuration_array)
@ivi_synchronized
def export_attribute_configuration_file(self, file_path):
r'''export_attribute_configuration_file
Exports the property configuration of the session to the specified
file.
You can export and import session property configurations only between
devices with identical model numbers, channel counts, and onboard memory
sizes.
This method verifies that the properties you have configured for the
session are valid. If the configuration is invalid, NI‑FGEN returns an
error.
Args:
file_path (str): Specifies the absolute path to the file to contain the exported
property configuration. If you specify an empty or relative path, this
method returns an error.
**Default file extension:** .nifgenconfig
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
file_path_ctype = ctypes.create_string_buffer(file_path.encode(self._encoding)) # case C020
error_code = self._library.niFgen_ExportAttributeConfigurationFile(vi_ctype, file_path_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def get_channel_name(self, index):
r'''get_channel_name
Returns the channel string that is in the channel table at an index you
specify.
Note:
This method is included for compliance with the IviFgen Class
Specification.
Args:
index (int): A 1-based index into the channel table.
Returns:
channel_string (str): Returns the channel string that is in the channel table at the index you
specify. Do not modify the contents of the channel string.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
index_ctype = _visatype.ViInt32(index) # case S150
buffer_size_ctype = _visatype.ViInt32() # case S170
channel_string_ctype = None # case C050
error_code = self._library.niFgen_GetChannelName(vi_ctype, index_ctype, buffer_size_ctype, channel_string_ctype)
errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
buffer_size_ctype = _visatype.ViInt32(error_code) # case S180
channel_string_ctype = (_visatype.ViChar * buffer_size_ctype.value)() # case C060
error_code = self._library.niFgen_GetChannelName(vi_ctype, index_ctype, buffer_size_ctype, channel_string_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return channel_string_ctype.value.decode(self._encoding)
@ivi_synchronized
def _get_ext_cal_last_date_and_time(self):
r'''_get_ext_cal_last_date_and_time
Returns the date and time of the last successful external calibration.
The time returned is 24-hour (military) local time; for example, if the
device was calibrated at 2:30 PM, this method returns 14 for the
**hour** parameter and 30 for the **minute** parameter.
Returns:
year (int): Specifies the year of the last successful calibration.
month (int): Specifies the month of the last successful calibration.
day (int): Specifies the day of the last successful calibration.
hour (int): Specifies the hour of the last successful calibration.
minute (int): Specifies the minute of the last successful calibration.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
year_ctype = _visatype.ViInt32() # case S220
month_ctype = _visatype.ViInt32() # case S220
day_ctype = _visatype.ViInt32() # case S220
hour_ctype = _visatype.ViInt32() # case S220
minute_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_GetExtCalLastDateAndTime(vi_ctype, None if year_ctype is None else (ctypes.pointer(year_ctype)), None if month_ctype is None else (ctypes.pointer(month_ctype)), None if day_ctype is None else (ctypes.pointer(day_ctype)), None if hour_ctype is None else (ctypes.pointer(hour_ctype)), None if minute_ctype is None else (ctypes.pointer(minute_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(year_ctype.value), int(month_ctype.value), int(day_ctype.value), int(hour_ctype.value), int(minute_ctype.value)
@ivi_synchronized
def get_ext_cal_last_temp(self):
r'''get_ext_cal_last_temp
Returns the temperature at the last successful external calibration. The
temperature is returned in degrees Celsius.
Returns:
temperature (float): Specifies the temperature at the last successful calibration in degrees
Celsius.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
temperature_ctype = _visatype.ViReal64() # case S220
error_code = self._library.niFgen_GetExtCalLastTemp(vi_ctype, None if temperature_ctype is None else (ctypes.pointer(temperature_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return float(temperature_ctype.value)
@ivi_synchronized
def get_ext_cal_recommended_interval(self):
r'''get_ext_cal_recommended_interval
Returns the recommended interval between external calibrations in
months.
Returns:
months (datetime.timedelta): Specifies the recommended interval between external calibrations in
months.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
months_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_GetExtCalRecommendedInterval(vi_ctype, None if months_ctype is None else (ctypes.pointer(months_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return _converters.convert_month_to_timedelta(int(months_ctype.value))
@ivi_synchronized
def get_hardware_state(self):
r'''get_hardware_state
Returns the current hardware state of the device and, if the device is
in the hardware error state, the current hardware error.
Note: Hardware states do not necessarily correspond to NI-FGEN states.
Returns:
state (enums.HardwareState): Returns the hardware state of the signal generator.
**Defined Values**
+-----------------------------------------+--------------------------------------------+
| HardwareState.IDLE | The device is in the Idle state. |
+-----------------------------------------+--------------------------------------------+
| HardwareState.WAITING_FOR_START_TRIGGER | The device is waiting for Start Trigger. |
+-----------------------------------------+--------------------------------------------+
| HardwareState.RUNNING | The device is in the Running state. |
+-----------------------------------------+--------------------------------------------+
| HardwareState.DONE | The generation has completed successfully. |
+-----------------------------------------+--------------------------------------------+
| HardwareState.HARDWARE_ERROR | There is a hardware error. |
+-----------------------------------------+--------------------------------------------+
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
state_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_GetHardwareState(vi_ctype, None if state_ctype is None else (ctypes.pointer(state_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return enums.HardwareState(state_ctype.value)
@ivi_synchronized
def get_ext_cal_last_date_and_time(self):
'''get_ext_cal_last_date_and_time
Returns the date and time of the last successful external calibration. The time returned is 24-hour (military) local time; for example, if the device was calibrated at 2:30 PM, this method returns 14 for the **hour** parameter and 30 for the **minute** parameter.
Returns:
month (datetime.datetime): Indicates date and time of the last calibration.
'''
year, month, day, hour, minute = self._get_ext_cal_last_date_and_time()
return datetime.datetime(year, month, day, hour, minute)
@ivi_synchronized
def get_self_cal_last_date_and_time(self):
'''get_self_cal_last_date_and_time
Returns the date and time of the last successful self-calibration.
Returns:
month (datetime.datetime): Returns the date and time the device was last calibrated.
'''
year, month, day, hour, minute = self._get_self_cal_last_date_and_time()
return datetime.datetime(year, month, day, hour, minute)
@ivi_synchronized
def _get_self_cal_last_date_and_time(self):
r'''_get_self_cal_last_date_and_time
Returns the date and time of the last successful self-calibration.
All values are returned as separate parameters. Each parameter is
returned as an integer, including the year, month, day, hour, minute,
and second. For example, if the device is calibrated in September 2013,
this method returns 9 for the **month** parameter and 2013 for the
**year** parameter.
The time returned is 24-hour (military) local time. For example, if the
device was calibrated at 2:30 PM, this method returns 14 for the
**hours** parameter and 30 for the **minutes** parameter.
Returns:
year (int): Specifies the year of the last successful calibration.
month (int): Specifies the month of the last successful calibration.
day (int): Specifies the day of the last successful calibration.
hour (int): Specifies the hour of the last successful calibration.
minute (int): Specifies the minute of the last successful calibration.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
year_ctype = _visatype.ViInt32() # case S220
month_ctype = _visatype.ViInt32() # case S220
day_ctype = _visatype.ViInt32() # case S220
hour_ctype = _visatype.ViInt32() # case S220
minute_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_GetSelfCalLastDateAndTime(vi_ctype, None if year_ctype is None else (ctypes.pointer(year_ctype)), None if month_ctype is None else (ctypes.pointer(month_ctype)), None if day_ctype is None else (ctypes.pointer(day_ctype)), None if hour_ctype is None else (ctypes.pointer(hour_ctype)), None if minute_ctype is None else (ctypes.pointer(minute_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(year_ctype.value), int(month_ctype.value), int(day_ctype.value), int(hour_ctype.value), int(minute_ctype.value)
@ivi_synchronized
def get_self_cal_last_temp(self):
r'''get_self_cal_last_temp
Returns the temperature at the last successful self-calibration. The
temperature is returned in degrees Celsius.
Returns:
temperature (float): Specifies the temperature at the last successful calibration in degrees
Celsius.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
temperature_ctype = _visatype.ViReal64() # case S220
error_code = self._library.niFgen_GetSelfCalLastTemp(vi_ctype, None if temperature_ctype is None else (ctypes.pointer(temperature_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return float(temperature_ctype.value)
@ivi_synchronized
def get_self_cal_supported(self):
r'''get_self_cal_supported
Returns whether the device supports self–calibration.
Returns:
self_cal_supported (bool): Returns whether the device supports self-calibration.
****Defined Values****
+-------+------------------------------------+
| True | Self–calibration is supported. |
+-------+------------------------------------+
| False | Self–calibration is not supported. |
+-------+------------------------------------+
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
self_cal_supported_ctype = _visatype.ViBoolean() # case S220
error_code = self._library.niFgen_GetSelfCalSupported(vi_ctype, None if self_cal_supported_ctype is None else (ctypes.pointer(self_cal_supported_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return bool(self_cal_supported_ctype.value)
@ivi_synchronized
def import_attribute_configuration_buffer(self, configuration):
r'''import_attribute_configuration_buffer
Imports a property configuration to the session from the specified
configuration buffer.
You can export and import session property configurations only between
devices with identical model numbers, channel counts, and onboard memory
sizes.
Note:
You cannot call this method while the session is in a running state,
such as while generating a signal.
Args:
configuration (bytes): Specifies the byte array buffer that contains the property
configuration to import.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
size_in_bytes_ctype = _visatype.ViInt32(0 if configuration is None else len(configuration)) # case S160
configuration_ctype = get_ctypes_pointer_for_buffer(value=_converters.convert_to_bytes(configuration), library_type=_visatype.ViInt8) # case B520
error_code = self._library.niFgen_ImportAttributeConfigurationBuffer(vi_ctype, size_in_bytes_ctype, configuration_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def import_attribute_configuration_file(self, file_path):
r'''import_attribute_configuration_file
Imports a property configuration to the session from the specified
file.
You can export and import session property configurations only between
devices with identical model numbers, channel counts, and onboard memory
sizes.
Note:
You cannot call this method while the session is in a running state,
such as while generating a signal.
Args:
file_path (str): Specifies the absolute path to the file containing the property
configuration to import. If you specify an empty or relative path, this
method returns an error.
**Default File Extension:** .nifgenconfig
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
file_path_ctype = ctypes.create_string_buffer(file_path.encode(self._encoding)) # case C020
error_code = self._library.niFgen_ImportAttributeConfigurationFile(vi_ctype, file_path_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
def _initialize_with_channels(self, resource_name, channel_name=None, reset_device=False, option_string=""):
r'''_initialize_with_channels
Creates and returns a new NI-FGEN session to the specified channel of a
waveform generator that is used in all subsequent NI-FGEN method
calls.
Args:
resource_name (str): Caution:
Traditional NI-DAQ and NI-DAQmx device names are not case-sensitive.
However, all IVI names, such as logical names, are case-sensitive. If
you use logical names, driver session names, or virtual names in your
program, you must ensure that the name you use matches the name in the
IVI Configuration Store file exactly, without any variations in the case
of the characters.
| Specifies the resource name of the device to initialize.
For Traditional NI-DAQ devices, the syntax is DAQ::\ *n*, where *n* is
the device number assigned by MAX, as shown in Example 1.
For NI-DAQmx devices, the syntax is just the device name specified in
MAX, as shown in Example 2. Typical default names for NI-DAQmx devices
in MAX are Dev1 or PXI1Slot1. You can rename an NI-DAQmx device by
right-clicking on the name in MAX and entering a new name.
An alternate syntax for NI-DAQmx devices consists of DAQ::\ *NI-DAQmx
device name*, as shown in Example 3. This naming convention allows for
the use of an NI-DAQmx device in an application that was originally
designed for a Traditional NI-DAQ device. For example, if the
application expects DAQ::1, you can rename the NI-DAQmx device to 1 in
MAX and pass in DAQ::1 for the resource name, as shown in Example 4.
If you use the DAQ::\ *n* syntax and an NI-DAQmx device name already
exists with that same name, the NI-DAQmx device is matched first.
You can also pass in the name of an IVI logical name or an IVI virtual
name configured with the IVI Configuration utility, as shown in Example
5. A logical name identifies a particular virtual instrument. A virtual
name identifies a specific device and specifies the initial settings for
the session.
+-----------+--------------------------------------+------------------------+---------------------------------+
| Example # | Device Type | Syntax | Variable |
+===========+======================================+========================+=================================+
| 1 | Traditional NI-DAQ device | DAQ::\ *1* | (*1* = device number) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 2 | NI-DAQmx device | *myDAQmxDevice* | (*myDAQmxDevice* = device name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 3 | NI-DAQmx device | DAQ::\ *myDAQmxDevice* | (*myDAQmxDevice* = device name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 4 | NI-DAQmx device | DAQ::\ *2* | (*2* = device name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
| 5 | IVI logical name or IVI virtual name | *myLogicalName* | (*myLogicalName* = name) |
+-----------+--------------------------------------+------------------------+---------------------------------+
channel_name (str, list, range, tuple): Specifies the channel that this VI uses.
**Default Value**: "0"
reset_device (bool): Specifies whether you want to reset the device during the initialization
procedure. True specifies that the device is reset and performs the
same method as the Reset method.
****Defined Values****
**Default Value**: False
+-------+---------------------+
| True | Reset device |
+-------+---------------------+
| False | Do not reset device |
+-------+---------------------+
option_string (dict): Sets the initial value of certain session properties.
The syntax for **optionString** is
<*attributeName*> = <*value*>
where
*attributeName* is the name of the property and *value* is the value to
which the property is set
To set multiple properties, separate them with a comma.
If you pass NULL or an empty string for this parameter, the session uses
the default values for these properties. You can override the default
values by assigning a value explicitly in a string that you pass for
this parameter.
You do not have to specify all of the properties and may leave any of
them out. However, if you do not specify one of the properties, its
default value is used.
If simulation is enabled (Simulate=1), you may specify the device that
you want to simulate. To specify a device, enter the following syntax in
**optionString**.
DriverSetup=Model:<*driver model number*>;Channels:<*channel
names*>;BoardType:<*module type*>;MemorySize:<*size of onboard memory in
bytes*>
**Syntax Examples**
**Properties and **Defined Values****
**Default Values**: "Simulate=0,RangeCheck=1,QueryInstrStatus=1,Cache=1"
+------------------+-------------------------+-------------+
| Property Name | Property | Values |
+==================+=========================+=============+
| RangeCheck | RANGE_CHECK | True, False |
+------------------+-------------------------+-------------+
| QueryInstrStatus | QUERY_INSTRUMENT_STATUS | True, False |
+------------------+-------------------------+-------------+
| Cache | CACHE | True, False |
+------------------+-------------------------+-------------+
| Simulate | simulate | True, False |
+------------------+-------------------------+-------------+
Note:
One or more of the referenced properties are not in the Python API for this driver.
Returns:
vi (int): Returns a session handle that you can use to identify the device in all
subsequent NI-FGEN method calls.
'''
resource_name_ctype = ctypes.create_string_buffer(resource_name.encode(self._encoding)) # case C020
channel_name_ctype = ctypes.create_string_buffer(channel_name.encode(self._encoding)) # case C020
reset_device_ctype = _visatype.ViBoolean(reset_device) # case S150
option_string_ctype = ctypes.create_string_buffer(option_string.encode(self._encoding)) # case C020
vi_ctype = _visatype.ViSession() # case S220
error_code = self._library.niFgen_InitializeWithChannels(resource_name_ctype, channel_name_ctype, reset_device_ctype, option_string_ctype, None if vi_ctype is None else (ctypes.pointer(vi_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(vi_ctype.value)
@ivi_synchronized
def _initiate_generation(self):
r'''_initiate_generation
Initiates signal generation. If you want to abort signal generation,
call the abort method. After the signal generation
is aborted, you can call the initiate method to
cause the signal generator to produce a signal again.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code = self._library.niFgen_InitiateGeneration(vi_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def is_done(self):
r'''is_done
Determines whether the current generation is complete. This method
sets the **done** parameter to True if the session is in the Idle or
Committed states.
Note:
NI-FGEN only reports the **done** parameter as True after the
current generation is complete in Single trigger mode.
Returns:
done (bool): Returns information about the completion of waveform generation.
**Defined Values**
+-------+-----------------------------+
| True | Generation is complete. |
+-------+-----------------------------+
| False | Generation is not complete. |
+-------+-----------------------------+
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
done_ctype = _visatype.ViBoolean() # case S220
error_code = self._library.niFgen_IsDone(vi_ctype, None if done_ctype is None else (ctypes.pointer(done_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return bool(done_ctype.value)
@ivi_synchronized
def query_arb_seq_capabilities(self):
r'''query_arb_seq_capabilities
Returns the properties of the signal generator that are related to
creating arbitrary sequences (the max_num_sequences,
min_sequence_length,
max_sequence_length, and max_loop_count
properties).
Returns:
maximum_number_of_sequences (int): Returns the maximum number of arbitrary waveform sequences that the
signal generator allows. NI-FGEN obtains this value from the
max_num_sequences property.
minimum_sequence_length (int): Returns the minimum number of arbitrary waveforms the signal generator
allows in a sequence. NI-FGEN obtains this value from the
min_sequence_length property.
maximum_sequence_length (int): Returns the maximum number of arbitrary waveforms the signal generator
allows in a sequence. NI-FGEN obtains this value from the
max_sequence_length property.
maximum_loop_count (int): Returns the maximum number of times the signal generator can repeat an
arbitrary waveform in a sequence. NI-FGEN obtains this value from the
max_loop_count property.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
maximum_number_of_sequences_ctype = _visatype.ViInt32() # case S220
minimum_sequence_length_ctype = _visatype.ViInt32() # case S220
maximum_sequence_length_ctype = _visatype.ViInt32() # case S220
maximum_loop_count_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_QueryArbSeqCapabilities(vi_ctype, None if maximum_number_of_sequences_ctype is None else (ctypes.pointer(maximum_number_of_sequences_ctype)), None if minimum_sequence_length_ctype is None else (ctypes.pointer(minimum_sequence_length_ctype)), None if maximum_sequence_length_ctype is None else (ctypes.pointer(maximum_sequence_length_ctype)), None if maximum_loop_count_ctype is None else (ctypes.pointer(maximum_loop_count_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(maximum_number_of_sequences_ctype.value), int(minimum_sequence_length_ctype.value), int(maximum_sequence_length_ctype.value), int(maximum_loop_count_ctype.value)
@ivi_synchronized
def query_arb_wfm_capabilities(self):
r'''query_arb_wfm_capabilities
Returns the properties of the signal generator that are related to
creating arbitrary waveforms. These properties are the maximum number of
waveforms, waveform quantum, minimum waveform size, and maximum waveform
size.
Note:
If you do not want to obtain the waveform quantum, pass a value of
VI_NULL for this parameter.
Returns:
maximum_number_of_waveforms (int): Returns the maximum number of arbitrary waveforms that the signal
generator allows. NI-FGEN obtains this value from the
max_num_waveforms property.
waveform_quantum (int): The size (number of points) of each waveform must be a multiple of a
constant quantum value. This parameter obtains the quantum value that
the signal generator uses. NI-FGEN returns this value from the
waveform_quantum property.
For example, when this property returns a value of 8, all waveform
sizes must be a multiple of 8.
minimum_waveform_size (int): Returns the minimum number of points that the signal generator allows in
a waveform. NI-FGEN obtains this value from the
min_waveform_size property.
maximum_waveform_size (int): Returns the maximum number of points that the signal generator allows in
a waveform. NI-FGEN obtains this value from the
max_waveform_size property.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
maximum_number_of_waveforms_ctype = _visatype.ViInt32() # case S220
waveform_quantum_ctype = _visatype.ViInt32() # case S220
minimum_waveform_size_ctype = _visatype.ViInt32() # case S220
maximum_waveform_size_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niFgen_QueryArbWfmCapabilities(vi_ctype, None if maximum_number_of_waveforms_ctype is None else (ctypes.pointer(maximum_number_of_waveforms_ctype)), None if waveform_quantum_ctype is None else (ctypes.pointer(waveform_quantum_ctype)), None if minimum_waveform_size_ctype is None else (ctypes.pointer(minimum_waveform_size_ctype)), None if maximum_waveform_size_ctype is None else (ctypes.pointer(maximum_waveform_size_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(maximum_number_of_waveforms_ctype.value), int(waveform_quantum_ctype.value), int(minimum_waveform_size_ctype.value), int(maximum_waveform_size_ctype.value)
@ivi_synchronized
def query_freq_list_capabilities(self):
r'''query_freq_list_capabilities
Returns the properties of the signal generator that are related to
creating frequency lists. These properties are
max_num_freq_lists,
min_freq_list_length,
max_freq_list_length,
min_freq_list_duration,
max_freq_list_duration, and
freq_list_duration_quantum.
Returns:
maximum_number_of_freq_lists (int): Returns the maximum number of frequency lists that the signal generator
allows. NI-FGEN obtains this value from the
max_num_freq_lists property.
minimum_frequency_list_length (int): Returns the minimum number of steps that the signal generator allows in
a frequency list. NI-FGEN obtains this value from the
min_freq_list_length property.
maximum_frequency_list_length (int): Returns the maximum number of steps that the signal generator allows in
a frequency list. NI-FGEN obtains this value from the
max_freq_list_length property.
minimum_frequency_list_duration (float): Returns the minimum duration that the signal generator allows in a step
of a frequency list. NI-FGEN obtains this value from the
min_freq_list_duration property.
maximum_frequency_list_duration (float): Returns the maximum duration that the signal generator allows in a step
of a frequency list. NI-FGEN obtains this value from the
max_freq_list_duration property.
frequency_list_duration_quantum (float): Returns the quantum of which all durations must be a multiple in a
frequency list. NI-FGEN obtains this value from the
freq_list_duration_quantum property.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
maximum_number_of_freq_lists_ctype = _visatype.ViInt32() # case S220
minimum_frequency_list_length_ctype = _visatype.ViInt32() # case S220
maximum_frequency_list_length_ctype = _visatype.ViInt32() # case S220
minimum_frequency_list_duration_ctype = _visatype.ViReal64() # case S220
maximum_frequency_list_duration_ctype = _visatype.ViReal64() # case S220
frequency_list_duration_quantum_ctype = _visatype.ViReal64() # case S220
error_code = self._library.niFgen_QueryFreqListCapabilities(vi_ctype, None if maximum_number_of_freq_lists_ctype is None else (ctypes.pointer(maximum_number_of_freq_lists_ctype)), None if minimum_frequency_list_length_ctype is None else (ctypes.pointer(minimum_frequency_list_length_ctype)), None if maximum_frequency_list_length_ctype is None else (ctypes.pointer(maximum_frequency_list_length_ctype)), None if minimum_frequency_list_duration_ctype is None else (ctypes.pointer(minimum_frequency_list_duration_ctype)), None if maximum_frequency_list_duration_ctype is None else (ctypes.pointer(maximum_frequency_list_duration_ctype)), None if frequency_list_duration_quantum_ctype is None else (ctypes.pointer(frequency_list_duration_quantum_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(maximum_number_of_freq_lists_ctype.value), int(minimum_frequency_list_length_ctype.value), int(maximum_frequency_list_length_ctype.value), float(minimum_frequency_list_duration_ctype.value), float(maximum_frequency_list_duration_ctype.value), float(frequency_list_duration_quantum_ctype.value)
@ivi_synchronized
def read_current_temperature(self):
r'''read_current_temperature
Reads the current onboard temperature of the device. The temperature is
returned in degrees Celsius.
Returns:
temperature (float): Returns the current temperature read from onboard temperature sensors,
in degrees Celsius.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
temperature_ctype = _visatype.ViReal64() # case S220
error_code = self._library.niFgen_ReadCurrentTemperature(vi_ctype, None if temperature_ctype is None else (ctypes.pointer(temperature_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return float(temperature_ctype.value)
@ivi_synchronized
def reset_device(self):
r'''reset_device
Performs a hard reset on the device. Generation is stopped, all routes
are released, external bidirectional terminals are tristated, FPGAs are
reset, hardware is configured to its default state, and all session
properties are reset to their default states.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
error_code = self._library.niFgen_ResetDevice(vi_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
@ivi_synchronized
def reset_with_defaults(self):
    r'''reset_with_defaults

    Resets the instrument and then reapplies the initial user-specified
    settings from the logical name used to initialize the session. For a
    session created without a logical name, this behaves exactly like the
    reset method.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    status = self._library.niFgen_ResetWithDefaults(session_handle)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def self_cal(self):
    r'''self_cal

    Performs a full internal self-calibration on the device. On success,
    new calibration data and constants are written to the onboard EEPROM.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    status = self._library.niFgen_SelfCal(session_handle)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def wait_until_done(self, max_time=datetime.timedelta(seconds=10.0)):
    r'''wait_until_done

    Blocks until the device finishes generating, or until the maximum time
    has expired.

    Args:
        max_time (int in milliseconds or datetime.timedelta): Specifies the timeout value in milliseconds.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    # Accept either a raw millisecond count or a timedelta; the converter
    # normalizes both to a ViInt32 millisecond value.
    timeout_ms = _converters.convert_timedelta_to_milliseconds_int32(max_time)  # case S140
    status = self._library.niFgen_WaitUntilDone(session_handle, timeout_ms)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
def _close(self):
    r'''_close

    Performs the following operations:

    - Closes the instrument I/O session.
    - Destroys the NI-FGEN session and all of its properties.
    - Deallocates any memory resources NI-FGEN uses.

    Signal routes established with ExportSignal and RouteSignalOut are not
    all released on close; the table below shows what happens to each
    route when _close is called.

    +--------------------+-------------------+------------------+
    | Routes To          | NI 5401/5411/5431 | Other Devices    |
    +====================+===================+==================+
    | Front Panel        | Remain connected  | Remain connected |
    +--------------------+-------------------+------------------+
    | RTSI/PXI Backplane | Remain connected  | Disconnected     |
    +--------------------+-------------------+------------------+

    Note:
    After calling _close, you cannot use NI-FGEN again until you
    call the init or InitWithOptions methods.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    status = self._library.niFgen_close(session_handle)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def self_test(self):
    '''self_test

    Runs the instrument self-test routine and raises on failure.

    Raises `SelfTestError` on self test failure. Properties on exception object:

    - code - failure code from driver
    - message - status message from driver

    +----------------+------------------+
    | Self-Test Code | Description      |
    +================+==================+
    | 0              | Passed self-test |
    +----------------+------------------+
    | 1              | Self-test failed |
    +----------------+------------------+

    Note:
    When used on some signal generators, the device is reset after the
    self_test method runs. If you use the self_test
    method, your device may not be in its previously configured state
    after the method runs.
    '''
    result_code, result_message = self._self_test()
    # A nonzero code from the driver means the self-test failed.
    if result_code:
        raise errors.SelfTestError(result_code, result_message)
    return None
@ivi_synchronized
def reset(self):
    r'''reset

    Returns the instrument to a known state: aborts generation, clears all
    routes, and resets session properties to their default values. Unlike
    a full device reset, this does not commit the session properties or
    configure the hardware to its default state.

    Note:
    For the NI 5401/5404/5411/5431, this method exhibits the same
    behavior as the reset_device method.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    status = self._library.niFgen_reset(session_handle)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _self_test(self):
    r'''_self_test

    Runs the instrument self-test routine and returns the test result(s).

    Note:
    When used on some signal generators, the device is reset after the
    self_test method runs. If you use the self_test
    method, your device may not be in its previously configured state
    after the method runs.

    Returns:
        self_test_result (int): Contains the value returned from the instrument self-test. A value of 0
            indicates success.

            +----------------+------------------+
            | Self-Test Code | Description      |
            +================+==================+
            | 0              | Passed self-test |
            +----------------+------------------+
            | 1              | Self-test failed |
            +----------------+------------------+

        self_test_message (str): Returns the self-test response string from the instrument.
            You must pass a ViChar array with at least 256 bytes.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    result = _visatype.ViInt16()  # case S220
    # Driver writes the NUL-terminated response string into this buffer.
    message_buffer = (_visatype.ViChar * 256)()  # case C070
    status = self._library.niFgen_self_test(session_handle, ctypes.pointer(result), message_buffer)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return int(result.value), message_buffer.value.decode(self._encoding)
| [
"nifgen._attributes.AttributeEnum",
"nifgen._attributes.AttributeViReal64TimeDeltaSeconds",
"nifgen._visatype.ViInt16",
"nifgen._visatype.ViStatus",
"nifgen._library_singleton.get",
"nifgen._visatype.ViSession",
"numpy.isfortran",
"nifgen._visatype.ViAttr",
"nifgen.enums.HardwareState",
"ctypes.po... | [((467, 497), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (487, 497), False, 'import pprint\n'), ((2077, 2085), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (2082, 2085), False, 'from functools import wraps\n'), ((4585, 4623), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150413)'], {}), '(1150413)\n', (4614, 4623), True, 'import nifgen._attributes as _attributes\n'), ((5414, 5451), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150349)'], {}), '(1150349)\n', (5442, 5451), True, 'import nifgen._attributes as _attributes\n'), ((5658, 5695), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150344)'], {}), '(1150344)\n', (5686, 5695), True, 'import nifgen._attributes as _attributes\n'), ((5810, 5847), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150234)'], {}), '(1150234)\n', (5838, 5847), True, 'import nifgen._attributes as _attributes\n'), ((6022, 6061), 'nifgen._attributes.AttributeViBoolean', '_attributes.AttributeViBoolean', (['(1150103)'], {}), '(1150103)\n', (6052, 6061), True, 'import nifgen._attributes as _attributes\n'), ((6386, 6473), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.AnalogPath', '(1150222)'], {}), '(_attributes.AttributeViInt32, enums.AnalogPath, \n 1150222)\n', (6411, 6473), True, 'import nifgen._attributes as _attributes\n'), ((7024, 7061), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150235)'], {}), '(1150235)\n', (7052, 7061), True, 'import nifgen._attributes as _attributes\n'), ((7181, 7219), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250202)'], {}), '(1250202)\n', (7210, 7219), True, 'import nifgen._attributes as _attributes\n'), ((7720, 7757), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150327)'], 
{}), '(1150327)\n', (7748, 7757), True, 'import nifgen._attributes as _attributes\n'), ((8526, 8564), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250203)'], {}), '(1250203)\n', (8555, 8564), True, 'import nifgen._attributes as _attributes\n'), ((9063, 9100), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150328)'], {}), '(1150328)\n', (9091, 9100), True, 'import nifgen._attributes as _attributes\n'), ((9714, 9752), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250204)'], {}), '(1250204)\n', (9743, 9752), True, 'import nifgen._attributes as _attributes\n'), ((10005, 10042), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250211)'], {}), '(1250211)\n', (10033, 10042), True, 'import nifgen._attributes as _attributes\n'), ((10516, 10553), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250201)'], {}), '(1250201)\n', (10544, 10553), True, 'import nifgen._attributes as _attributes\n'), ((11277, 11316), 'nifgen._attributes.AttributeViBoolean', '_attributes.AttributeViBoolean', (['(1150411)'], {}), '(1150411)\n', (11307, 11316), True, 'import nifgen._attributes as _attributes\n'), ((11576, 11655), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.BusType', '(1150215)'], {}), '(_attributes.AttributeViInt32, enums.BusType, 1150215)\n', (11601, 11655), True, 'import nifgen._attributes as _attributes\n'), ((11754, 11792), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150369)'], {}), '(1150369)\n', (11783, 11792), True, 'import nifgen._attributes as _attributes\n'), ((12248, 12334), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.ClockMode', '(1150110)'], {}), '(_attributes.AttributeViInt32, enums.ClockMode, \n 1150110)\n', (12273, 12334), True, 'import nifgen._attributes as 
_attributes\n'), ((12774, 12812), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150366)'], {}), '(1150366)\n', (12803, 12812), True, 'import nifgen._attributes as _attributes\n'), ((13191, 13228), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150273)'], {}), '(1150273)\n', (13219, 13228), True, 'import nifgen._attributes as _attributes\n'), ((13365, 13402), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150337)'], {}), '(1150337)\n', (13393, 13402), True, 'import nifgen._attributes as _attributes\n'), ((13902, 14007), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.DataMarkerEventLevelPolarity', '(1150338)'], {}), '(_attributes.AttributeViInt32, enums.\n DataMarkerEventLevelPolarity, 1150338)\n', (13927, 14007), True, 'import nifgen._attributes as _attributes\n'), ((14529, 14567), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150339)'], {}), '(1150339)\n', (14558, 14567), True, 'import nifgen._attributes as _attributes\n'), ((15060, 15097), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150241)'], {}), '(1150241)\n', (15088, 15097), True, 'import nifgen._attributes as _attributes\n'), ((15302, 15340), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150373)'], {}), '(1150373)\n', (15331, 15340), True, 'import nifgen._attributes as _attributes\n'), ((15884, 15921), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150375)'], {}), '(1150375)\n', (15912, 15921), True, 'import nifgen._attributes as _attributes\n'), ((16938, 16975), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150374)'], {}), '(1150374)\n', (16966, 16975), True, 'import nifgen._attributes as _attributes\n'), ((17798, 17835), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150236)'], 
{}), '(1150236)\n', (17826, 17835), True, 'import nifgen._attributes as _attributes\n'), ((18040, 18145), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.ScriptTriggerDigitalEdgeEdge', '(1150292)'], {}), '(_attributes.AttributeViInt32, enums.\n ScriptTriggerDigitalEdgeEdge, 1150292)\n', (18065, 18145), True, 'import nifgen._attributes as _attributes\n'), ((18733, 18771), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150291)'], {}), '(1150291)\n', (18762, 18771), True, 'import nifgen._attributes as _attributes\n'), ((19334, 19438), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.StartTriggerDigitalEdgeEdge', '(1150282)'], {}), '(_attributes.AttributeViInt32, enums.\n StartTriggerDigitalEdgeEdge, 1150282)\n', (19359, 19438), True, 'import nifgen._attributes as _attributes\n'), ((19658, 19696), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150281)'], {}), '(1150281)\n', (19687, 19696), True, 'import nifgen._attributes as _attributes\n'), ((19884, 19923), 'nifgen._attributes.AttributeViBoolean', '_attributes.AttributeViBoolean', (['(1150102)'], {}), '(1150102)\n', (19914, 19923), True, 'import nifgen._attributes as _attributes\n'), ((20269, 20307), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150218)'], {}), '(1150218)\n', (20298, 20307), True, 'import nifgen._attributes as _attributes\n'), ((20613, 20651), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150254)'], {}), '(1150254)\n', (20642, 20651), True, 'import nifgen._attributes as _attributes\n'), ((21543, 21582), 'nifgen._attributes.AttributeViBoolean', '_attributes.AttributeViBoolean', (['(1150101)'], {}), '(1150101)\n', (21573, 21582), True, 'import nifgen._attributes as _attributes\n'), ((21729, 21766), 'nifgen._attributes.AttributeViInt32', 
'_attributes.AttributeViInt32', (['(1150237)'], {}), '(1150237)\n', (21757, 21766), True, 'import nifgen._attributes as _attributes\n'), ((21905, 21943), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150315)'], {}), '(1150315)\n', (21934, 21943), True, 'import nifgen._attributes as _attributes\n'), ((22048, 22086), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050007)'], {}), '(1050007)\n', (22077, 22086), True, 'import nifgen._attributes as _attributes\n'), ((22373, 22411), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150322)'], {}), '(1150322)\n', (22402, 22411), True, 'import nifgen._attributes as _attributes\n'), ((22560, 22598), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150321)'], {}), '(1150321)\n', (22589, 22598), True, 'import nifgen._attributes as _attributes\n'), ((22728, 22765), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150219)'], {}), '(1150219)\n', (22756, 22765), True, 'import nifgen._attributes as _attributes\n'), ((23162, 23200), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150320)'], {}), '(1150320)\n', (23191, 23200), True, 'import nifgen._attributes as _attributes\n'), ((23336, 23373), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150230)'], {}), '(1150230)\n', (23364, 23373), True, 'import nifgen._attributes as _attributes\n'), ((23787, 23825), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150329)'], {}), '(1150329)\n', (23816, 23825), True, 'import nifgen._attributes as _attributes\n'), ((24500, 24538), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150295)'], {}), '(1150295)\n', (24529, 24538), True, 'import nifgen._attributes as _attributes\n'), ((25217, 25255), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150283)'], {}), 
'(1150283)\n', (25246, 25255), True, 'import nifgen._attributes as _attributes\n'), ((25394, 25431), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150233)'], {}), '(1150233)\n', (25422, 25431), True, 'import nifgen._attributes as _attributes\n'), ((25543, 25581), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150376)'], {}), '(1150376)\n', (25572, 25581), True, 'import nifgen._attributes as _attributes\n'), ((26027, 26064), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150240)'], {}), '(1150240)\n', (26055, 26064), True, 'import nifgen._attributes as _attributes\n'), ((26290, 26328), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150104)'], {}), '(1150104)\n', (26319, 26328), True, 'import nifgen._attributes as _attributes\n'), ((26756, 26795), 'nifgen._attributes.AttributeViBoolean', '_attributes.AttributeViBoolean', (['(1150323)'], {}), '(1150323)\n', (26786, 26795), True, 'import nifgen._attributes as _attributes\n'), ((27093, 27131), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150412)'], {}), '(1150412)\n', (27122, 27131), True, 'import nifgen._attributes as _attributes\n'), ((27258, 27296), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150214)'], {}), '(1150214)\n', (27287, 27296), True, 'import nifgen._attributes as _attributes\n'), ((27436, 27473), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150208)'], {}), '(1150208)\n', (27464, 27473), True, 'import nifgen._attributes as _attributes\n'), ((27706, 27744), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250102)'], {}), '(1250102)\n', (27735, 27744), True, 'import nifgen._attributes as _attributes\n'), ((28183, 28220), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150238)'], {}), '(1150238)\n', (28211, 28220), True, 'import 
nifgen._attributes as _attributes\n'), ((28630, 28668), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250103)'], {}), '(1250103)\n', (28659, 28668), True, 'import nifgen._attributes as _attributes\n'), ((29124, 29162), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250106)'], {}), '(1250106)\n', (29153, 29162), True, 'import nifgen._attributes as _attributes\n'), ((29554, 29592), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250104)'], {}), '(1250104)\n', (29583, 29592), True, 'import nifgen._attributes as _attributes\n'), ((30065, 30102), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150239)'], {}), '(1150239)\n', (30093, 30102), True, 'import nifgen._attributes as _attributes\n'), ((30595, 30633), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250105)'], {}), '(1250105)\n', (30624, 30633), True, 'import nifgen._attributes as _attributes\n'), ((31207, 31292), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.Waveform', '(1250101)'], {}), '(_attributes.AttributeViInt32, enums.Waveform, 1250101\n )\n', (31232, 31292), True, 'import nifgen._attributes as _attributes\n'), ((31913, 32001), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.IdleBehavior', '(1150377)'], {}), '(_attributes.AttributeViInt32, enums.IdleBehavior,\n 1150377)\n', (31938, 32001), True, 'import nifgen._attributes as _attributes\n'), ((32243, 32280), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150378)'], {}), '(1150378)\n', (32271, 32280), True, 'import nifgen._attributes as _attributes\n'), ((32458, 32496), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050510)'], {}), '(1050510)\n', (32487, 32496), True, 'import nifgen._attributes as _attributes\n'), ((32660, 32698), 
'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050511)'], {}), '(1050511)\n', (32689, 32698), True, 'import nifgen._attributes as _attributes\n'), ((32837, 32875), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050512)'], {}), '(1050512)\n', (32866, 32875), True, 'import nifgen._attributes as _attributes\n'), ((33028, 33066), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050304)'], {}), '(1050304)\n', (33057, 33066), True, 'import nifgen._attributes as _attributes\n'), ((33463, 33501), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150220)'], {}), '(1150220)\n', (33492, 33501), True, 'import nifgen._attributes as _attributes\n'), ((34075, 34113), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050305)'], {}), '(1050305)\n', (34104, 34113), True, 'import nifgen._attributes as _attributes\n'), ((34679, 34716), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150271)'], {}), '(1150271)\n', (34707, 34716), True, 'import nifgen._attributes as _attributes\n'), ((34901, 34939), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150312)'], {}), '(1150312)\n', (34930, 34939), True, 'import nifgen._attributes as _attributes\n'), ((35425, 35463), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150213)'], {}), '(1150213)\n', (35454, 35463), True, 'import nifgen._attributes as _attributes\n'), ((35592, 35629), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150211)'], {}), '(1150211)\n', (35620, 35629), True, 'import nifgen._attributes as _attributes\n'), ((35751, 35788), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250215)'], {}), '(1250215)\n', (35779, 35788), True, 'import nifgen._attributes as _attributes\n'), ((36003, 36040), 'nifgen._attributes.AttributeViInt32', 
'_attributes.AttributeViInt32', (['(1150209)'], {}), '(1150209)\n', (36031, 36040), True, 'import nifgen._attributes as _attributes\n'), ((36170, 36207), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250212)'], {}), '(1250212)\n', (36198, 36207), True, 'import nifgen._attributes as _attributes\n'), ((36406, 36443), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250205)'], {}), '(1250205)\n', (36434, 36443), True, 'import nifgen._attributes as _attributes\n'), ((36644, 36681), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250214)'], {}), '(1250214)\n', (36672, 36681), True, 'import nifgen._attributes as _attributes\n'), ((36894, 36931), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250208)'], {}), '(1250208)\n', (36922, 36931), True, 'import nifgen._attributes as _attributes\n'), ((37178, 37215), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150242)'], {}), '(1150242)\n', (37206, 37215), True, 'import nifgen._attributes as _attributes\n'), ((37338, 37376), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150212)'], {}), '(1150212)\n', (37367, 37376), True, 'import nifgen._attributes as _attributes\n'), ((37506, 37543), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150210)'], {}), '(1150210)\n', (37534, 37543), True, 'import nifgen._attributes as _attributes\n'), ((37680, 37717), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250213)'], {}), '(1250213)\n', (37708, 37717), True, 'import nifgen._attributes as _attributes\n'), ((37930, 37967), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250207)'], {}), '(1250207)\n', (37958, 37967), True, 'import nifgen._attributes as _attributes\n'), ((38176, 38214), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150390)'], {}), 
'(1150390)\n', (38205, 38214), True, 'import nifgen._attributes as _attributes\n'), ((38354, 38391), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1050203)'], {}), '(1050203)\n', (38382, 38391), True, 'import nifgen._attributes as _attributes\n'), ((38651, 38690), 'nifgen._attributes.AttributeViBoolean', '_attributes.AttributeViBoolean', (['(1250003)'], {}), '(1250003)\n', (38681, 38690), True, 'import nifgen._attributes as _attributes\n'), ((38870, 38908), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1250004)'], {}), '(1250004)\n', (38899, 38908), True, 'import nifgen._attributes as _attributes\n'), ((39421, 39508), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.OutputMode', '(1250001)'], {}), '(_attributes.AttributeViInt32, enums.OutputMode, \n 1250001)\n', (39446, 39508), True, 'import nifgen._attributes as _attributes\n'), ((39958, 39996), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150310)'], {}), '(1150310)\n', (39987, 39996), True, 'import nifgen._attributes as _attributes\n'), ((40122, 40220), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViString', 'enums.ReferenceClockSource', '(1150113)'], {}), '(_attributes.AttributeViString, enums.\n ReferenceClockSource, 1150113)\n', (40147, 40220), True, 'import nifgen._attributes as _attributes\n'), ((40845, 40883), 'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150107)'], {}), '(1150107)\n', (40874, 40883), True, 'import nifgen._attributes as _attributes\n'), ((41113, 41208), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViString', 'enums.SampleClockSource', '(1150112)'], {}), '(_attributes.AttributeViString, enums.\n SampleClockSource, 1150112)\n', (41138, 41208), True, 'import nifgen._attributes as _attributes\n'), ((41823, 41861), 
'nifgen._attributes.AttributeViReal64', '_attributes.AttributeViReal64', (['(1150368)'], {}), '(1150368)\n', (41852, 41861), True, 'import nifgen._attributes as _attributes\n'), ((42224, 42327), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViString', 'enums.SampleClockTimebaseSource', '(1150367)'], {}), '(_attributes.AttributeViString, enums.\n SampleClockTimebaseSource, 1150367)\n', (42249, 42327), True, 'import nifgen._attributes as _attributes\n'), ((42651, 42689), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150270)'], {}), '(1150270)\n', (42680, 42689), True, 'import nifgen._attributes as _attributes\n'), ((43190, 43227), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150272)'], {}), '(1150272)\n', (43218, 43227), True, 'import nifgen._attributes as _attributes\n'), ((43413, 43507), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.ScriptTriggerType', '(1150290)'], {}), '(_attributes.AttributeViInt32, enums.\n ScriptTriggerType, 1150290)\n', (43438, 43507), True, 'import nifgen._attributes as _attributes\n'), ((44100, 44138), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150243)'], {}), '(1150243)\n', (44129, 44138), True, 'import nifgen._attributes as _attributes\n'), ((44222, 44261), 'nifgen._attributes.AttributeViBoolean', '_attributes.AttributeViBoolean', (['(1050005)'], {}), '(1050005)\n', (44252, 44261), True, 'import nifgen._attributes as _attributes\n'), ((44800, 44838), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050514)'], {}), '(1050514)\n', (44829, 44838), True, 'import nifgen._attributes as _attributes\n'), ((44929, 44966), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1050503)'], {}), '(1050503)\n', (44957, 44966), True, 'import nifgen._attributes as _attributes\n'), ((45062, 45099), 
'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1050504)'], {}), '(1050504)\n', (45090, 45099), True, 'import nifgen._attributes as _attributes\n'), ((45206, 45244), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050551)'], {}), '(1050551)\n', (45235, 45244), True, 'import nifgen._attributes as _attributes\n'), ((45374, 45412), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050513)'], {}), '(1050513)\n', (45403, 45412), True, 'import nifgen._attributes as _attributes\n'), ((45548, 45586), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150314)'], {}), '(1150314)\n', (45577, 45586), True, 'import nifgen._attributes as _attributes\n'), ((45700, 45793), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.StartTriggerType', '(1150280)'], {}), '(_attributes.AttributeViInt32, enums.\n StartTriggerType, 1150280)\n', (45725, 45793), True, 'import nifgen._attributes as _attributes\n'), ((46027, 46064), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150325)'], {}), '(1150325)\n', (46055, 46064), True, 'import nifgen._attributes as _attributes\n'), ((46967, 47004), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150324)'], {}), '(1150324)\n', (46995, 47004), True, 'import nifgen._attributes as _attributes\n'), ((47301, 47339), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1150326)'], {}), '(1150326)\n', (47330, 47339), True, 'import nifgen._attributes as _attributes\n'), ((47624, 47678), 'nifgen._attributes.AttributeViReal64TimeDeltaSeconds', '_attributes.AttributeViReal64TimeDeltaSeconds', (['(1150409)'], {}), '(1150409)\n', (47669, 47678), True, 'import nifgen._attributes as _attributes\n'), ((47864, 47902), 'nifgen._attributes.AttributeViString', '_attributes.AttributeViString', (['(1050327)'], {}), '(1050327)\n', 
(47893, 47902), True, 'import nifgen._attributes as _attributes\n'), ((48130, 48228), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.TerminalConfiguration', '(1150365)'], {}), '(_attributes.AttributeViInt32, enums.\n TerminalConfiguration, 1150365)\n', (48155, 48228), True, 'import nifgen._attributes as _attributes\n'), ((48404, 48492), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.TriggerMode', '(1150108)'], {}), '(_attributes.AttributeViInt32, enums.TriggerMode, \n 1150108)\n', (48429, 48492), True, 'import nifgen._attributes as _attributes\n'), ((48579, 48667), 'nifgen._attributes.AttributeEnum', '_attributes.AttributeEnum', (['_attributes.AttributeViInt32', 'enums.WaitBehavior', '(1150379)'], {}), '(_attributes.AttributeViInt32, enums.WaitBehavior,\n 1150379)\n', (48604, 48667), True, 'import nifgen._attributes as _attributes\n'), ((48935, 48972), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1150380)'], {}), '(1150380)\n', (48963, 48972), True, 'import nifgen._attributes as _attributes\n'), ((49134, 49171), 'nifgen._attributes.AttributeViInt32', '_attributes.AttributeViInt32', (['(1250206)'], {}), '(1250206)\n', (49162, 49171), True, 'import nifgen._attributes as _attributes\n'), ((3322, 3398), 'nifgen._converters.convert_repeated_capabilities', '_converters.convert_repeated_capabilities', (['repeated_capability', 'self._prefix'], {}), '(repeated_capability, self._prefix)\n', (3363, 3398), True, 'import nifgen._converters as _converters\n'), ((52736, 52765), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (52755, 52765), True, 'import nifgen._visatype as _visatype\n'), ((53038, 53070), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_size'], {}), '(waveform_size)\n', (53055, 53070), True, 'import nifgen._visatype as _visatype\n'), ((53228, 53317), 
'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (53247, 53317), True, 'import nifgen.errors as errors\n'), ((54466, 54495), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (54485, 54495), True, 'import nifgen._visatype as _visatype\n'), ((54659, 54691), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_size'], {}), '(waveform_size)\n', (54676, 54691), True, 'import nifgen._visatype as _visatype\n'), ((54737, 54756), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (54754, 54756), True, 'import nifgen._visatype as _visatype\n'), ((54972, 55061), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (54991, 55061), True, 'import nifgen.errors as errors\n'), ((55729, 55758), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (55748, 55758), True, 'import nifgen._visatype as _visatype\n'), ((55998, 56087), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (56017, 56087), True, 'import nifgen.errors as errors\n'), ((58696, 58725), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (58715, 58725), True, 'import nifgen._visatype as _visatype\n'), ((58891, 58925), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['sequence_handle'], {}), '(sequence_handle)\n', (58908, 58925), True, 'import nifgen._visatype as _visatype\n'), ((58960, 58984), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['gain'], {}), '(gain)\n', (58978, 58984), True, 
'import nifgen._visatype as _visatype\n'), ((59021, 59047), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['offset'], {}), '(offset)\n', (59039, 59047), True, 'import nifgen._visatype as _visatype\n'), ((59211, 59300), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (59230, 59300), True, 'import nifgen.errors as errors\n'), ((62235, 62264), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (62254, 62264), True, 'import nifgen._visatype as _visatype\n'), ((62430, 62464), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_handle'], {}), '(waveform_handle)\n', (62447, 62464), True, 'import nifgen._visatype as _visatype\n'), ((62499, 62523), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['gain'], {}), '(gain)\n', (62517, 62523), True, 'import nifgen._visatype as _visatype\n'), ((62560, 62586), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['offset'], {}), '(offset)\n', (62578, 62586), True, 'import nifgen._visatype as _visatype\n'), ((62750, 62839), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (62769, 62839), True, 'import nifgen.errors as errors\n'), ((66366, 66395), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (66385, 66395), True, 'import nifgen._visatype as _visatype\n'), ((66567, 66607), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['frequency_list_handle'], {}), '(frequency_list_handle)\n', (66584, 66607), True, 'import nifgen._visatype as _visatype\n'), ((66647, 66676), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['amplitude'], {}), '(amplitude)\n', (66665, 66676), True, 'import nifgen._visatype as 
_visatype\n'), ((66716, 66745), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['dc_offset'], {}), '(dc_offset)\n', (66734, 66745), True, 'import nifgen._visatype as _visatype\n'), ((66787, 66818), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['start_phase'], {}), '(start_phase)\n', (66805, 66818), True, 'import nifgen._visatype as _visatype\n'), ((67012, 67101), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (67031, 67101), True, 'import nifgen.errors as errors\n'), ((74234, 74263), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (74253, 74263), True, 'import nifgen._visatype as _visatype\n'), ((74422, 74455), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform.value'], {}), '(waveform.value)\n', (74439, 74455), True, 'import nifgen._visatype as _visatype\n'), ((74495, 74524), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['amplitude'], {}), '(amplitude)\n', (74513, 74524), True, 'import nifgen._visatype as _visatype\n'), ((74564, 74593), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['dc_offset'], {}), '(dc_offset)\n', (74582, 74593), True, 'import nifgen._visatype as _visatype\n'), ((74633, 74662), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['frequency'], {}), '(frequency)\n', (74651, 74662), True, 'import nifgen._visatype as _visatype\n'), ((74704, 74735), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['start_phase'], {}), '(start_phase)\n', (74722, 74735), True, 'import nifgen._visatype as _visatype\n'), ((74941, 75030), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (74960, 75030), True, 'import nifgen.errors as errors\n'), ((79027, 
79056), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (79046, 79056), True, 'import nifgen._visatype as _visatype\n'), ((79605, 79624), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (79622, 79624), True, 'import nifgen._visatype as _visatype\n'), ((79868, 79957), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (79887, 79957), True, 'import nifgen.errors as errors\n'), ((82253, 82282), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (82272, 82282), True, 'import nifgen._visatype as _visatype\n'), ((82679, 82698), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (82696, 82698), True, 'import nifgen._visatype as _visatype\n'), ((82942, 83031), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (82961, 83031), True, 'import nifgen.errors as errors\n'), ((86180, 86209), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (86199, 86209), True, 'import nifgen._visatype as _visatype\n'), ((86471, 86506), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['byte_order.value'], {}), '(byte_order.value)\n', (86488, 86506), True, 'import nifgen._visatype as _visatype\n'), ((86552, 86571), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (86569, 86571), True, 'import nifgen._visatype as _visatype\n'), ((86810, 86899), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (86829, 86899), True, 'import nifgen.errors 
as errors\n'), ((90081, 90110), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (90100, 90110), True, 'import nifgen._visatype as _visatype\n'), ((90372, 90407), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['byte_order.value'], {}), '(byte_order.value)\n', (90389, 90407), True, 'import nifgen._visatype as _visatype\n'), ((90453, 90472), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (90470, 90472), True, 'import nifgen._visatype as _visatype\n'), ((90711, 90800), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (90730, 90800), True, 'import nifgen.errors as errors\n'), ((93096, 93125), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (93115, 93125), True, 'import nifgen._visatype as _visatype\n'), ((93522, 93541), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (93539, 93541), True, 'import nifgen._visatype as _visatype\n'), ((93785, 93874), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (93804, 93874), True, 'import nifgen.errors as errors\n'), ((95548, 95577), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (95567, 95577), True, 'import nifgen._visatype as _visatype\n'), ((96130, 96219), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (96149, 96219), True, 'import nifgen.errors as errors\n'), ((97114, 97143), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', 
(97133, 97143), True, 'import nifgen._visatype as _visatype\n'), ((97507, 97596), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (97526, 97596), True, 'import nifgen.errors as errors\n'), ((98366, 98395), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (98385, 98395), True, 'import nifgen._visatype as _visatype\n'), ((98746, 98835), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (98765, 98835), True, 'import nifgen.errors as errors\n'), ((101200, 101229), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (101219, 101229), True, 'import nifgen._visatype as _visatype\n'), ((101392, 101422), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (101408, 101422), True, 'import nifgen._visatype as _visatype\n'), ((101468, 101489), 'nifgen._visatype.ViBoolean', '_visatype.ViBoolean', ([], {}), '()\n', (101487, 101489), True, 'import nifgen._visatype as _visatype\n'), ((101709, 101798), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (101728, 101798), True, 'import nifgen.errors as errors\n'), ((103071, 103100), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (103090, 103100), True, 'import nifgen._visatype as _visatype\n'), ((103263, 103293), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (103279, 103293), True, 'import nifgen._visatype as _visatype\n'), ((103339, 103358), 
'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (103356, 103358), True, 'import nifgen._visatype as _visatype\n'), ((103576, 103665), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (103595, 103665), True, 'import nifgen.errors as errors\n'), ((104952, 104981), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (104971, 104981), True, 'import nifgen._visatype as _visatype\n'), ((105144, 105174), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (105160, 105174), True, 'import nifgen._visatype as _visatype\n'), ((105220, 105240), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (105238, 105240), True, 'import nifgen._visatype as _visatype\n'), ((105459, 105548), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (105478, 105548), True, 'import nifgen.errors as errors\n'), ((108609, 108638), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (108628, 108638), True, 'import nifgen._visatype as _visatype\n'), ((108801, 108831), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (108817, 108831), True, 'import nifgen._visatype as _visatype\n'), ((108872, 108891), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (108889, 108891), True, 'import nifgen._visatype as _visatype\n'), ((109117, 109205), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(True)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=True,\n is_error_handling=False)\n', (109136, 109205), True, 
'import nifgen.errors as errors\n'), ((109229, 109258), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['error_code'], {}), '(error_code)\n', (109246, 109258), True, 'import nifgen._visatype as _visatype\n'), ((109525, 109614), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (109544, 109614), True, 'import nifgen.errors as errors\n'), ((111408, 111437), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (111427, 111437), True, 'import nifgen._visatype as _visatype\n'), ((111478, 111498), 'nifgen._visatype.ViStatus', '_visatype.ViStatus', ([], {}), '()\n', (111496, 111498), True, 'import nifgen._visatype as _visatype\n'), ((111558, 111577), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (111575, 111577), True, 'import nifgen._visatype as _visatype\n'), ((111848, 111935), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(True)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=True,\n is_error_handling=True)\n', (111867, 111935), True, 'import nifgen.errors as errors\n'), ((111978, 112007), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['error_code'], {}), '(error_code)\n', (111995, 112007), True, 'import nifgen._visatype as _visatype\n'), ((112338, 112426), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=True)\n', (112357, 112426), True, 'import nifgen.errors as errors\n'), ((114303, 114332), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (114322, 114332), True, 'import nifgen._visatype as _visatype\n'), ((114424, 114512), 'nifgen.errors.handle_error', 
'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=True)\n', (114443, 114512), True, 'import nifgen.errors as errors\n'), ((116581, 116610), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (116600, 116610), True, 'import nifgen._visatype as _visatype\n'), ((116648, 116680), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['trigger.value'], {}), '(trigger.value)\n', (116665, 116680), True, 'import nifgen._visatype as _visatype\n'), ((116914, 117003), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (116933, 117003), True, 'import nifgen.errors as errors\n'), ((119277, 119306), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (119296, 119306), True, 'import nifgen._visatype as _visatype\n'), ((119469, 119499), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (119485, 119499), True, 'import nifgen._visatype as _visatype\n'), ((119545, 119581), 'nifgen._visatype.ViBoolean', '_visatype.ViBoolean', (['attribute_value'], {}), '(attribute_value)\n', (119564, 119581), True, 'import nifgen._visatype as _visatype\n'), ((119740, 119829), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (119759, 119829), True, 'import nifgen.errors as errors\n'), ((122096, 122125), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (122115, 122125), True, 'import nifgen._visatype as _visatype\n'), ((122288, 122318), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], 
{}), '(attribute_id)\n', (122304, 122318), True, 'import nifgen._visatype as _visatype\n'), ((122364, 122398), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['attribute_value'], {}), '(attribute_value)\n', (122381, 122398), True, 'import nifgen._visatype as _visatype\n'), ((122555, 122644), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (122574, 122644), True, 'import nifgen.errors as errors\n'), ((124916, 124945), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (124935, 124945), True, 'import nifgen._visatype as _visatype\n'), ((125108, 125138), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (125124, 125138), True, 'import nifgen._visatype as _visatype\n'), ((125184, 125219), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', (['attribute_value'], {}), '(attribute_value)\n', (125202, 125219), True, 'import nifgen._visatype as _visatype\n'), ((125377, 125466), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (125396, 125466), True, 'import nifgen.errors as errors\n'), ((127736, 127765), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (127755, 127765), True, 'import nifgen._visatype as _visatype\n'), ((127928, 127958), 'nifgen._visatype.ViAttr', '_visatype.ViAttr', (['attribute_id'], {}), '(attribute_id)\n', (127944, 127958), True, 'import nifgen._visatype as _visatype\n'), ((128229, 128318), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', 
(128248, 128318), True, 'import nifgen.errors as errors\n'), ((130896, 130925), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (130915, 130925), True, 'import nifgen._visatype as _visatype\n'), ((131196, 131232), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['relative_to.value'], {}), '(relative_to.value)\n', (131213, 131232), True, 'import nifgen._visatype as _visatype\n'), ((131269, 131294), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['offset'], {}), '(offset)\n', (131286, 131294), True, 'import nifgen._visatype as _visatype\n'), ((131476, 131565), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (131495, 131565), True, 'import nifgen.errors as errors\n'), ((136788, 136817), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (136807, 136817), True, 'import nifgen._visatype as _visatype\n'), ((136983, 137017), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_handle'], {}), '(waveform_handle)\n', (137000, 137017), True, 'import nifgen._visatype as _visatype\n'), ((137059, 137095), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['relative_to.value'], {}), '(relative_to.value)\n', (137076, 137095), True, 'import nifgen._visatype as _visatype\n'), ((137132, 137157), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['offset'], {}), '(offset)\n', (137149, 137157), True, 'import nifgen._visatype as _visatype\n'), ((137336, 137425), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (137355, 137425), True, 'import nifgen.errors as errors\n'), ((137660, 137689), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), 
'(self._vi)\n', (137679, 137689), True, 'import nifgen._visatype as _visatype\n'), ((137783, 137871), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=True)\n', (137802, 137871), True, 'import nifgen.errors as errors\n'), ((139939, 139968), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (139958, 139968), True, 'import nifgen._visatype as _visatype\n'), ((140134, 140168), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_handle'], {}), '(waveform_handle)\n', (140151, 140168), True, 'import nifgen._visatype as _visatype\n'), ((140493, 140582), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (140512, 140582), True, 'import nifgen.errors as errors\n'), ((142347, 142376), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (142366, 142376), True, 'import nifgen._visatype as _visatype\n'), ((143050, 143139), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (143069, 143139), True, 'import nifgen.errors as errors\n'), ((145330, 145359), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (145349, 145359), True, 'import nifgen._visatype as _visatype\n'), ((145911, 146000), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (145930, 146000), True, 'import nifgen.errors as errors\n'), ((147831, 147860), 
'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (147850, 147860), True, 'import nifgen._visatype as _visatype\n'), ((148412, 148501), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (148431, 148501), True, 'import nifgen.errors as errors\n'), ((149439, 149468), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (149458, 149468), True, 'import nifgen._visatype as _visatype\n'), ((149803, 149892), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (149822, 149892), True, 'import nifgen.errors as errors\n'), ((151695, 151724), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (151714, 151724), True, 'import nifgen._visatype as _visatype\n'), ((151890, 151924), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_handle'], {}), '(waveform_handle)\n', (151907, 151924), True, 'import nifgen._visatype as _visatype\n'), ((152363, 152452), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (152382, 152452), True, 'import nifgen.errors as errors\n'), ((154681, 154710), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (154700, 154710), True, 'import nifgen._visatype as _visatype\n'), ((154876, 154910), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_handle'], {}), '(waveform_handle)\n', (154893, 154910), True, 'import nifgen._visatype as _visatype\n'), ((155227, 155316), 'nifgen.errors.handle_error', 
'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (155246, 155316), True, 'import nifgen.errors as errors\n'), ((158709, 158738), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (158728, 158738), True, 'import nifgen._visatype as _visatype\n'), ((158779, 158809), 'nifgen._visatype.ViStatus', '_visatype.ViStatus', (['error_code'], {}), '(error_code)\n', (158797, 158809), True, 'import nifgen._visatype as _visatype\n'), ((159006, 159094), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(True)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=True)\n', (159025, 159094), True, 'import nifgen.errors as errors\n'), ((165746, 165811), 'nifgen._converters.convert_repeated_capabilities_from_init', '_converters.convert_repeated_capabilities_from_init', (['channel_name'], {}), '(channel_name)\n', (165797, 165811), True, 'import nifgen._converters as _converters\n'), ((165830, 165887), 'nifgen._converters.convert_init_with_options_dictionary', '_converters.convert_init_with_options_dictionary', (['options'], {}), '(options)\n', (165878, 165887), True, 'import nifgen._converters as _converters\n'), ((165912, 165936), 'nifgen._library_singleton.get', '_library_singleton.get', ([], {}), '()\n', (165934, 165936), True, 'import nifgen._library_singleton as _library_singleton\n'), ((166225, 166258), 'nitclk.SessionReference', 'nitclk.SessionReference', (['self._vi'], {}), '(self._vi)\n', (166248, 166258), False, 'import nitclk\n'), ((168977, 169006), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (168996, 169006), True, 'import nifgen._visatype as _visatype\n'), ((169096, 169185), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], 
{'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (169115, 169185), True, 'import nifgen.errors as errors\n'), ((169636, 169665), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (169655, 169665), True, 'import nifgen._visatype as _visatype\n'), ((169754, 169843), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (169773, 169843), True, 'import nifgen.errors as errors\n'), ((170990, 171019), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (171009, 171019), True, 'import nifgen._visatype as _visatype\n'), ((171065, 171099), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['sequence_handle'], {}), '(sequence_handle)\n', (171082, 171099), True, 'import nifgen._visatype as _visatype\n'), ((171213, 171302), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (171232, 171302), True, 'import nifgen.errors as errors\n'), ((172709, 172738), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (172728, 172738), True, 'import nifgen._visatype as _visatype\n'), ((172784, 172818), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform_handle'], {}), '(waveform_handle)\n', (172801, 172818), True, 'import nifgen._visatype as _visatype\n'), ((172932, 173021), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (172951, 173021), True, 'import nifgen.errors as errors\n'), ((174187, 
174216), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (174206, 174216), True, 'import nifgen._visatype as _visatype\n'), ((174268, 174308), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['frequency_list_handle'], {}), '(frequency_list_handle)\n', (174285, 174308), True, 'import nifgen._visatype as _visatype\n'), ((174425, 174514), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (174444, 174514), True, 'import nifgen.errors as errors\n'), ((175809, 175838), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (175828, 175838), True, 'import nifgen._visatype as _visatype\n'), ((175919, 176008), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (175938, 176008), True, 'import nifgen.errors as errors\n'), ((181615, 181644), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (181634, 181644), True, 'import nifgen._visatype as _visatype\n'), ((183378, 183397), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (183395, 183397), True, 'import nifgen._visatype as _visatype\n'), ((183744, 183833), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (183763, 183833), True, 'import nifgen.errors as errors\n'), ((186936, 186965), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (186955, 186965), True, 'import nifgen._visatype as _visatype\n'), ((187660, 187679), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', 
([], {}), '()\n', (187677, 187679), True, 'import nifgen._visatype as _visatype\n'), ((187933, 188022), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (187952, 188022), True, 'import nifgen.errors as errors\n'), ((193696, 193725), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (193715, 193725), True, 'import nifgen._visatype as _visatype\n'), ((193764, 193797), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['waveform.value'], {}), '(waveform.value)\n', (193781, 193797), True, 'import nifgen._visatype as _visatype\n'), ((194449, 194468), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (194466, 194468), True, 'import nifgen._visatype as _visatype\n'), ((194743, 194832), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (194762, 194832), True, 'import nifgen.errors as errors\n'), ((195179, 195208), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (195198, 195208), True, 'import nifgen._visatype as _visatype\n'), ((195290, 195379), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (195309, 195379), True, 'import nifgen.errors as errors\n'), ((196150, 196179), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (196169, 196179), True, 'import nifgen._visatype as _visatype\n'), ((196223, 196242), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (196240, 196242), True, 'import nifgen._visatype as _visatype\n'), ((196441, 
196529), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(True)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=True,\n is_error_handling=False)\n', (196460, 196529), True, 'import nifgen.errors as errors\n'), ((196556, 196585), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['error_code'], {}), '(error_code)\n', (196573, 196585), True, 'import nifgen._visatype as _visatype\n'), ((196697, 196739), 'array.array', 'array.array', (['"""b"""', '([0] * configuration_size)'], {}), "('b', [0] * configuration_size)\n", (196708, 196739), False, 'import array\n'), ((197021, 197110), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (197040, 197110), True, 'import nifgen.errors as errors\n'), ((197122, 197171), 'nifgen._converters.convert_to_bytes', '_converters.convert_to_bytes', (['configuration_array'], {}), '(configuration_array)\n', (197150, 197171), True, 'import nifgen._converters as _converters\n'), ((198069, 198098), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (198088, 198098), True, 'import nifgen._visatype as _visatype\n'), ((198323, 198412), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (198342, 198412), True, 'import nifgen.errors as errors\n'), ((199034, 199063), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (199053, 199063), True, 'import nifgen._visatype as _visatype\n'), ((199099, 199123), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['index'], {}), '(index)\n', (199116, 199123), True, 'import nifgen._visatype as _visatype\n'), ((199165, 199184), 
'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (199182, 199184), True, 'import nifgen._visatype as _visatype\n'), ((199376, 199464), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(True)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=True,\n is_error_handling=False)\n', (199395, 199464), True, 'import nifgen.errors as errors\n'), ((199489, 199518), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', (['error_code'], {}), '(error_code)\n', (199506, 199518), True, 'import nifgen._visatype as _visatype\n'), ((199752, 199841), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (199771, 199841), True, 'import nifgen.errors as errors\n'), ((200767, 200796), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (200786, 200796), True, 'import nifgen._visatype as _visatype\n'), ((200831, 200850), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (200848, 200850), True, 'import nifgen._visatype as _visatype\n'), ((200886, 200905), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (200903, 200905), True, 'import nifgen._visatype as _visatype\n'), ((200939, 200958), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (200956, 200958), True, 'import nifgen._visatype as _visatype\n'), ((200993, 201012), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (201010, 201012), True, 'import nifgen._visatype as _visatype\n'), ((201049, 201068), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (201066, 201068), True, 'import nifgen._visatype as _visatype\n'), ((201481, 201570), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': 
'(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (201500, 201570), True, 'import nifgen.errors as errors\n'), ((202106, 202135), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (202125, 202135), True, 'import nifgen._visatype as _visatype\n'), ((202177, 202197), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (202195, 202197), True, 'import nifgen._visatype as _visatype\n'), ((202365, 202454), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (202384, 202454), True, 'import nifgen.errors as errors\n'), ((202887, 202916), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (202906, 202916), True, 'import nifgen._visatype as _visatype\n'), ((202953, 202972), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (202970, 202972), True, 'import nifgen._visatype as _visatype\n'), ((203141, 203230), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (203160, 203230), True, 'import nifgen.errors as errors\n'), ((204954, 204983), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (204973, 204983), True, 'import nifgen._visatype as _visatype\n'), ((205019, 205038), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (205036, 205038), True, 'import nifgen._visatype as _visatype\n'), ((205193, 205282), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (205212, 205282), True, 'import 
nifgen.errors as errors\n'), ((205294, 205332), 'nifgen.enums.HardwareState', 'enums.HardwareState', (['state_ctype.value'], {}), '(state_ctype.value)\n', (205313, 205332), True, 'import nifgen.enums as enums\n'), ((205931, 205980), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'hour', 'minute'], {}), '(year, month, day, hour, minute)\n', (205948, 205980), False, 'import datetime\n'), ((206394, 206443), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'hour', 'minute'], {}), '(year, month, day, hour, minute)\n', (206411, 206443), False, 'import datetime\n'), ((207645, 207674), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (207664, 207674), True, 'import nifgen._visatype as _visatype\n'), ((207709, 207728), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (207726, 207728), True, 'import nifgen._visatype as _visatype\n'), ((207764, 207783), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (207781, 207783), True, 'import nifgen._visatype as _visatype\n'), ((207817, 207836), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (207834, 207836), True, 'import nifgen._visatype as _visatype\n'), ((207871, 207890), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (207888, 207890), True, 'import nifgen._visatype as _visatype\n'), ((207927, 207946), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (207944, 207946), True, 'import nifgen._visatype as _visatype\n'), ((208360, 208449), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (208379, 208449), True, 'import nifgen.errors as errors\n'), ((208983, 209012), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (209002, 209012), True, 'import nifgen._visatype 
as _visatype\n'), ((209054, 209074), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (209072, 209074), True, 'import nifgen._visatype as _visatype\n'), ((209243, 209332), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (209262, 209332), True, 'import nifgen.errors as errors\n'), ((210033, 210062), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (210052, 210062), True, 'import nifgen._visatype as _visatype\n'), ((210111, 210132), 'nifgen._visatype.ViBoolean', '_visatype.ViBoolean', ([], {}), '()\n', (210130, 210132), True, 'import nifgen._visatype as _visatype\n'), ((210316, 210405), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (210335, 210405), True, 'import nifgen.errors as errors\n'), ((211194, 211223), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (211213, 211223), True, 'import nifgen._visatype as _visatype\n'), ((211642, 211731), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (211661, 211731), True, 'import nifgen.errors as errors\n'), ((212604, 212633), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (212623, 212633), True, 'import nifgen._visatype as _visatype\n'), ((212858, 212947), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (212877, 212947), 
True, 'import nifgen.errors as errors\n'), ((220688, 220721), 'nifgen._visatype.ViBoolean', '_visatype.ViBoolean', (['reset_device'], {}), '(reset_device)\n', (220707, 220721), True, 'import nifgen._visatype as _visatype\n'), ((220863, 220884), 'nifgen._visatype.ViSession', '_visatype.ViSession', ([], {}), '()\n', (220882, 220884), True, 'import nifgen._visatype as _visatype\n'), ((221111, 221200), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (221130, 221200), True, 'import nifgen.errors as errors\n'), ((221610, 221639), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (221629, 221639), True, 'import nifgen._visatype as _visatype\n'), ((221732, 221821), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (221751, 221821), True, 'import nifgen.errors as errors\n'), ((222683, 222712), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (222702, 222712), True, 'import nifgen._visatype as _visatype\n'), ((222747, 222768), 'nifgen._visatype.ViBoolean', '_visatype.ViBoolean', ([], {}), '()\n', (222766, 222768), True, 'import nifgen._visatype as _visatype\n'), ((222911, 223000), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (222930, 223000), True, 'import nifgen.errors as errors\n'), ((224367, 224396), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (224386, 224396), True, 'import nifgen._visatype as _visatype\n'), ((224454, 224473), 
'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (224471, 224473), True, 'import nifgen._visatype as _visatype\n'), ((224527, 224546), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (224544, 224546), True, 'import nifgen._visatype as _visatype\n'), ((224600, 224619), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (224617, 224619), True, 'import nifgen._visatype as _visatype\n'), ((224668, 224687), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (224685, 224687), True, 'import nifgen._visatype as _visatype\n'), ((225183, 225272), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (225202, 225272), True, 'import nifgen.errors as errors\n'), ((227100, 227129), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (227119, 227129), True, 'import nifgen._visatype as _visatype\n'), ((227187, 227206), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (227204, 227206), True, 'import nifgen._visatype as _visatype\n'), ((227253, 227272), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (227270, 227272), True, 'import nifgen._visatype as _visatype\n'), ((227324, 227343), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (227341, 227343), True, 'import nifgen._visatype as _visatype\n'), ((227395, 227414), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (227412, 227414), True, 'import nifgen._visatype as _visatype\n'), ((227898, 227987), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (227917, 227987), True, 'import nifgen.errors as errors\n'), ((230081, 230110), 
'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (230100, 230110), True, 'import nifgen._visatype as _visatype\n'), ((230169, 230188), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (230186, 230188), True, 'import nifgen._visatype as _visatype\n'), ((230248, 230267), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (230265, 230267), True, 'import nifgen._visatype as _visatype\n'), ((230327, 230346), 'nifgen._visatype.ViInt32', '_visatype.ViInt32', ([], {}), '()\n', (230344, 230346), True, 'import nifgen._visatype as _visatype\n'), ((230408, 230428), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (230426, 230428), True, 'import nifgen._visatype as _visatype\n'), ((230490, 230510), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (230508, 230510), True, 'import nifgen._visatype as _visatype\n'), ((230572, 230592), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (230590, 230592), True, 'import nifgen._visatype as _visatype\n'), ((231374, 231463), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (231393, 231463), True, 'import nifgen.errors as errors\n'), ((232181, 232210), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (232200, 232210), True, 'import nifgen._visatype as _visatype\n'), ((232252, 232272), 'nifgen._visatype.ViReal64', '_visatype.ViReal64', ([], {}), '()\n', (232270, 232272), True, 'import nifgen._visatype as _visatype\n'), ((232445, 232534), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (232464, 232534), True, 'import nifgen.errors as 
errors\n'), ((232974, 233003), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (232993, 233003), True, 'import nifgen._visatype as _visatype\n'), ((233089, 233178), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (233108, 233178), True, 'import nifgen.errors as errors\n'), ((233573, 233602), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (233592, 233602), True, 'import nifgen._visatype as _visatype\n'), ((233694, 233783), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (233713, 233783), True, 'import nifgen.errors as errors\n'), ((234079, 234108), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (234098, 234108), True, 'import nifgen._visatype as _visatype\n'), ((234190, 234279), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (234209, 234279), True, 'import nifgen.errors as errors\n'), ((234353, 234385), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(10.0)'}), '(seconds=10.0)\n', (234371, 234385), False, 'import datetime\n'), ((234672, 234701), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (234691, 234701), True, 'import nifgen._visatype as _visatype\n'), ((234740, 234801), 'nifgen._converters.convert_timedelta_to_milliseconds_int32', '_converters.convert_timedelta_to_milliseconds_int32', (['max_time'], {}), '(max_time)\n', (234791, 234801), True, 'import nifgen._converters as 
_converters\n'), ((234905, 234994), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (234924, 234994), True, 'import nifgen.errors as errors\n'), ((236196, 236225), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (236215, 236225), True, 'import nifgen._visatype as _visatype\n'), ((236305, 236394), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (236324, 236394), True, 'import nifgen.errors as errors\n'), ((237960, 237989), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (237979, 237989), True, 'import nifgen._visatype as _visatype\n'), ((238069, 238158), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (238088, 238158), True, 'import nifgen.errors as errors\n'), ((239312, 239341), 'nifgen._visatype.ViSession', '_visatype.ViSession', (['self._vi'], {}), '(self._vi)\n', (239331, 239341), True, 'import nifgen._visatype as _visatype\n'), ((239388, 239407), 'nifgen._visatype.ViInt16', '_visatype.ViInt16', ([], {}), '()\n', (239405, 239407), True, 'import nifgen._visatype as _visatype\n'), ((239676, 239765), 'nifgen.errors.handle_error', 'errors.handle_error', (['self', 'error_code'], {'ignore_warnings': '(False)', 'is_error_handling': '(False)'}), '(self, error_code, ignore_warnings=False,\n is_error_handling=False)\n', (239695, 239765), True, 'import nifgen.errors as errors\n'), ((849, 877), 'ctypes.POINTER', 'ctypes.POINTER', (['library_type'], {}), '(library_type)\n', (863, 877), False, 
'import ctypes\n'), ((972, 1004), 'numpy.ctypeslib.as_ctypes', 'numpy.ctypeslib.as_ctypes', (['value'], {}), '(value)\n', (997, 1004), False, 'import numpy\n'), ((1603, 1633), 'array.array', 'array.array', (['array_type', 'value'], {}), '(array_type, value)\n', (1614, 1633), False, 'import array\n'), ((81922, 81958), 'numpy.isfortran', 'numpy.isfortran', (['waveform_data_array'], {}), '(waveform_data_array)\n', (81937, 81958), False, 'import numpy\n'), ((82082, 82104), 'numpy.dtype', 'numpy.dtype', (['"""float64"""'], {}), "('float64')\n", (82093, 82104), False, 'import numpy\n'), ((92769, 92805), 'numpy.isfortran', 'numpy.isfortran', (['waveform_data_array'], {}), '(waveform_data_array)\n', (92784, 92805), False, 'import numpy\n'), ((92929, 92949), 'numpy.dtype', 'numpy.dtype', (['"""int16"""'], {}), "('int16')\n", (92940, 92949), False, 'import numpy\n'), ((115643, 115765), 'warnings.warn', 'warnings.warn', (['"""trigger and trigger_id should now always be passed in to the method"""'], {'category': 'DeprecationWarning'}), "(\n 'trigger and trigger_id should now always be passed in to the method',\n category=DeprecationWarning)\n", (115656, 115765), False, 'import warnings\n'), ((139687, 139708), 'numpy.isfortran', 'numpy.isfortran', (['data'], {}), '(data)\n', (139702, 139708), False, 'import numpy\n'), ((139802, 139822), 'numpy.dtype', 'numpy.dtype', (['"""int16"""'], {}), "('int16')\n", (139813, 139822), False, 'import numpy\n'), ((145074, 145095), 'numpy.isfortran', 'numpy.isfortran', (['data'], {}), '(data)\n', (145089, 145095), False, 'import numpy\n'), ((145189, 145211), 'numpy.dtype', 'numpy.dtype', (['"""float64"""'], {}), "('float64')\n", (145200, 145211), False, 'import numpy\n'), ((147579, 147600), 'numpy.isfortran', 'numpy.isfortran', (['data'], {}), '(data)\n', (147594, 147600), False, 'import numpy\n'), ((147694, 147714), 'numpy.dtype', 'numpy.dtype', (['"""int16"""'], {}), "('int16')\n", (147705, 147714), False, 'import numpy\n'), ((154425, 
154446), 'numpy.isfortran', 'numpy.isfortran', (['data'], {}), '(data)\n', (154440, 154446), False, 'import numpy\n'), ((154540, 154562), 'numpy.dtype', 'numpy.dtype', (['"""float64"""'], {}), "('float64')\n", (154551, 154562), False, 'import numpy\n'), ((237389, 237420), 'nifgen.errors.SelfTestError', 'errors.SelfTestError', (['code', 'msg'], {}), '(code, msg)\n', (237409, 237420), True, 'import nifgen.errors as errors\n'), ((54924, 54961), 'ctypes.pointer', 'ctypes.pointer', (['waveform_handle_ctype'], {}), '(waveform_handle_ctype)\n', (54938, 54961), False, 'import ctypes\n'), ((79820, 79857), 'ctypes.pointer', 'ctypes.pointer', (['waveform_handle_ctype'], {}), '(waveform_handle_ctype)\n', (79834, 79857), False, 'import ctypes\n'), ((82894, 82931), 'ctypes.pointer', 'ctypes.pointer', (['waveform_handle_ctype'], {}), '(waveform_handle_ctype)\n', (82908, 82931), False, 'import ctypes\n'), ((86762, 86799), 'ctypes.pointer', 'ctypes.pointer', (['waveform_handle_ctype'], {}), '(waveform_handle_ctype)\n', (86776, 86799), False, 'import ctypes\n'), ((90663, 90700), 'ctypes.pointer', 'ctypes.pointer', (['waveform_handle_ctype'], {}), '(waveform_handle_ctype)\n', (90677, 90700), False, 'import ctypes\n'), ((93737, 93774), 'ctypes.pointer', 'ctypes.pointer', (['waveform_handle_ctype'], {}), '(waveform_handle_ctype)\n', (93751, 93774), False, 'import ctypes\n'), ((101661, 101698), 'ctypes.pointer', 'ctypes.pointer', (['attribute_value_ctype'], {}), '(attribute_value_ctype)\n', (101675, 101698), False, 'import ctypes\n'), ((103528, 103565), 'ctypes.pointer', 'ctypes.pointer', (['attribute_value_ctype'], {}), '(attribute_value_ctype)\n', (103542, 103565), False, 'import ctypes\n'), ((105411, 105448), 'ctypes.pointer', 'ctypes.pointer', (['attribute_value_ctype'], {}), '(attribute_value_ctype)\n', (105425, 105448), False, 'import ctypes\n'), ((111743, 111775), 'ctypes.pointer', 'ctypes.pointer', (['error_code_ctype'], {}), '(error_code_ctype)\n', (111757, 111775), False, 
'import ctypes\n'), ((112233, 112265), 'ctypes.pointer', 'ctypes.pointer', (['error_code_ctype'], {}), '(error_code_ctype)\n', (112247, 112265), False, 'import ctypes\n'), ((183696, 183733), 'ctypes.pointer', 'ctypes.pointer', (['sequence_handle_ctype'], {}), '(sequence_handle_ctype)\n', (183710, 183733), False, 'import ctypes\n'), ((187885, 187922), 'ctypes.pointer', 'ctypes.pointer', (['sequence_handle_ctype'], {}), '(sequence_handle_ctype)\n', (187899, 187922), False, 'import ctypes\n'), ((194689, 194732), 'ctypes.pointer', 'ctypes.pointer', (['frequency_list_handle_ctype'], {}), '(frequency_list_handle_ctype)\n', (194703, 194732), False, 'import ctypes\n'), ((201192, 201218), 'ctypes.pointer', 'ctypes.pointer', (['year_ctype'], {}), '(year_ctype)\n', (201206, 201218), False, 'import ctypes\n'), ((201255, 201282), 'ctypes.pointer', 'ctypes.pointer', (['month_ctype'], {}), '(month_ctype)\n', (201269, 201282), False, 'import ctypes\n'), ((201317, 201342), 'ctypes.pointer', 'ctypes.pointer', (['day_ctype'], {}), '(day_ctype)\n', (201331, 201342), False, 'import ctypes\n'), ((201378, 201404), 'ctypes.pointer', 'ctypes.pointer', (['hour_ctype'], {}), '(hour_ctype)\n', (201392, 201404), False, 'import ctypes\n'), ((201442, 201470), 'ctypes.pointer', 'ctypes.pointer', (['minute_ctype'], {}), '(minute_ctype)\n', (201456, 201470), False, 'import ctypes\n'), ((202321, 202354), 'ctypes.pointer', 'ctypes.pointer', (['temperature_ctype'], {}), '(temperature_ctype)\n', (202335, 202354), False, 'import ctypes\n'), ((203102, 203130), 'ctypes.pointer', 'ctypes.pointer', (['months_ctype'], {}), '(months_ctype)\n', (203116, 203130), False, 'import ctypes\n'), ((205155, 205182), 'ctypes.pointer', 'ctypes.pointer', (['state_ctype'], {}), '(state_ctype)\n', (205169, 205182), False, 'import ctypes\n'), ((208071, 208097), 'ctypes.pointer', 'ctypes.pointer', (['year_ctype'], {}), '(year_ctype)\n', (208085, 208097), False, 'import ctypes\n'), ((208134, 208161), 'ctypes.pointer', 
'ctypes.pointer', (['month_ctype'], {}), '(month_ctype)\n', (208148, 208161), False, 'import ctypes\n'), ((208196, 208221), 'ctypes.pointer', 'ctypes.pointer', (['day_ctype'], {}), '(day_ctype)\n', (208210, 208221), False, 'import ctypes\n'), ((208257, 208283), 'ctypes.pointer', 'ctypes.pointer', (['hour_ctype'], {}), '(hour_ctype)\n', (208271, 208283), False, 'import ctypes\n'), ((208321, 208349), 'ctypes.pointer', 'ctypes.pointer', (['minute_ctype'], {}), '(minute_ctype)\n', (208335, 208349), False, 'import ctypes\n'), ((209199, 209232), 'ctypes.pointer', 'ctypes.pointer', (['temperature_ctype'], {}), '(temperature_ctype)\n', (209213, 209232), False, 'import ctypes\n'), ((210265, 210305), 'ctypes.pointer', 'ctypes.pointer', (['self_cal_supported_ctype'], {}), '(self_cal_supported_ctype)\n', (210279, 210305), False, 'import ctypes\n'), ((211416, 211459), 'nifgen._converters.convert_to_bytes', '_converters.convert_to_bytes', (['configuration'], {}), '(configuration)\n', (211444, 211459), True, 'import nifgen._converters as _converters\n'), ((221076, 221100), 'ctypes.pointer', 'ctypes.pointer', (['vi_ctype'], {}), '(vi_ctype)\n', (221090, 221100), False, 'import ctypes\n'), ((222874, 222900), 'ctypes.pointer', 'ctypes.pointer', (['done_ctype'], {}), '(done_ctype)\n', (222888, 222900), False, 'import ctypes\n'), ((224833, 224882), 'ctypes.pointer', 'ctypes.pointer', (['maximum_number_of_sequences_ctype'], {}), '(maximum_number_of_sequences_ctype)\n', (224847, 224882), False, 'import ctypes\n'), ((224937, 224982), 'ctypes.pointer', 'ctypes.pointer', (['minimum_sequence_length_ctype'], {}), '(minimum_sequence_length_ctype)\n', (224951, 224982), False, 'import ctypes\n'), ((225037, 225082), 'ctypes.pointer', 'ctypes.pointer', (['maximum_sequence_length_ctype'], {}), '(maximum_sequence_length_ctype)\n', (225051, 225082), False, 'import ctypes\n'), ((225132, 225172), 'ctypes.pointer', 'ctypes.pointer', (['maximum_loop_count_ctype'], {}), '(maximum_loop_count_ctype)\n', 
(225146, 225172), False, 'import ctypes\n'), ((227560, 227609), 'ctypes.pointer', 'ctypes.pointer', (['maximum_number_of_waveforms_ctype'], {}), '(maximum_number_of_waveforms_ctype)\n', (227574, 227609), False, 'import ctypes\n'), ((227657, 227695), 'ctypes.pointer', 'ctypes.pointer', (['waveform_quantum_ctype'], {}), '(waveform_quantum_ctype)\n', (227671, 227695), False, 'import ctypes\n'), ((227748, 227791), 'ctypes.pointer', 'ctypes.pointer', (['minimum_waveform_size_ctype'], {}), '(minimum_waveform_size_ctype)\n', (227762, 227791), False, 'import ctypes\n'), ((227844, 227887), 'ctypes.pointer', 'ctypes.pointer', (['maximum_waveform_size_ctype'], {}), '(maximum_waveform_size_ctype)\n', (227858, 227887), False, 'import ctypes\n'), ((230741, 230791), 'ctypes.pointer', 'ctypes.pointer', (['maximum_number_of_freq_lists_ctype'], {}), '(maximum_number_of_freq_lists_ctype)\n', (230755, 230791), False, 'import ctypes\n'), ((230852, 230903), 'ctypes.pointer', 'ctypes.pointer', (['minimum_frequency_list_length_ctype'], {}), '(minimum_frequency_list_length_ctype)\n', (230866, 230903), False, 'import ctypes\n'), ((230964, 231015), 'ctypes.pointer', 'ctypes.pointer', (['maximum_frequency_list_length_ctype'], {}), '(maximum_frequency_list_length_ctype)\n', (230978, 231015), False, 'import ctypes\n'), ((231078, 231131), 'ctypes.pointer', 'ctypes.pointer', (['minimum_frequency_list_duration_ctype'], {}), '(minimum_frequency_list_duration_ctype)\n', (231092, 231131), False, 'import ctypes\n'), ((231194, 231247), 'ctypes.pointer', 'ctypes.pointer', (['maximum_frequency_list_duration_ctype'], {}), '(maximum_frequency_list_duration_ctype)\n', (231208, 231247), False, 'import ctypes\n'), ((231310, 231363), 'ctypes.pointer', 'ctypes.pointer', (['frequency_list_duration_quantum_ctype'], {}), '(frequency_list_duration_quantum_ctype)\n', (231324, 231363), False, 'import ctypes\n'), ((232401, 232434), 'ctypes.pointer', 'ctypes.pointer', (['temperature_ctype'], {}), 
'(temperature_ctype)\n', (232415, 232434), False, 'import ctypes\n'), ((239602, 239640), 'ctypes.pointer', 'ctypes.pointer', (['self_test_result_ctype'], {}), '(self_test_result_ctype)\n', (239616, 239640), False, 'import ctypes\n'), ((1074, 1102), 'ctypes.POINTER', 'ctypes.POINTER', (['library_type'], {}), '(library_type)\n', (1088, 1102), False, 'import ctypes\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 3 15:18:38 2014
@author: <NAME> and <NAME>
Loads .continuous, .events, and .spikes files saved from the Open Ephys GUI
Usage:
import OpenEphys
data = OpenEphys.load(pathToFile) # returns a dict with data, timestamps, etc.
"""
import os
import numpy as np
import scipy.signal
import scipy.io
import time
import struct
from copy import deepcopy
# constants
NUM_HEADER_BYTES = 1024
SAMPLES_PER_RECORD = 1024
BYTES_PER_SAMPLE = 2
RECORD_SIZE = 4 + 8 + SAMPLES_PER_RECORD * BYTES_PER_SAMPLE + 10 # size of each continuous record in bytes
RECORD_MARKER = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])
# constants for pre-allocating matrices:
MAX_NUMBER_OF_SPIKES = int(1e6)
MAX_NUMBER_OF_RECORDS = int(1e6)
MAX_NUMBER_OF_EVENTS = int(1e6)
def load(filepath, dtype = float):
    """Dispatch to the loader matching the file type of *filepath*.

    The decision is made by substring matching on the path, in order:
    'continuous' -> loadContinuous (honouring *dtype*), 'spikes' ->
    loadSpikes, 'events' -> loadEvents.  Raises Exception for anything
    else.  Returns whatever dict the chosen loader produces.
    """
    if 'continuous' in filepath:
        return loadContinuous(filepath, dtype)
    if 'spikes' in filepath:
        return loadSpikes(filepath)
    if 'events' in filepath:
        return loadEvents(filepath)
    raise Exception("Not a recognized file type. Please input a .continuous, .spikes, or .events file")
def loadFolder(folderpath, dtype = float, **kwargs):
    """Load all .continuous files in *folderpath* into a dict.

    Keyword argument 'channels' (iterable of channel numbers) restricts
    loading to files named '100_CH<n>.continuous'; otherwise every
    .continuous file found in the folder is loaded.  Keys of the returned
    dict are the file names without the '.continuous' extension; values
    are the dicts returned by loadContinuous.
    """
    data = { }
    if 'channels' in kwargs.keys():
        filelist = ['100_CH' + x + '.continuous' for x in map(str, kwargs['channels'])]
    else:
        filelist = os.listdir(folderpath)
    t0 = time.time()
    numFiles = 0
    for i, f in enumerate(filelist):
        if '.continuous' in f:
            data[f.replace('.continuous', '')] = loadContinuous(os.path.join(folderpath, f), dtype=dtype)
            numFiles += 1
    # Only report the per-file average when something was loaded; the
    # original divided by numFiles unconditionally and raised
    # ZeroDivisionError on a folder with no .continuous files.
    if numFiles > 0:
        print(''.join(('Avg. Load Time: ', str((time.time() - t0) / numFiles), ' sec')))
    print(''.join(('Total Load Time: ', str(time.time() - t0), ' sec')))
    return data
def loadFolderToArray(folderpath, channels = 'all', chprefix = 'CH',
                      dtype = float, session = '0', source = '100'):
    '''Load continuous files in specified folder to a single numpy array. By default all
    CH continous files are loaded in numerical order, ordering can be specified with
    optional channels argument which should be a list of channel numbers.'''
    if channels == 'all':
        channels = _get_sorted_channels(folderpath, chprefix, session, source)

    # Session '0' files carry no session suffix in their names.
    session_suffix = '' if session == '0' else '_' + session
    filelist = [source + '_' + chprefix + str(ch) + session_suffix + '.continuous'
                for ch in channels]

    t0 = time.time()
    numFiles = 1

    # Load the first channel to learn the sample count, then size the
    # output array (samples x channels) before filling the rest.
    first_channel = loadContinuous(os.path.join(folderpath, filelist[0]), dtype)['data']
    data_array = np.zeros([len(first_channel), len(filelist)], dtype)
    data_array[:, 0] = first_channel

    for col, fname in enumerate(filelist[1:], start=1):
        data_array[:, col] = loadContinuous(os.path.join(folderpath, fname), dtype)['data']
        numFiles += 1

    print(''.join(('Avg. Load Time: ', str((time.time() - t0) / numFiles), ' sec')))
    print(''.join(('Total Load Time: ', str(time.time() - t0), ' sec')))
    return data_array
def loadContinuous(filepath, dtype = float):
    """Load a single Open Ephys .continuous file.

    Returns a dict with keys 'header' (parsed file header), 'timestamps'
    (one per 1024-sample record), 'data' (all samples concatenated) and
    'recordingNumber' (one per record).  With dtype=float the samples are
    scaled by the header's bitVolts; with np.int16 the raw values are
    returned.  Raises Exception if the file size or a record is corrupt.
    """
    assert dtype in (float, np.int16), \
        'Invalid data type specified for loadContinous, valid types are float and np.int16'
    print("Loading continuous data...")
    ch = { }
    # 'with' guarantees the handle is closed even when a corrupt file
    # raises below (the original leaked the open file in that case).
    with open(filepath, 'rb') as f:
        fileLength = os.fstat(f.fileno()).st_size
        # calculate number of samples
        recordBytes = fileLength - NUM_HEADER_BYTES
        if recordBytes % RECORD_SIZE != 0:
            raise Exception("File size is not consistent with a continuous file: may be corrupt")
        nrec = recordBytes // RECORD_SIZE
        nsamp = nrec * SAMPLES_PER_RECORD
        # pre-allocate samples
        samples = np.zeros(nsamp, dtype)
        timestamps = np.zeros(nrec)
        recordingNumbers = np.zeros(nrec)
        # start offset of each record's samples within the flat array
        indices = np.arange(0, nsamp + 1, SAMPLES_PER_RECORD, np.dtype(np.int64))

        header = readHeader(f)

        for recordNumber in np.arange(0, nrec):
            # record layout: int64 timestamp, uint16 sample count, uint16
            # recording number, SAMPLES_PER_RECORD big-endian int16 samples,
            # 10-byte end-of-record marker
            timestamps[recordNumber] = np.fromfile(f, np.dtype('<i8'), 1)  # little-endian 64-bit signed integer
            N = np.fromfile(f, np.dtype('<u2'), 1)[0]  # little-endian 16-bit unsigned integer
            if N != SAMPLES_PER_RECORD:
                raise Exception('Found corrupted record in block ' + str(recordNumber))
            recordingNumbers[recordNumber] = np.fromfile(f, np.dtype('>u2'), 1)  # big-endian 16-bit unsigned integer
            if dtype == float:
                # Convert data to float array and convert bits to voltage.
                data = np.fromfile(f, np.dtype('>i2'), N) * float(header['bitVolts'])
            else:
                # Keep data in signed 16 bit integer format.
                data = np.fromfile(f, np.dtype('>i2'), N)
            samples[indices[recordNumber]:indices[recordNumber + 1]] = data
            f.read(10)  # skip the end-of-record marker

    ch['header'] = header
    ch['timestamps'] = timestamps
    ch['data'] = samples  # OR use downsample(samples,1), to save space
    ch['recordingNumber'] = recordingNumbers
    return ch
def loadSpikes(filepath):
    '''
    Loads spike waveforms and timestamps from filepath (should be .spikes file)

    Returns a dict with 'header' plus per-spike arrays ('spikes',
    'timestamps', 'source', 'gain', 'thresh', 'recordingNumber',
    'sortedId'), each trimmed to the number of spikes actually read.
    '''
    data = { }

    print('loading spikes...')

    f = open(filepath, 'rb')
    header = readHeader(f)

    if float(header[' version']) < 0.4:
        raise Exception('Loader is only compatible with .spikes files with version 0.4 or higher')

    data['header'] = header
    numChannels = int(header['num_channels'])
    numSamples = 40 # **NOT CURRENTLY WRITTEN TO HEADER**

    # pre-allocate to the maximum supported spike count; sliced down below
    spikes = np.zeros((MAX_NUMBER_OF_SPIKES, numSamples, numChannels))
    timestamps = np.zeros(MAX_NUMBER_OF_SPIKES)
    source = np.zeros(MAX_NUMBER_OF_SPIKES)
    gain = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
    thresh = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
    sortedId = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
    recNum = np.zeros(MAX_NUMBER_OF_SPIKES)

    currentSpike = 0
    while f.tell() < os.fstat(f.fileno()).st_size:
        # Fixed-layout binary record; the read order below must match the
        # writer.  Several fields are read only to advance the file pointer.
        eventType = np.fromfile(f, np.dtype('<u1'),1) #always equal to 4, discard
        timestamps[currentSpike] = np.fromfile(f, np.dtype('<i8'), 1)
        software_timestamp = np.fromfile(f, np.dtype('<i8'), 1)
        source[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
        numChannels = np.fromfile(f, np.dtype('<u2'), 1)[0]
        numSamples = np.fromfile(f, np.dtype('<u2'), 1)[0]
        sortedId[currentSpike] = np.fromfile(f, np.dtype('<u2'),1)
        electrodeId = np.fromfile(f, np.dtype('<u2'),1)
        channel = np.fromfile(f, np.dtype('<u2'),1)
        color = np.fromfile(f, np.dtype('<u1'), 3)
        pcProj = np.fromfile(f, np.float32, 2)
        sampleFreq = np.fromfile(f, np.dtype('<u2'),1)

        waveforms = np.fromfile(f, np.dtype('<u2'), numChannels*numSamples)
        gain[currentSpike,:] = np.fromfile(f, np.float32, numChannels)
        thresh[currentSpike,:] = np.fromfile(f, np.dtype('<u2'), numChannels)

        recNum[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)

        waveforms_reshaped = np.reshape(waveforms, (numChannels, numSamples))
        waveforms_reshaped = waveforms_reshaped.astype(float)
        waveforms_uv = waveforms_reshaped

        # Recenter the unsigned samples about zero, then divide by the
        # stored gain (scaled by 1000) to convert to voltage units.
        for ch in range(numChannels):
            waveforms_uv[ch, :] -= 32768
            waveforms_uv[ch, :] /= gain[currentSpike, ch]*1000

        spikes[currentSpike] = waveforms_uv.T

        currentSpike += 1

    # currentSpike is post-incremented, so [:currentSpike] keeps every spike
    data['spikes'] = spikes[:currentSpike,:,:]
    data['timestamps'] = timestamps[:currentSpike]
    data['source'] = source[:currentSpike]
    data['gain'] = gain[:currentSpike,:]
    data['thresh'] = thresh[:currentSpike,:]
    data['recordingNumber'] = recNum[:currentSpike]
    data['sortedId'] = sortedId[:currentSpike]

    return data
def loadEvents(filepath):
    """Load an OpenEphys .events file.

    Returns a dict with the parsed 'header' plus one array per event field
    ('channel', 'timestamps', 'eventType', 'nodeId', 'eventId',
    'recordingNumber', 'sampleNum'), each trimmed to the number of events
    actually read.

    Raises Exception for pre-0.4 file versions.
    """
    data = { }

    print('loading events...')

    f = open(filepath,'rb')
    header = readHeader(f)

    if float(header[' version']) < 0.4:
        raise Exception('Loader is only compatible with .events files with version 0.4 or higher')

    data['header'] = header

    # pre-allocate to the maximum supported event count; sliced down below
    index = -1
    channel = np.zeros(MAX_NUMBER_OF_EVENTS)
    timestamps = np.zeros(MAX_NUMBER_OF_EVENTS)
    sampleNum = np.zeros(MAX_NUMBER_OF_EVENTS)
    nodeId = np.zeros(MAX_NUMBER_OF_EVENTS)
    eventType = np.zeros(MAX_NUMBER_OF_EVENTS)
    eventId = np.zeros(MAX_NUMBER_OF_EVENTS)
    recordingNumber = np.zeros(MAX_NUMBER_OF_EVENTS)

    while f.tell() < os.fstat(f.fileno()).st_size:
        index += 1
        # fixed-layout little-endian record; read order must match the writer
        timestamps[index] = np.fromfile(f, np.dtype('<i8'), 1)
        sampleNum[index] = np.fromfile(f, np.dtype('<i2'), 1)
        eventType[index] = np.fromfile(f, np.dtype('<u1'), 1)
        nodeId[index] = np.fromfile(f, np.dtype('<u1'), 1)
        eventId[index] = np.fromfile(f, np.dtype('<u1'), 1)
        channel[index] = np.fromfile(f, np.dtype('<u1'), 1)
        recordingNumber[index] = np.fromfile(f, np.dtype('<u2'), 1)

    f.close()  # loadContinuous closes its handle; do the same here

    # BUGFIX: `index` is the 0-based position of the LAST event written, so
    # the arrays must be sliced to index + 1.  The previous `[:index]`
    # slices silently dropped the final event, and for an empty file (index
    # stays -1) returned MAX_NUMBER_OF_EVENTS - 1 zeros instead of nothing.
    # (loadSpikes avoids this by post-incrementing its counter.)
    n_events = index + 1
    data['channel'] = channel[:n_events]
    data['timestamps'] = timestamps[:n_events]
    data['eventType'] = eventType[:n_events]
    data['nodeId'] = nodeId[:n_events]
    data['eventId'] = eventId[:n_events]
    data['recordingNumber'] = recordingNumber[:n_events]
    data['sampleNum'] = sampleNum[:n_events]

    return data
def readHeader(f):
    """Parse the 1024-byte ASCII header at the start of an OpenEphys file.

    The header is a sequence of ';'-separated 'header.key = value' entries;
    newlines and the 'header.' prefix are stripped and the key/value pairs
    are returned as a dict of strings.
    """
    raw = f.read(1024).decode()
    cleaned = raw.replace('\n', '').replace('header.', '')
    header = {}
    for entry in cleaned.split(';'):
        if '=' not in entry:
            continue
        parts = entry.split(' = ')
        header[parts[0]] = parts[1]
    return header
def downsample(trace,down):
    """Resample `trace` down to len(trace)//down samples via Fourier resampling.

    `down` is the integer decimation factor.  Floor division is used because
    scipy.signal.resample requires an integer sample count — under Python 3
    the old `len/down` expression produced a float and failed.
    """
    n_out = np.shape(trace)[0] // down
    downsampled = scipy.signal.resample(trace, n_out)
    return downsampled
def pack(folderpath,source='100',**kwargs):
    '''Pack per-channel OpenEphys .continuous files into one interleaved .dat.

    Writes 'openephys<suffix>.dat' into `folderpath` for use with the
    KlustaSuite, Neuroscope and Klusters.  See the comments below for the
    supported keyword arguments.
    '''
    #convert single channel open ephys channels to a .dat file for compatibility with the KlustaSuite, Neuroscope and Klusters
    #should not be necessary for versions of open ephys which write data into HDF5 format.
    #loads .continuous files in the specified folder and saves a .DAT in that folder
    #optional arguments:
    #   source: string name of the source that openephys uses as the prefix. is usually 100, if the headstage is the first source added, but can specify something different
    #
    #   data: pre-loaded data to be packed into a .DAT
    #   dref: int specifying a channel # to use as a digital reference. is subtracted from all channels.
    #   order: the order in which the .continuos files are packed into the .DAT. should be a list of .continious channel numbers. length must equal total channels.
    #   suffix: appended to .DAT filename, which is openephys.DAT if no suffix provided.

    #load the openephys data into memory
    if 'data' not in kwargs.keys():
        if 'channels' not in kwargs.keys():
            data = loadFolder(folderpath, dtype = np.int16)
        else:
            data = loadFolder(folderpath, dtype = np.int16, channels=kwargs['channels'])
    else:
        data = kwargs['data']

    #if specified, do the digital referencing
    if 'dref' in kwargs.keys():
        ref = load(os.path.join(folderpath,''.join((source,'_CH',str(kwargs['dref']),'.continuous'))))
        for i,channel in enumerate(data.keys()):
            data[channel]['data'] = data[channel]['data'] - ref['data']

    #specify the order the channels are written in
    if 'order' in kwargs.keys():
        order = kwargs['order']
    else:
        order = list(data)

    #add a suffix, if one was specified
    if 'suffix' in kwargs.keys():
        suffix=kwargs['suffix']
    else:
        suffix=''

    #make a file to write the data back out into .dat format
    outpath = os.path.join(folderpath,''.join(('openephys',suffix,'.dat')))
    out = open(outpath,'wb')

    #go through the data and write it out in the .dat format
    #.dat format specified here: http://neuroscope.sourceforge.net/UserManual/data-files.html
    channelOrder = []
    print(''.join(('...saving .dat to ',outpath,'...')))
    random_datakey = next(iter(data))
    bar = ProgressBar(len(data[random_datakey]['data']))
    # samples are interleaved: for each sample index, write every channel's
    # value as a signed 16-bit integer before moving to the next sample
    for i in range(len(data[random_datakey]['data'])):
        for j in range(len(order)):
            if source in random_datakey:
                ch = data[order[j]]['data']
            else:
                # keys may or may not carry the 'CH' prefix; normalize
                ch = data[''.join(('CH',str(order[j]).replace('CH','')))]['data']
            out.write(struct.pack('h',ch[i]))#signed 16-bit integer

            #figure out which order this thing packed the channels in. only do this once.
            if i == 0:
                channelOrder.append(order[j])

        #update how mucb we have list
        # NOTE(review): under Python 3, len(...)/100 is a float, so
        # `i % float == 0` holds only when i is an exact multiple of it --
        # the bar may update far less often than once per percent; confirm
        # the intended cadence.
        if i%(len(data[random_datakey]['data'])/100)==0:
            bar.animate(i)

    out.close()
    print(''.join(('order: ',str(channelOrder))))
    print(''.join(('.dat saved to ',outpath)))
#**********************************************************
# progress bar class used to show progress of pack()
#stolen from some post on stack overflow
import sys
try:
    # IPython's clear_output allows the bar to redraw in-place in notebooks;
    # fall back to plain stdout animation when IPython is unavailable.
    from IPython.display import clear_output
    have_ipython = True
except ImportError:
    have_ipython = False
class ProgressBar:
    """Minimal text progress bar used by pack(); adapted from Stack Overflow.

    Call `animate(i)` with the current iteration; the bar renders as e.g.
    '[*****50%     ] 5 of 10 complete'.
    """

    def __init__(self, iterations):
        self.iterations = iterations
        self.prog_bar = '[]'
        self.fill_char = '*'
        self.width = 40
        self.__update_amount(0)
        if have_ipython:
            self.animate = self.animate_ipython
        else:
            self.animate = self.animate_noipython

    def animate_ipython(self, iter):
        print('\r', self,)
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def animate_noipython(self, iter):
        # BUGFIX: this method was referenced in __init__ but never defined,
        # so constructing a ProgressBar without IPython raised
        # AttributeError.  Rewrite the bar on one line via carriage return.
        sys.stdout.write('\r' + str(self))
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def update_iteration(self, elapsed_iter):
        # redraw the bar at the new percentage, then append the counter text
        self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
        self.prog_bar += '  %d of %s complete' % (elapsed_iter, self.iterations)

    def __update_amount(self, new_amount):
        # build '[****  ...  ]' then splice the percentage into its middle
        percent_done = int(round((new_amount / 100.0) * 100.0))
        all_full = self.width - 2
        num_hashes = int(round((percent_done / 100.0) * all_full))
        self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
        pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
        pct_string = '%d%%' % percent_done
        self.prog_bar = self.prog_bar[0:pct_place] + \
            (pct_string + self.prog_bar[pct_place + len(pct_string):])

    def __str__(self):
        return str(self.prog_bar)
#*************************************************************
def pack_2(folderpath, filename='', channels='all', chprefix='CH',
           dref=None, session='0', source='100', highpass=0, fs=30000, connected_channels=None):
    '''Alternative version of pack which uses numpy's tofile function to write data.
    pack_2 is much faster than pack and avoids quantization noise incurred in pack due
    to conversion of data to float voltages during loadContinous followed by rounding
    back to integers for packing.

    filename: Name of the output file. By default, it follows the same layout of continuous files,
              but without the channel number, for example, '100_CHs_3.dat' or '100_ADCs.dat'.

    channels:  List of channel numbers specifying order in which channels are packed. By default
               all CH continous files are packed in numerical order.

    chprefix:  String name that defines if channels from headstage, auxiliary or ADC inputs
               will be loaded.

    dref:  Digital referencing - either supply a channel number or 'ave' to reference to the
           average of packed channels.

    source: String name of the source that openephys uses as the prefix. It is usually 100,
            if the headstage is the first source added, but can specify something different.

    highpass: cutoff frequency (Hz) for an optional Butterworth high-pass
              filter applied per channel before referencing; 0 disables it.

    fs: sampling rate in Hz, used only to design the high-pass filter.

    connected_channels: optional boolean mask restricting the 'ave'/'med'
                        reference to the connected channels only.
    '''

    data_array = loadFolderToArray(folderpath, channels, chprefix, np.int16, session, source) # n_samples X n_channels

    # apply high pass filtering
    if highpass:
        print('Filtering dem signals!')
        # create high pass filter
        nn, wn = scipy.signal.buttord(highpass * 2 / fs, highpass * 2 / fs * 0.5, 3, 40)
        num, denom = scipy.signal.butter(nn, wn, btype='high')
        # apply filter
        # NOTE(review): filtfilt returns floats that are written back into
        # the int16 array, so the filtered signal is truncated to integers
        # here -- presumably acceptable; confirm.
        for i in range(data_array.shape[1]):
            data_array[:, i] = scipy.signal.filtfilt(num, denom, data_array[:, i])

    # apply referencing
    if dref:
        if dref == 'ave':
            if connected_channels is None:
                print('Digital referencing to average of all channels.')
                reference = np.mean(data_array, 1)
            else:
                print('Digital referencing to average of %i connected channels.' % sum(connected_channels))
                reference = np.mean(data_array[:, connected_channels], 1)
        elif dref == 'med':
            if connected_channels is None:
                print('Digital referencing to median of all channels.')
                reference = np.median(data_array, 1)
            else:
                print('Digital referencing to median of %i connected channels.' % sum(connected_channels))
                reference = np.median(data_array[:, connected_channels], 1)
        else:
            print('Digital referencing to channel ' + str(dref))
            if channels == 'all':
                channels = _get_sorted_channels(folderpath, chprefix, session, source)
            # copy before subtracting, since the loop below overwrites the
            # reference channel's own column
            reference = deepcopy(data_array[:, channels.index(dref)])
        for i in range(data_array.shape[1]):
            data_array[:, i] = data_array[:, i] - reference

    # session '0' carries no suffix in the output filename
    if session == '0':
        session = ''
    else:
        session = '_' + session

    if not filename: filename = source + '_' + chprefix + 's' + session + '.dat'
    print('Packing data to file: ' + filename)
    data_array.tofile(os.path.join(folderpath, filename))
def _get_sorted_channels(folderpath, chprefix='CH', session='0', source='100'):
    # List the .continuous files for this source/prefix and return their
    # channel numbers in ascending numeric order.
    Files = [f for f in os.listdir(folderpath) if '.continuous' in f
             and '_' + chprefix in f
             and source in f]

    if session == '0':
        # session 0 files have no session suffix, e.g. '100_CH1.continuous'
        Files = [f for f in Files if len(f.split('_')) == 2]
        Chs = sorted([int(f.split('_' + chprefix)[1].split('.')[0]) for f in Files])
    else:
        # session-suffixed files, e.g. '100_CH1_2.continuous' for session '2'
        Files = [f for f in Files if len(f.split('_')) == 3
                 and f.split('.')[0].split('_')[2] == session]
        Chs = sorted([int(f.split('_' + chprefix)[1].split('_')[0]) for f in Files])
return (Chs) | [
"numpy.fromfile",
"numpy.median",
"numpy.dtype",
"numpy.zeros",
"struct.pack",
"time.time",
"numpy.shape",
"numpy.mean",
"numpy.array",
"numpy.arange",
"numpy.reshape",
"sys.stdout.flush",
"os.path.join",
"os.listdir"
] | [((611, 653), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 255]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])\n', (619, 653), True, 'import numpy as np\n'), ((1570, 1581), 'time.time', 'time.time', ([], {}), '()\n', (1579, 1581), False, 'import time\n'), ((2711, 2722), 'time.time', 'time.time', ([], {}), '()\n', (2720, 2722), False, 'import time\n'), ((2918, 2958), 'numpy.zeros', 'np.zeros', (['[n_samples, n_channels]', 'dtype'], {}), '([n_samples, n_channels], dtype)\n', (2926, 2958), True, 'import numpy as np\n'), ((4003, 4025), 'numpy.zeros', 'np.zeros', (['nsamp', 'dtype'], {}), '(nsamp, dtype)\n', (4011, 4025), True, 'import numpy as np\n'), ((4043, 4057), 'numpy.zeros', 'np.zeros', (['nrec'], {}), '(nrec)\n', (4051, 4057), True, 'import numpy as np\n'), ((4081, 4095), 'numpy.zeros', 'np.zeros', (['nrec'], {}), '(nrec)\n', (4089, 4095), True, 'import numpy as np\n'), ((4220, 4238), 'numpy.arange', 'np.arange', (['(0)', 'nrec'], {}), '(0, nrec)\n', (4229, 4238), True, 'import numpy as np\n'), ((5973, 6030), 'numpy.zeros', 'np.zeros', (['(MAX_NUMBER_OF_SPIKES, numSamples, numChannels)'], {}), '((MAX_NUMBER_OF_SPIKES, numSamples, numChannels))\n', (5981, 6030), True, 'import numpy as np\n'), ((6048, 6078), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_SPIKES'], {}), '(MAX_NUMBER_OF_SPIKES)\n', (6056, 6078), True, 'import numpy as np\n'), ((6092, 6122), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_SPIKES'], {}), '(MAX_NUMBER_OF_SPIKES)\n', (6100, 6122), True, 'import numpy as np\n'), ((6134, 6179), 'numpy.zeros', 'np.zeros', (['(MAX_NUMBER_OF_SPIKES, numChannels)'], {}), '((MAX_NUMBER_OF_SPIKES, numChannels))\n', (6142, 6179), True, 'import numpy as np\n'), ((6193, 6238), 'numpy.zeros', 'np.zeros', (['(MAX_NUMBER_OF_SPIKES, numChannels)'], {}), '((MAX_NUMBER_OF_SPIKES, numChannels))\n', (6201, 6238), True, 'import numpy as np\n'), ((6254, 6299), 'numpy.zeros', 'np.zeros', (['(MAX_NUMBER_OF_SPIKES, numChannels)'], {}), '((MAX_NUMBER_OF_SPIKES, 
numChannels))\n', (6262, 6299), True, 'import numpy as np\n'), ((6313, 6343), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_SPIKES'], {}), '(MAX_NUMBER_OF_SPIKES)\n', (6321, 6343), True, 'import numpy as np\n'), ((8515, 8545), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_EVENTS'], {}), '(MAX_NUMBER_OF_EVENTS)\n', (8523, 8545), True, 'import numpy as np\n'), ((8563, 8593), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_EVENTS'], {}), '(MAX_NUMBER_OF_EVENTS)\n', (8571, 8593), True, 'import numpy as np\n'), ((8610, 8640), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_EVENTS'], {}), '(MAX_NUMBER_OF_EVENTS)\n', (8618, 8640), True, 'import numpy as np\n'), ((8654, 8684), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_EVENTS'], {}), '(MAX_NUMBER_OF_EVENTS)\n', (8662, 8684), True, 'import numpy as np\n'), ((8701, 8731), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_EVENTS'], {}), '(MAX_NUMBER_OF_EVENTS)\n', (8709, 8731), True, 'import numpy as np\n'), ((8746, 8776), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_EVENTS'], {}), '(MAX_NUMBER_OF_EVENTS)\n', (8754, 8776), True, 'import numpy as np\n'), ((8799, 8829), 'numpy.zeros', 'np.zeros', (['MAX_NUMBER_OF_EVENTS'], {}), '(MAX_NUMBER_OF_EVENTS)\n', (8807, 8829), True, 'import numpy as np\n'), ((1537, 1559), 'os.listdir', 'os.listdir', (['folderpath'], {}), '(folderpath)\n', (1547, 1559), False, 'import os\n'), ((4154, 4172), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (4162, 4172), True, 'import numpy as np\n'), ((7062, 7091), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32', '(2)'], {}), '(f, np.float32, 2)\n', (7073, 7091), True, 'import numpy as np\n'), ((7255, 7294), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32', 'numChannels'], {}), '(f, np.float32, numChannels)\n', (7266, 7294), True, 'import numpy as np\n'), ((7469, 7517), 'numpy.reshape', 'np.reshape', (['waveforms', '(numChannels, numSamples)'], {}), '(waveforms, (numChannels, numSamples))\n', (7479, 7517), True, 'import numpy as np\n'), ((13805, 
13823), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13821, 13823), False, 'import sys\n'), ((17996, 18030), 'os.path.join', 'os.path.join', (['folderpath', 'filename'], {}), '(folderpath, filename)\n', (18008, 18030), False, 'import os\n'), ((2777, 2814), 'os.path.join', 'os.path.join', (['folderpath', 'filelist[0]'], {}), '(folderpath, filelist[0])\n', (2789, 2814), False, 'import os\n'), ((4326, 4341), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (4334, 4341), True, 'import numpy as np\n'), ((4671, 4686), 'numpy.dtype', 'np.dtype', (['""">u2"""'], {}), "('>u2')\n", (4679, 4686), True, 'import numpy as np\n'), ((6453, 6468), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (6461, 6468), True, 'import numpy as np\n'), ((6550, 6565), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (6558, 6565), True, 'import numpy as np\n'), ((6614, 6629), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (6622, 6629), True, 'import numpy as np\n'), ((6680, 6695), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6688, 6695), True, 'import numpy as np\n'), ((6867, 6882), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6875, 6882), True, 'import numpy as np\n'), ((6923, 6938), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6931, 6938), True, 'import numpy as np\n'), ((6975, 6990), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6983, 6990), True, 'import numpy as np\n'), ((7025, 7040), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (7033, 7040), True, 'import numpy as np\n'), ((7128, 7143), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (7136, 7143), True, 'import numpy as np\n'), ((7183, 7198), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (7191, 7198), True, 'import numpy as np\n'), ((7343, 7358), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (7351, 7358), True, 'import numpy as np\n'), ((7419, 7434), 
'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (7427, 7434), True, 'import numpy as np\n'), ((8946, 8961), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (8954, 8961), True, 'import numpy as np\n'), ((9008, 9023), 'numpy.dtype', 'np.dtype', (['"""<i2"""'], {}), "('<i2')\n", (9016, 9023), True, 'import numpy as np\n'), ((9070, 9085), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (9078, 9085), True, 'import numpy as np\n'), ((9129, 9144), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (9137, 9144), True, 'import numpy as np\n'), ((9189, 9204), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (9197, 9204), True, 'import numpy as np\n'), ((9249, 9264), 'numpy.dtype', 'np.dtype', (['"""<u1"""'], {}), "('<u1')\n", (9257, 9264), True, 'import numpy as np\n'), ((9317, 9332), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (9325, 9332), True, 'import numpy as np\n'), ((18138, 18160), 'os.listdir', 'os.listdir', (['folderpath'], {}), '(folderpath)\n', (18148, 18160), False, 'import os\n'), ((1731, 1758), 'os.path.join', 'os.path.join', (['folderpath', 'f'], {}), '(folderpath, f)\n', (1743, 1758), False, 'import os\n'), ((3088, 3115), 'os.path.join', 'os.path.join', (['folderpath', 'f'], {}), '(folderpath, f)\n', (3100, 3115), False, 'import os\n'), ((4409, 4424), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (4417, 4424), True, 'import numpy as np\n'), ((5047, 5062), 'numpy.dtype', 'np.dtype', (['""">i2"""'], {}), "('>i2')\n", (5055, 5062), True, 'import numpy as np\n'), ((6737, 6752), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6745, 6752), True, 'import numpy as np\n'), ((6796, 6811), 'numpy.dtype', 'np.dtype', (['"""<u2"""'], {}), "('<u2')\n", (6804, 6811), True, 'import numpy as np\n'), ((9979, 9994), 'numpy.shape', 'np.shape', (['trace'], {}), '(trace)\n', (9987, 9994), True, 'import numpy as np\n'), ((12660, 12683), 'struct.pack', 'struct.pack', 
(['"""h"""', 'ch[i]'], {}), "('h', ch[i])\n", (12671, 12683), False, 'import struct\n'), ((16763, 16785), 'numpy.mean', 'np.mean', (['data_array', '(1)'], {}), '(data_array, 1)\n', (16770, 16785), True, 'import numpy as np\n'), ((16940, 16985), 'numpy.mean', 'np.mean', (['data_array[:, connected_channels]', '(1)'], {}), '(data_array[:, connected_channels], 1)\n', (16947, 16985), True, 'import numpy as np\n'), ((4848, 4863), 'numpy.dtype', 'np.dtype', (['""">i2"""'], {}), "('>i2')\n", (4856, 4863), True, 'import numpy as np\n'), ((17157, 17181), 'numpy.median', 'np.median', (['data_array', '(1)'], {}), '(data_array, 1)\n', (17166, 17181), True, 'import numpy as np\n'), ((17335, 17382), 'numpy.median', 'np.median', (['data_array[:, connected_channels]', '(1)'], {}), '(data_array[:, connected_channels], 1)\n', (17344, 17382), True, 'import numpy as np\n'), ((1929, 1940), 'time.time', 'time.time', ([], {}), '()\n', (1938, 1940), False, 'import time\n'), ((3286, 3297), 'time.time', 'time.time', ([], {}), '()\n', (3295, 3297), False, 'import time\n'), ((1846, 1857), 'time.time', 'time.time', ([], {}), '()\n', (1855, 1857), False, 'import time\n'), ((3203, 3214), 'time.time', 'time.time', ([], {}), '()\n', (3212, 3214), False, 'import time\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding added to a (seq, batch, dim) input.

    Precomputes the standard sin/cos table once and stores it as a buffer.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # stored as (max_len, 1, d_model) so it broadcasts over the batch dim
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # not used in the final model
        x = x + self.pe[:x.shape[0], :]
        return self.dropout(x)
# only for ablation / not used in the final model
class TimeEncoding(nn.Module):
    """Adds normalized time (frame index / (length - 1)) to each frame.

    Ablation-only module; d_model is accepted for signature compatibility
    but unused.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(TimeEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, mask, lengths):
        # fraction of the sequence elapsed at each (batch, frame) position
        frac = mask * 1 / (lengths[..., None] - 1)
        steps = torch.arange(frac.shape[1], device=x.device)
        progress = frac[:, None] * steps[None, :]
        progress = progress[:, 0].T  # -> (nframes, bs)
        # broadcast the scalar time over the feature dimension
        return self.dropout(x + progress[..., None])
class Encoder_TRANSFORMER(nn.Module):
    """Transformer encoder mapping a motion sequence to a latent vector.

    Two learned, class-conditioned query tokens (mu and sigma) are prepended
    to the embedded pose sequence; after the transformer, the first output
    token is read off as `mu`.
    """

    def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes, translation, pose_rep, glob, glob_rot,
                 latent_dim=256, ff_size=1024, num_layers=4, num_heads=4, dropout=0.1,
                 ablation=None, activation="gelu", **kargs):
        super().__init__()

        self.modeltype = modeltype
        self.njoints = njoints
        self.nfeats = nfeats
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.pose_rep = pose_rep
        self.glob = glob
        self.glob_rot = glob_rot
        self.translation = translation

        self.latent_dim = latent_dim
        self.ff_size = ff_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.dropout = dropout
        self.ablation = ablation
        self.activation = activation

        self.input_feats = self.njoints * self.nfeats
        self.normalize_output = kargs.get('normalize_encoder_output', False)

        if self.ablation == "average_encoder":
            self.mu_layer = nn.Linear(self.latent_dim, self.latent_dim)
            self.sigma_layer = nn.Linear(self.latent_dim, self.latent_dim)
        # elif self.ablation == "extra_token":
        #     self.extra_token = nn.Parameter(torch.randn(1, self.latent_dim))
        else:
            # one learnable mu/sigma query token per action class
            self.muQuery = nn.Parameter(torch.randn(self.num_classes, self.latent_dim))
            self.sigmaQuery = nn.Parameter(torch.randn(self.num_classes, self.latent_dim))

        self.skelEmbedding = nn.Linear(self.input_feats, self.latent_dim)
        self.sequence_pos_encoder = PositionalEncoding(self.latent_dim, self.dropout)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=self.latent_dim,
            nhead=self.num_heads,
            dim_feedforward=self.ff_size,
            dropout=self.dropout,
            activation=self.activation,
        )
        self.seqTransEncoder = nn.TransformerEncoder(encoder_layer, num_layers=self.num_layers)

    def forward(self, batch):
        x, y, mask = batch["x"], batch["y"], batch["mask"]
        bs, njoints, nfeats, nframes = x.shape

        # (bs, njoints, nfeats, nframes) -> (nframes, bs, njoints * nfeats)
        flattened = x.permute((3, 0, 1, 2)).reshape(nframes, bs, njoints * nfeats)

        # embedding of the skeleton
        embedded = self.skelEmbedding(flattened)

        # prepend the class-conditioned mu and sigma query tokens
        xseq = torch.cat((self.muQuery[y][None], self.sigmaQuery[y][None], embedded), axis=0)

        # add positional encoding
        xseq = self.sequence_pos_encoder(xseq)

        # extend the padding mask so the two query tokens are always attended
        token_mask = torch.ones((bs, 2), dtype=bool, device=x.device)
        maskseq = torch.cat((token_mask, mask), axis=1)

        final = self.seqTransEncoder(xseq, src_key_padding_mask=~maskseq)
        mu = final[0]
        logvar = final[1]  # computed for symmetry but not returned

        if self.normalize_output:
            mu = mu / mu.norm(dim=-1, keepdim=True)

        return {"mu": mu}
class Decoder_TRANSFORMER(nn.Module):
    """Transformer decoder that generates a motion sequence from a latent z.

    Time queries (positional encodings of an all-zero sequence) attend to the
    latent vector through a TransformerDecoder; the output is projected back
    to pose features and zeroed on padded frames.
    """

    def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes, translation, pose_rep, glob, glob_rot,
                 latent_dim=256, ff_size=1024, num_layers=4, num_heads=4, dropout=0.1, activation="gelu",
                 ablation=None, **kargs):
        super().__init__()

        self.modeltype = modeltype
        self.njoints = njoints
        self.nfeats = nfeats
        self.num_frames = num_frames
        self.num_classes = num_classes
        self.pose_rep = pose_rep
        self.glob = glob
        self.glob_rot = glob_rot
        self.translation = translation

        self.latent_dim = latent_dim
        self.ff_size = ff_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.dropout = dropout
        self.ablation = ablation
        self.activation = activation

        self.input_feats = self.njoints*self.nfeats
        self.normalize_decoder_input = kargs.get('normalize_decoder_input', False)

        # only for ablation / not used in the final model
        if self.ablation == "zandtime":
            self.ztimelinear = nn.Linear(self.latent_dim + self.num_classes, self.latent_dim)
        else:
            self.actionBiases = nn.Parameter(torch.randn(self.num_classes, self.latent_dim))

        # only for ablation / not used in the final model
        if self.ablation == "time_encoding":
            # NOTE(review): TimeEncoding's first positional parameter is
            # d_model (unused there), so this binds d_model=self.dropout and
            # leaves the encoding's dropout at its 0.1 default — confirm
            # whether TimeEncoding(self.latent_dim, self.dropout) was meant.
            self.sequence_pos_encoder = TimeEncoding(self.dropout)
        else:
            self.sequence_pos_encoder = PositionalEncoding(self.latent_dim, self.dropout)

        seqTransDecoderLayer = nn.TransformerDecoderLayer(d_model=self.latent_dim,
                                                          nhead=self.num_heads,
                                                          dim_feedforward=self.ff_size,
                                                          dropout=self.dropout,
                                                          activation=activation)
        self.seqTransDecoder = nn.TransformerDecoder(seqTransDecoderLayer,
                                                     num_layers=self.num_layers)

        self.finallayer = nn.Linear(self.latent_dim, self.input_feats)

    def forward(self, batch, use_text_emb=False):
        z, mask, lengths = batch["z"], batch["mask"], batch["lengths"]
        if use_text_emb:
            z = batch["clip_text_emb"]
        latent_dim = z.shape[1]
        bs, nframes = mask.shape
        njoints, nfeats = self.njoints, self.nfeats

        # only for ablation / not used in the final model
        if self.ablation == "zandtime":
            # BUGFIX: `y` was referenced here without ever being defined
            # (NameError); fetch the class labels from the batch, as the
            # encoder does.
            y = batch["y"]
            yoh = F.one_hot(y, self.num_classes)
            z = torch.cat((z, yoh), axis=1)
            z = self.ztimelinear(z)
            z = z[None]  # sequence of size 1
        else:
            # only for ablation / not used in the final model
            if self.ablation == "concat_bias":
                # BUGFIX: `y` was also undefined in this branch.
                y = batch["y"]
                # sequence of size 2
                z = torch.stack((z, self.actionBiases[y]), axis=0)
            else:
                # shift the latent noise vector to be the action noise
                # z = z + self.actionBiases[y] # TODO - REMOVED HERE BIAS IN ENCODER
                z = z[None]  # sequence of size 1

        timequeries = torch.zeros(nframes, bs, latent_dim, device=z.device)

        # only for ablation / not used in the final model
        if self.ablation == "time_encoding":
            timequeries = self.sequence_pos_encoder(timequeries, mask, lengths)
        else:
            timequeries = self.sequence_pos_encoder(timequeries)

        if self.normalize_decoder_input:
            z = z / torch.norm(z, dim=-1, keepdim=True)

        output = self.seqTransDecoder(tgt=timequeries, memory=z,
                                      tgt_key_padding_mask=~mask)

        output = self.finallayer(output).reshape(nframes, bs, njoints, nfeats)

        # zero for padded area
        output[~mask.T] = 0
        output = output.permute(1, 2, 3, 0)

        if use_text_emb:
            batch["txt_output"] = output
        else:
            batch["output"] = output
        return batch
| [
"torch.nn.Dropout",
"torch.nn.TransformerEncoder",
"torch.ones",
"torch.nn.TransformerDecoderLayer",
"torch.stack",
"torch.nn.TransformerDecoder",
"numpy.log",
"torch.norm",
"torch.nn.TransformerEncoderLayer",
"torch.cat",
"torch.nn.functional.one_hot",
"torch.randn",
"torch.cos",
"torch.a... | [((259, 280), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (269, 280), True, 'import torch.nn as nn\n'), ((295, 324), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {}), '(max_len, d_model)\n', (306, 324), False, 'import torch\n'), ((520, 550), 'torch.sin', 'torch.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (529, 550), False, 'import torch\n'), ((573, 603), 'torch.cos', 'torch.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (582, 603), False, 'import torch\n'), ((1044, 1065), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (1054, 1065), True, 'import torch.nn as nn\n'), ((2967, 3011), 'torch.nn.Linear', 'nn.Linear', (['self.input_feats', 'self.latent_dim'], {}), '(self.input_feats, self.latent_dim)\n', (2976, 3011), True, 'import torch.nn as nn\n'), ((3148, 3310), 'torch.nn.TransformerEncoderLayer', 'nn.TransformerEncoderLayer', ([], {'d_model': 'self.latent_dim', 'nhead': 'self.num_heads', 'dim_feedforward': 'self.ff_size', 'dropout': 'self.dropout', 'activation': 'self.activation'}), '(d_model=self.latent_dim, nhead=self.num_heads,\n dim_feedforward=self.ff_size, dropout=self.dropout, activation=self.\n activation)\n', (3174, 3310), True, 'import torch.nn as nn\n'), ((3565, 3636), 'torch.nn.TransformerEncoder', 'nn.TransformerEncoder', (['seqTransEncoderLayer'], {'num_layers': 'self.num_layers'}), '(seqTransEncoderLayer, num_layers=self.num_layers)\n', (3586, 3636), True, 'import torch.nn as nn\n'), ((4029, 4100), 'torch.cat', 'torch.cat', (['(self.muQuery[y][None], self.sigmaQuery[y][None], x)'], {'axis': '(0)'}), '((self.muQuery[y][None], self.sigmaQuery[y][None], x), axis=0)\n', (4038, 4100), False, 'import torch\n'), ((4273, 4321), 'torch.ones', 'torch.ones', (['(bs, 2)'], {'dtype': 'bool', 'device': 'x.device'}), '((bs, 2), dtype=bool, device=x.device)\n', (4283, 4321), False, 'import torch\n'), ((4341, 4382), 'torch.cat', 'torch.cat', 
(['(muandsigmaMask, mask)'], {'axis': '(1)'}), '((muandsigmaMask, mask), axis=1)\n', (4350, 4382), False, 'import torch\n'), ((6289, 6441), 'torch.nn.TransformerDecoderLayer', 'nn.TransformerDecoderLayer', ([], {'d_model': 'self.latent_dim', 'nhead': 'self.num_heads', 'dim_feedforward': 'self.ff_size', 'dropout': 'self.dropout', 'activation': 'activation'}), '(d_model=self.latent_dim, nhead=self.num_heads,\n dim_feedforward=self.ff_size, dropout=self.dropout, activation=activation)\n', (6315, 6441), True, 'import torch.nn as nn\n'), ((6701, 6772), 'torch.nn.TransformerDecoder', 'nn.TransformerDecoder', (['seqTransDecoderLayer'], {'num_layers': 'self.num_layers'}), '(seqTransDecoderLayer, num_layers=self.num_layers)\n', (6722, 6772), True, 'import torch.nn as nn\n'), ((6861, 6905), 'torch.nn.Linear', 'nn.Linear', (['self.latent_dim', 'self.input_feats'], {}), '(self.latent_dim, self.input_feats)\n', (6870, 6905), True, 'import torch.nn as nn\n'), ((7965, 8018), 'torch.zeros', 'torch.zeros', (['nframes', 'bs', 'latent_dim'], {'device': 'z.device'}), '(nframes, bs, latent_dim, device=z.device)\n', (7976, 8018), False, 'import torch\n'), ((2491, 2534), 'torch.nn.Linear', 'nn.Linear', (['self.latent_dim', 'self.latent_dim'], {}), '(self.latent_dim, self.latent_dim)\n', (2500, 2534), True, 'import torch.nn as nn\n'), ((2566, 2609), 'torch.nn.Linear', 'nn.Linear', (['self.latent_dim', 'self.latent_dim'], {}), '(self.latent_dim, self.latent_dim)\n', (2575, 2609), True, 'import torch.nn as nn\n'), ((5804, 5866), 'torch.nn.Linear', 'nn.Linear', (['(self.latent_dim + self.num_classes)', 'self.latent_dim'], {}), '(self.latent_dim + self.num_classes, self.latent_dim)\n', (5813, 5866), True, 'import torch.nn as nn\n'), ((7334, 7364), 'torch.nn.functional.one_hot', 'F.one_hot', (['y', 'self.num_classes'], {}), '(y, self.num_classes)\n', (7343, 7364), True, 'import torch.nn.functional as F\n'), ((7381, 7408), 'torch.cat', 'torch.cat', (['(z, yoh)'], {'axis': '(1)'}), '((z, yoh), 
axis=1)\n', (7390, 7408), False, 'import torch\n'), ((344, 387), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {'dtype': 'torch.float'}), '(0, max_len, dtype=torch.float)\n', (356, 387), False, 'import torch\n'), ((1186, 1230), 'torch.arange', 'torch.arange', (['time.shape[1]'], {'device': 'x.device'}), '(time.shape[1], device=x.device)\n', (1198, 1230), False, 'import torch\n'), ((2790, 2836), 'torch.randn', 'torch.randn', (['self.num_classes', 'self.latent_dim'], {}), '(self.num_classes, self.latent_dim)\n', (2801, 2836), False, 'import torch\n'), ((2881, 2927), 'torch.randn', 'torch.randn', (['self.num_classes', 'self.latent_dim'], {}), '(self.num_classes, self.latent_dim)\n', (2892, 2927), False, 'import torch\n'), ((5926, 5972), 'torch.randn', 'torch.randn', (['self.num_classes', 'self.latent_dim'], {}), '(self.num_classes, self.latent_dim)\n', (5937, 5972), False, 'import torch\n'), ((7671, 7717), 'torch.stack', 'torch.stack', (['(z, self.actionBiases[y])'], {'axis': '(0)'}), '((z, self.actionBiases[y]), axis=0)\n', (7682, 7717), False, 'import torch\n'), ((8352, 8387), 'torch.norm', 'torch.norm', (['z'], {'dim': '(-1)', 'keepdim': '(True)'}), '(z, dim=-1, keepdim=True)\n', (8362, 8387), False, 'import torch\n'), ((430, 457), 'torch.arange', 'torch.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (442, 457), False, 'import torch\n'), ((470, 485), 'numpy.log', 'np.log', (['(10000.0)'], {}), '(10000.0)\n', (476, 485), True, 'import numpy as np\n')] |
import numpy as np
import numpy.ma as ma
from numba import njit
import pairwise
import common
from common import Models, debug
from mutrel import Mutrel
import util
def _check_clusters(variants, clusters, garbage):
  """Verify that `clusters` plus `garbage` exactly partition the variant IDs.

  Every cluster must be non-empty, no variant may appear both in a cluster
  and in the garbage list, and together they must cover all variants.
  """
  for members in clusters:
    assert len(members) > 0
  all_vids = set(common.extract_vids(variants))
  garbage_vids = set(garbage)
  clustered_vids = {vid for members in clusters for vid in members}
  assert len(clustered_vids & garbage_vids) == 0
  assert all_vids == (clustered_vids | garbage_vids)
def use_pre_existing(variants, logprior, parallel, clusters, garbage):
  """Build supervariants from pre-specified clusters and score their pairwise relations.

  Returns a tuple of (supervariants, posterior, evidence, clusters, garbage),
  echoing back the caller-provided clustering after sanity-checking it.
  """
  supervars = make_cluster_supervars(clusters, variants)
  clust_posterior, clust_evidence = pairwise.calc_posterior(
    supervars,
    logprior,
    rel_type='supervariant',
    parallel=parallel,
  )
  _check_clusters(variants, clusters, garbage)
  return (supervars, clust_posterior, clust_evidence, clusters, garbage)
# This code is currently unused. Perhaps I can implement a garbage-detection
# algorithm in the future using it.
def _discard_garbage(clusters, mutrel_posterior, mutrel_evidence):
  """Iteratively remove the cluster most often classified as garbage.

  Repeatedly finds the cluster whose pairwise relations are most frequently
  assigned the `Models.garbage` model, moves its variants into the garbage
  list, and drops it from the mutrel matrices, until no garbage pairs remain.

  Returns:
    (clusters, garbage, mutrel_posterior, mutrel_evidence) after removal.

  NOTE(review): `NUM_MODELS` is not defined by any import visible in this
  file -- presumably it should come from `common`; confirm before reviving
  this code.
  """
  garbage = []
  while True:
    M = len(clusters)
    assert len(mutrel_posterior.vids) == M
    assert mutrel_posterior.rels.shape == (M, M, NUM_MODELS)
    # A pair counts as "garbage" when the garbage model has the highest posterior.
    garbage_pairs = np.argmax(mutrel_posterior.rels, axis=2) == Models.garbage
    if not np.any(garbage_pairs):
      break
    # Greedily remove the cluster involved in the most garbage pairs.
    num_garbage = np.sum(garbage_pairs, axis=1)
    most_garbage = np.argmax(num_garbage)
    debug('most_garbage=', mutrel_posterior.vids[most_garbage], 'cluster=', clusters[most_garbage], 'garb_post=', max(mutrel_posterior.rels[most_garbage,:,Models.garbage]))
    garbage += clusters[most_garbage]
    del clusters[most_garbage]
    mutrel_posterior = pairwise.remove_variants(mutrel_posterior, (most_garbage,))
    mutrel_evidence = pairwise.remove_variants(mutrel_evidence, (most_garbage,))
  return (clusters, garbage, mutrel_posterior, mutrel_evidence)
def _make_supervar(name, variants):
assert len(variants) > 0
N = np.array([var['total_reads'] for var in variants])
V = np.array([var['var_reads'] for var in variants])
omega_v = np.array([var['omega_v'] for var in variants])
M, S = N.shape
N_hat = 2*N*omega_v
V_hat = np.minimum(V, N_hat)
omega_v_hat = 0.5 * np.ones(S)
# In creating a supervariant, we must rescale either `V` or `N` (the variant
# or total read counts, respectively), as we're fixing the supervariant's
# `omega_v`. We want the ratio `V / N*omega_v` to remain constant, as this
# represents the "fraction of informative reads that are variant reads". That
# is, `N * omega_v` indicates the number of reads that *could* be variant,
# given what we know of the locus' copy-number state.
#
# As we will fix `omega_v` at 0.5 for the rescaled variant, and we want `V /
# N*omega_v` to remain constant, we can either change `V` or `N` to maintain
# the relationship `V / N*omega_v = V_hat / N_hat*omega_v`. We can solve this
# equation two ways, depending on whether we modify `V` or `N`:
#
# 1. V_hat = V / 2*omega_v
# 2. N_hat = 2*N*omega_v
#
# The advantage to approach 2 is that, when `omega_v` is small, it preserves
# the binomial variance. We have `var = np(1 - p) =~ np = E[X]` when `p` is
# small, meaning the variance scales with the mean when `p` is small. To
# maintain the variance, we should try to maintain `np`, and so since we're
# making `p` bigger, we should make `N` smaller, rather than changing `V`.
#
# Also, this means we can avoid any weird hackery when `omega_v = 0`, since
# we don't have to add edge case checks to avoid division by zero.
#
# Another argument: since we don't have a proper sequencing noise model, a
# small number of variant reads can be assumed to be noise regardless of what
# `omega_v` is. If `omega_v` is small and we observe a couple variant reads,
# we can assume those are solely noise. So, we shouldn't rescale `V` to be
# really large, which is what we formerly did under solution 1.
svar = {
'id': name,
'name': name,
'chrom': None,
'pos': None,
'omega_v': omega_v_hat,
'var_reads': np.round(np.sum(V_hat, axis=0)).astype(np.int),
'total_reads': np.round(np.sum(N_hat, axis=0)).astype(np.int),
}
svar['ref_reads'] = svar['total_reads'] - svar['var_reads']
T = ma.masked_equal(svar['total_reads'], 0)
svar['vaf'] = np.array(svar['var_reads'] / T)
return svar
def make_cluster_supervars(clusters, variants):
  """Create one supervariant per cluster, keyed 'S1', 'S2', ... in order."""
  supervars = {}
  for num, cluster in enumerate(clusters, start=1):
    assert len(cluster) > 0
    members = [variants[vid] for vid in cluster]
    svid = 'S%s' % num
    supervars[svid] = _make_supervar(svid, members)
  return supervars
def make_superclusters(supervars):
  """Place each supervariant in its own singleton cluster."""
  svids = common.extract_vids(supervars)
  return [[svid] for svid in svids]
| [
"pairwise.remove_variants",
"numpy.minimum",
"numpy.sum",
"pairwise.calc_posterior",
"numpy.argmax",
"numpy.ma.masked_equal",
"numpy.ones",
"common.extract_vids",
"numpy.any",
"numpy.array"
] | [((272, 301), 'common.extract_vids', 'common.extract_vids', (['variants'], {}), '(variants)\n', (291, 301), False, 'import common\n'), ((659, 751), 'pairwise.calc_posterior', 'pairwise.calc_posterior', (['supervars', 'logprior'], {'rel_type': '"""supervariant"""', 'parallel': 'parallel'}), "(supervars, logprior, rel_type='supervariant',\n parallel=parallel)\n", (682, 751), False, 'import pairwise\n'), ((1963, 2013), 'numpy.array', 'np.array', (["[var['total_reads'] for var in variants]"], {}), "([var['total_reads'] for var in variants])\n", (1971, 2013), True, 'import numpy as np\n'), ((2020, 2068), 'numpy.array', 'np.array', (["[var['var_reads'] for var in variants]"], {}), "([var['var_reads'] for var in variants])\n", (2028, 2068), True, 'import numpy as np\n'), ((2081, 2127), 'numpy.array', 'np.array', (["[var['omega_v'] for var in variants]"], {}), "([var['omega_v'] for var in variants])\n", (2089, 2127), True, 'import numpy as np\n'), ((2178, 2198), 'numpy.minimum', 'np.minimum', (['V', 'N_hat'], {}), '(V, N_hat)\n', (2188, 2198), True, 'import numpy as np\n'), ((4287, 4326), 'numpy.ma.masked_equal', 'ma.masked_equal', (["svar['total_reads']", '(0)'], {}), "(svar['total_reads'], 0)\n", (4302, 4326), True, 'import numpy.ma as ma\n'), ((4343, 4374), 'numpy.array', 'np.array', (["(svar['var_reads'] / T)"], {}), "(svar['var_reads'] / T)\n", (4351, 4374), True, 'import numpy as np\n'), ((4759, 4789), 'common.extract_vids', 'common.extract_vids', (['supervars'], {}), '(supervars)\n', (4778, 4789), False, 'import common\n'), ((1349, 1378), 'numpy.sum', 'np.sum', (['garbage_pairs'], {'axis': '(1)'}), '(garbage_pairs, axis=1)\n', (1355, 1378), True, 'import numpy as np\n'), ((1398, 1420), 'numpy.argmax', 'np.argmax', (['num_garbage'], {}), '(num_garbage)\n', (1407, 1420), True, 'import numpy as np\n'), ((1687, 1746), 'pairwise.remove_variants', 'pairwise.remove_variants', (['mutrel_posterior', '(most_garbage,)'], {}), '(mutrel_posterior, (most_garbage,))\n', (1711, 
1746), False, 'import pairwise\n'), ((1769, 1827), 'pairwise.remove_variants', 'pairwise.remove_variants', (['mutrel_evidence', '(most_garbage,)'], {}), '(mutrel_evidence, (most_garbage,))\n', (1793, 1827), False, 'import pairwise\n'), ((2221, 2231), 'numpy.ones', 'np.ones', (['S'], {}), '(S)\n', (2228, 2231), True, 'import numpy as np\n'), ((1226, 1266), 'numpy.argmax', 'np.argmax', (['mutrel_posterior.rels'], {'axis': '(2)'}), '(mutrel_posterior.rels, axis=2)\n', (1235, 1266), True, 'import numpy as np\n'), ((1296, 1317), 'numpy.any', 'np.any', (['garbage_pairs'], {}), '(garbage_pairs)\n', (1302, 1317), True, 'import numpy as np\n'), ((4109, 4130), 'numpy.sum', 'np.sum', (['V_hat'], {'axis': '(0)'}), '(V_hat, axis=0)\n', (4115, 4130), True, 'import numpy as np\n'), ((4176, 4197), 'numpy.sum', 'np.sum', (['N_hat'], {'axis': '(0)'}), '(N_hat, axis=0)\n', (4182, 4197), True, 'import numpy as np\n')] |
import tensorflow as tf
from src.base.base_test import BaseTest
from tqdm import tqdm
import numpy as np
import cv2
class SimpleTester(BaseTest):
    """Evaluation loop that reports mean loss and accuracy over the test set."""

    def __init__(self, sess, model, data, config, logger):
        super().__init__(sess, model, data, config, logger)

    def test(self):
        """Run one pass over all test batches and print aggregate metrics."""
        progress = tqdm(range(self.data.num_batches_test))
        step_losses = []
        step_accs = []
        for _ in progress:
            step_loss, step_acc = self.test_step()
            step_losses.append(step_loss)
            step_accs.append(step_acc)
        loss = np.mean(step_losses)
        acc = np.mean(step_accs)
        print("test_accuracy: ",
              acc * 100, "% train_loss: ", loss)

    def test_step(self):
        """Evaluate one test batch; returns (loss, accuracy) from the session run."""
        batch_x, batch_y = self.data.next_batch(batch_type="test")
        feed = {
            self.model.x: batch_x,
            self.model.y: batch_y,
            self.model.is_training: False,
            self.model.hold_prob: 1.0,
        }
        loss, acc = self.sess.run(
            [self.model.cross_entropy, self.model.accuracy], feed_dict=feed)
        return loss, acc
| [
"numpy.mean"
] | [((520, 535), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (527, 535), True, 'import numpy as np\n'), ((550, 563), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (557, 563), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
import pdb
import numpy as np
EPS = 1e-6
def ncc(a, v, zero_norm=True):
    """Normalised cross-correlation between two arrays (flattened first).

    With `zero_norm=True` both inputs are mean-centred, giving the classic
    zero-normalised NCC in [-1, 1]; otherwise only std/length scaling is done.
    Returns a length-1 numpy array (np.correlate of fully-overlapping inputs).
    """
    a = a.flatten()
    v = v.flatten()
    n = len(a)
    if zero_norm:
        a = (a - np.mean(a)) / (np.std(a) * n)
        v = (v - np.mean(v)) / np.std(v)
    else:
        a = a / (np.std(a) * n)
        v = v / np.std(v)
    return np.correlate(a, v)
def variance_ncc_dist(sample_arr, gt_arr):
    """Mean NCC between per-sample cross-entropy maps and per-ground-truth maps.

    Appears to implement the S_NCC-style diversity metric used to compare a
    set of segmentation samples against multiple ground-truth annotations --
    TODO(review): confirm against the paper this repo follows.

    :param sample_arr: expected shape (1, N, X, Y) before the [0] indexing below
    :param gt_arr: expected shape (1, M, X, Y) before the [0] indexing below
    :return: scalar, average over the M ground truths of ncc(E_ss, E_sy[j])
    """
    def pixel_wise_xent(m_samp, m_gt, eps=1e-8):
        # Per-pixel cross-entropy of the ground truth against the sample
        # probabilities; eps guards the log against zero probabilities.
        log_samples = np.log(m_samp + eps)
        return -1.0*np.sum(m_gt*log_samples, axis=-1)
    """
    :param sample_arr: expected shape N x X x Y
    :param gt_arr: M x X x Y
    :return:
    """
    # pdb.set_trace()
    # Strip the leading batch dimension (batch size assumed to be 1).
    gt_arr = gt_arr[0]
    sample_arr = sample_arr[0]
    mean_seg = np.mean(sample_arr, axis=0)
    N = sample_arr.shape[0]
    M = gt_arr.shape[0]
    sX = sample_arr.shape[1]
    sY = sample_arr.shape[2]
    # E_ss: cross-entropy of each sample against the mean segmentation.
    E_ss_arr = np.zeros((N,sX,sY))
    for i in range(N):
        E_ss_arr[i,...] = pixel_wise_xent(sample_arr[i,...], mean_seg)
        # print('pixel wise xent')
        # plt.imshow( E_ss_arr[i,...])
        # plt.show()
    # pdb.set_trace()
    E_ss = np.mean(E_ss_arr, axis=0)
    # E_sy: cross-entropy of each sample against each ground truth, then
    # averaged over samples.
    E_sy_arr = np.zeros((M,N, sX, sY))
    for j in range(M):
        for i in range(N):
            E_sy_arr[j,i, ...] = pixel_wise_xent(sample_arr[i,...], gt_arr[j,...])
    E_sy = np.mean(E_sy_arr, axis=1)
    ncc_list = []
    # pdb.set_trace()
    for j in range(M):
        ncc_list.append(ncc(E_ss, E_sy[j,...]))
    return (1/M)*sum(np.array(ncc_list))
def pdist(a, b):
    """Mean pairwise (1 - IoU) distance between two batches of binary masks.

    `a` has shape (B, N, H, W) and `b` shape (B, M, H, W); every mask in `a`
    is compared against every mask in `b` and the (1 - IoU) values are
    averaged over both sets. EPS stabilises empty-mask pairs.
    """
    N = a.shape[1]
    M = b.shape[1]
    H = a.shape[-2]
    W = a.shape[-1]
    # Tile both batches so each (a_i, b_j) pair lines up elementwise.
    a_tiled = a.repeat(1, M, 1, 1).view(-1, N, M, H, W)
    b_tiled = b.repeat(1, N, 1, 1).view(-1, M, N, H, W).transpose(1, 2)
    intersection = (a_tiled & b_tiled).float().sum(-1).sum(-1) + EPS
    union = (a_tiled | b_tiled).float().sum(-1).sum(-1) + EPS
    iou = intersection / union
    return (1 - iou).mean(-1).mean(-1)
def ged(seg, prd):
    """Generalised energy distance between two sets of binary segmentations.

    Computed as 2*E[d(S, P)] - E[d(S, S')] - E[d(P, P')] where d is the
    (1 - IoU) distance from `pdist`.
    """
    # pdb.set_trace()
    seg = seg.type(torch.ByteTensor)
    prd = prd.type_as(seg)
    cross_dist = pdist(seg, prd)
    seg_self_dist = pdist(seg, seg)
    prd_self_dist = pdist(prd, prd)
    return 2 * cross_dist - seg_self_dist - prd_self_dist
def truncated_normal_(tensor, mean=0, std=1):
    """In-place fill of `tensor` with samples from a truncated normal.

    For each element, four standard-normal candidates are drawn and the first
    one inside (-2, 2) is kept (resampling-style truncation), then the result
    is scaled by `std` and shifted by `mean`.
    """
    shape = tensor.shape
    # Draw 4 candidates per element along a trailing axis.
    candidates = tensor.new_empty(shape + (4,)).normal_()
    in_range = (candidates < 2) & (candidates > -2)
    # Index of an in-range candidate for every element (max over booleans).
    pick = in_range.max(-1, keepdim=True)[1]
    tensor.data.copy_(candidates.gather(-1, pick).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
def init_weights(m):
    """Kaiming-normal init for conv weights; biases get a small truncated normal.

    Only exact Conv2d/ConvTranspose2d instances are touched (exact-type check
    on purpose, matching the original semantics); other modules pass through.
    """
    if type(m) in (nn.Conv2d, nn.ConvTranspose2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
        truncated_normal_(m.bias, mean=0, std=0.001)
def init_weights_orthogonal_normal(m):
    """Orthogonal init for conv weights; biases get a small truncated normal.

    Exact-type check mirrors `init_weights`; non-conv modules are untouched.
    """
    if type(m) in (nn.Conv2d, nn.ConvTranspose2d):
        nn.init.orthogonal_(m.weight)
        truncated_normal_(m.bias, mean=0, std=0.001)
def l2_regularisation(m):
    """Sum of L2 norms of all parameters of module `m`.

    Returns a scalar tensor, or None when the module has no parameters
    (preserving the original contract).
    """
    total = None
    for param in m.parameters():
        norm = param.norm(2)
        total = norm if total is None else total + norm
    return total
def save_mask_prediction_example(mask, pred, iter):
    """Save grayscale PNGs of the prediction and the ground-truth mask.

    Files are written to images/<iter>_prediction.png and
    images/<iter>_mask.png; only channel 0 of each input is plotted.
    """
    prefix = 'images/' + str(iter)
    plt.imshow(pred[0, :, :], cmap='Greys')
    plt.savefig(prefix + "_prediction.png")
    plt.imshow(mask[0, :, :], cmap='Greys')
    plt.savefig(prefix + "_mask.png")
| [
"numpy.sum",
"numpy.log",
"torch.nn.init.kaiming_normal_",
"numpy.std",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.correlate",
"torch.nn.init.orthogonal_"
] | [((453, 471), 'numpy.correlate', 'np.correlate', (['a', 'v'], {}), '(a, v)\n', (465, 471), True, 'import numpy as np\n'), ((864, 891), 'numpy.mean', 'np.mean', (['sample_arr'], {'axis': '(0)'}), '(sample_arr, axis=0)\n', (871, 891), True, 'import numpy as np\n'), ((1020, 1041), 'numpy.zeros', 'np.zeros', (['(N, sX, sY)'], {}), '((N, sX, sY))\n', (1028, 1041), True, 'import numpy as np\n'), ((1262, 1287), 'numpy.mean', 'np.mean', (['E_ss_arr'], {'axis': '(0)'}), '(E_ss_arr, axis=0)\n', (1269, 1287), True, 'import numpy as np\n'), ((1304, 1328), 'numpy.zeros', 'np.zeros', (['(M, N, sX, sY)'], {}), '((M, N, sX, sY))\n', (1312, 1328), True, 'import numpy as np\n'), ((1473, 1498), 'numpy.mean', 'np.mean', (['E_sy_arr'], {'axis': '(1)'}), '(E_sy_arr, axis=1)\n', (1480, 1498), True, 'import numpy as np\n'), ((3366, 3405), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pred[0, :, :]'], {'cmap': '"""Greys"""'}), "(pred[0, :, :], cmap='Greys')\n", (3376, 3405), True, 'import matplotlib.pyplot as plt\n'), ((3456, 3495), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask[0, :, :]'], {'cmap': '"""Greys"""'}), "(mask[0, :, :], cmap='Greys')\n", (3466, 3495), True, 'import matplotlib.pyplot as plt\n'), ((589, 609), 'numpy.log', 'np.log', (['(m_samp + eps)'], {}), '(m_samp + eps)\n', (595, 609), True, 'import numpy as np\n'), ((2657, 2726), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (2680, 2726), True, 'import torch.nn as nn\n'), ((2980, 3009), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['m.weight'], {}), '(m.weight)\n', (2999, 3009), True, 'import torch.nn as nn\n'), ((352, 361), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (358, 361), True, 'import numpy as np\n'), ((431, 440), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (437, 440), True, 'import numpy as np\n'), ((631, 666), 'numpy.sum', 'np.sum', (['(m_gt * log_samples)'], 
{'axis': '(-1)'}), '(m_gt * log_samples, axis=-1)\n', (637, 666), True, 'import numpy as np\n'), ((1634, 1652), 'numpy.array', 'np.array', (['ncc_list'], {}), '(ncc_list)\n', (1642, 1652), True, 'import numpy as np\n'), ((286, 296), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (293, 296), True, 'import numpy as np\n'), ((301, 310), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (307, 310), True, 'import numpy as np\n'), ((338, 348), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (345, 348), True, 'import numpy as np\n'), ((393, 402), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (399, 402), True, 'import numpy as np\n')] |
# %%Author <NAME>
# plot the distribution of variance in sphere
import numpy
from sklearn.neighbors import KDTree
import matplotlib
# matplotlib.use('Agg')
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
import pyemma
'''
Converting the 3D-Euler angles (in global 'zyz' rotation) into viewing directions
Having checked with my matlab code and the current code is correct
'''
def rot_dim(option, angle):  # angle should be in degree measure
    """Return the 3x3 rotation matrix for a clockwise rotation about one axis.

    Args:
        option: axis to rotate about, one of 'x', 'y', 'z'.
        angle: clockwise rotation angle in degrees.

    Returns:
        numpy.matrix of shape (3, 3).

    Raises:
        ValueError: if `option` is not one of 'x', 'y', 'z'.
    """
    # convert degree to anti-clockwise radian
    angle_radian = -angle / 360.0 * 2 * numpy.pi
    # Hoist the trig calls; every branch uses the same cos/sin pair.
    c = numpy.cos(angle_radian)
    s = numpy.sin(angle_radian)
    if option == 'x':
        result = [[1, 0, 0], [0, c, -s], [0, s, c]]
    elif option == 'y':
        result = [[c, 0, s], [0, 1, 0], [-s, 0, c]]
    elif option == 'z':
        result = [[c, -s, 0], [s, c, 0], [0, 0, 1]]
    else:
        # BUG FIX: previously an invalid option only printed a warning and
        # then crashed with a NameError on the unbound `result`; fail fast
        # with a clear error instead.
        raise ValueError("option must be one of 'x', 'y', 'z', got %r" % (option,))
    return numpy.matrix(result)
def Euler2Rot(option, angles):  # here the angles are in degree measure
    """Compose the rotation matrix for global 'zyz' Euler angles (clockwise, degrees).

    Returns None for any option other than 'zyz' (preserving the original
    fall-through behaviour).
    """
    if option != 'zyz':
        return None
    first_z = rot_dim('z', -angles[0])
    middle_y = rot_dim('y', -angles[1])
    last_z = rot_dim('z', -angles[2])
    return first_z.dot(middle_y).dot(last_z)
def viewing_direction(option, angles):
    """Rotate the unit z-axis by the given Euler angles to get a viewing direction.

    Returns None implicitly for options other than 'zyz', like the original.
    """
    z_axis = numpy.array([0, 0, 1])
    if option == 'zyz':
        return Euler2Rot('zyz', angles).dot(z_axis)
# %%
# --- Load per-projection contribution data; column 0 is dropped (first three
# remaining columns are the zyz Euler angles, later columns PC contributions —
# assumed from the indexing below, TODO confirm against the writer script).
folder = './'
raw_info = numpy.loadtxt(folder + 'result_collect_contributions_weighted.dat')[:, 1:]
top_PC = 3
raw_info_new = raw_info.copy()
'''temp = raw_info[:, 3]
for j in range(4, 7):
    temp = temp + raw_info[:, j]
    raw_info_new[:, j] = temp'''
data_xy = [] # because we don't care about the third euler angle for the viewing direction
# %%
print(raw_info_new[0, :])
# %%
# Convert each Euler-angle triple into an (x, y) viewing direction, folding
# the lower hemisphere (z < 0) onto the upper one by point reflection.
for j in range(len(raw_info)):
    temp = viewing_direction('zyz', [raw_info_new[j][0], raw_info_new[j][1], raw_info_new[j][2]])
    if (temp[0, 2] < 0):
        data_xy.append([-temp[0, 0], -temp[0, 1]]) # reflect
    else:
        data_xy.append([temp[0, 0], temp[0, 1]])
# %%
# data_xy will be used to create the kd tree
# as the 2500+ points in my case are almost uniformaly distributed on the sphere, therefore we can simply query the nearest k neighbors rather than specify a radius
data_xy = numpy.array(data_xy)
tree = KDTree(data_xy, leaf_size=10)
new_weights = []
# query for all the points
nn = 1
dist, ind = tree.query(data_xy, k=nn)
# %%
# Smooth the per-point variance by averaging over each point's nn neighbours
# (with nn=1 this is just the point's own value).
new_weights = []
print(raw_info_new.shape)
for j in range(len(data_xy)):
    value_list = [raw_info_new[m][top_PC + 3] for m in ind[j]]
    new_weights.append(numpy.mean(value_list))
# Write (x, y, raw variance, smoothed variance) rows for external tools.
outputfile = folder + 'projection_vector_variance_knn_new_variance_of_top_%d_PC.dat' % (top_PC)
f = open(outputfile, 'w')
for j in range(len(data_xy)):
    f.write('%f %f %f %f\n' % (data_xy[j][0], data_xy[j][1], raw_info_new[j][top_PC + 2], new_weights[j]))
f.close()
# %%
# Quick scatter of the projected viewing directions on the unit disc.
data_xy = numpy.array(data_xy)
plt.scatter(data_xy[:, 0], data_xy[:, 1], s=2)
plt.tick_params(labelsize=25)
# %%
# Weighted free-energy style density plot, with a reference direction marker.
pyemma.plots.plot_free_energy(data_xy[:, 0], data_xy[:, 1], weights=new_weights, logscale=True, nbins=25)
point_xyz = viewing_direction('zyz', [0, 70, 0])
plt.scatter(point_xyz[0, 0], point_xyz[0, 1], s=1000, color='black', marker='p', alpha=0.5)
plt.xlim([-1, 1])
plt.ylim([-1, 1])
plt.tick_params(labelsize=25)
plt.savefig('top_%d_PCs_total_contributions.png' % (top_PC), dpi=300)
# %%
# %%
| [
"matplotlib.pyplot.xlim",
"numpy.matrix",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"pyemma.plots.plot_free_energy",
"numpy.mean",
"numpy.array",
"numpy.loadtxt",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.tick_params",
"sklearn.neighbors.KDTree",
"matplotlib.pyplot.savefig"
] | [((2661, 2681), 'numpy.array', 'numpy.array', (['data_xy'], {}), '(data_xy)\n', (2672, 2681), False, 'import numpy\n'), ((2690, 2719), 'sklearn.neighbors.KDTree', 'KDTree', (['data_xy'], {'leaf_size': '(10)'}), '(data_xy, leaf_size=10)\n', (2696, 2719), False, 'from sklearn.neighbors import KDTree\n'), ((3308, 3328), 'numpy.array', 'numpy.array', (['data_xy'], {}), '(data_xy)\n', (3319, 3328), False, 'import numpy\n'), ((3330, 3376), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data_xy[:, 0]', 'data_xy[:, 1]'], {'s': '(2)'}), '(data_xy[:, 0], data_xy[:, 1], s=2)\n', (3341, 3376), True, 'from matplotlib import pyplot as plt\n'), ((3378, 3407), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(25)'}), '(labelsize=25)\n', (3393, 3407), True, 'from matplotlib import pyplot as plt\n'), ((3421, 3531), 'pyemma.plots.plot_free_energy', 'pyemma.plots.plot_free_energy', (['data_xy[:, 0]', 'data_xy[:, 1]'], {'weights': 'new_weights', 'logscale': '(True)', 'nbins': '(25)'}), '(data_xy[:, 0], data_xy[:, 1], weights=\n new_weights, logscale=True, nbins=25)\n', (3450, 3531), False, 'import pyemma\n'), ((3578, 3674), 'matplotlib.pyplot.scatter', 'plt.scatter', (['point_xyz[0, 0]', 'point_xyz[0, 1]'], {'s': '(1000)', 'color': '"""black"""', 'marker': '"""p"""', 'alpha': '(0.5)'}), "(point_xyz[0, 0], point_xyz[0, 1], s=1000, color='black', marker\n ='p', alpha=0.5)\n", (3589, 3674), True, 'from matplotlib import pyplot as plt\n'), ((3673, 3690), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1, 1]'], {}), '([-1, 1])\n', (3681, 3690), True, 'from matplotlib import pyplot as plt\n'), ((3692, 3709), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1, 1]'], {}), '([-1, 1])\n', (3700, 3709), True, 'from matplotlib import pyplot as plt\n'), ((3711, 3740), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(25)'}), '(labelsize=25)\n', (3726, 3740), True, 'from matplotlib import pyplot as plt\n'), ((3744, 3811), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["('top_%d_PCs_total_contributions.png' % top_PC)"], {'dpi': '(300)'}), "('top_%d_PCs_total_contributions.png' % top_PC, dpi=300)\n", (3755, 3811), True, 'from matplotlib import pyplot as plt\n'), ((1230, 1250), 'numpy.matrix', 'numpy.matrix', (['result'], {}), '(result)\n', (1242, 1250), False, 'import numpy\n'), ((1632, 1654), 'numpy.array', 'numpy.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1643, 1654), False, 'import numpy\n'), ((1767, 1834), 'numpy.loadtxt', 'numpy.loadtxt', (["(folder + 'result_collect_contributions_weighted.dat')"], {}), "(folder + 'result_collect_contributions_weighted.dat')\n", (1780, 1834), False, 'import numpy\n'), ((2987, 3009), 'numpy.mean', 'numpy.mean', (['value_list'], {}), '(value_list)\n', (2997, 3009), False, 'import numpy\n'), ((665, 688), 'numpy.cos', 'numpy.cos', (['angle_radian'], {}), '(angle_radian)\n', (674, 688), False, 'import numpy\n'), ((740, 763), 'numpy.sin', 'numpy.sin', (['angle_radian'], {}), '(angle_radian)\n', (749, 763), False, 'import numpy\n'), ((765, 788), 'numpy.cos', 'numpy.cos', (['angle_radian'], {}), '(angle_radian)\n', (774, 788), False, 'import numpy\n'), ((691, 714), 'numpy.sin', 'numpy.sin', (['angle_radian'], {}), '(angle_radian)\n', (700, 714), False, 'import numpy\n'), ((836, 859), 'numpy.cos', 'numpy.cos', (['angle_radian'], {}), '(angle_radian)\n', (845, 859), False, 'import numpy\n'), ((864, 887), 'numpy.sin', 'numpy.sin', (['angle_radian'], {}), '(angle_radian)\n', (873, 887), False, 'import numpy\n'), ((950, 973), 'numpy.cos', 'numpy.cos', (['angle_radian'], {}), '(angle_radian)\n', (959, 973), False, 'import numpy\n'), ((922, 945), 'numpy.sin', 'numpy.sin', (['angle_radian'], {}), '(angle_radian)\n', (931, 945), False, 'import numpy\n'), ((1021, 1044), 'numpy.cos', 'numpy.cos', (['angle_radian'], {}), '(angle_radian)\n', (1030, 1044), False, 'import numpy\n'), ((1096, 1119), 'numpy.sin', 'numpy.sin', (['angle_radian'], {}), '(angle_radian)\n', (1105, 1119), False, 'import 
numpy\n'), ((1121, 1144), 'numpy.cos', 'numpy.cos', (['angle_radian'], {}), '(angle_radian)\n', (1130, 1144), False, 'import numpy\n'), ((1047, 1070), 'numpy.sin', 'numpy.sin', (['angle_radian'], {}), '(angle_radian)\n', (1056, 1070), False, 'import numpy\n')] |
import numpy as np
import PIL
from keras.applications import ResNet50
import io
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
class PhotoProcessor(object):
    """Wraps raw image bytes, exposing EXIF metadata and an ImageNet-ready
    RGB tensor for classification models."""

    def __init__(self, image):
        # `image` is the raw encoded image byte string (e.g. an uploaded file).
        self._image = Image.open(io.BytesIO(image))
        self.exif = self.get_exif_data(self._image)

    def get_exif_data(self, image):
        """Returns a dictionary from the exif data of a PIL Image item. Also converts the GPS Tags"""
        exif_data = {}
        if image.__getattribute__('_getexif'):
            info = image._getexif()
            if info:
                for tag, value in info.items():
                    decoded = TAGS.get(tag, tag)
                    if decoded == "GPSInfo":
                        # Expand the nested GPS IFD into human-readable keys.
                        gps_data = {}
                        for t in value:
                            sub_decoded = GPSTAGS.get(t, t)
                            gps_data[sub_decoded] = value[t]
                        exif_data[decoded] = gps_data
                    else:
                        exif_data[decoded] = value
        return exif_data

    def prepare_rgb_data(self, img_size):
        """Resize and preprocess the image into a (1, H, W, 3) array.

        Args:
            img_size: (width, height) tuple passed to PIL's resize.

        Returns:
            numpy array with a leading batch axis, run through
            imagenet_utils.preprocess_input.
        """
        if self._image.mode != 'RGB':
            # BUG FIX: the mode string was 'RGB ' (trailing space), an invalid
            # PIL mode that raises for any non-RGB input (e.g. RGBA PNGs).
            rgb_data = self._image.convert('RGB')
        else:
            rgb_data = self._image
        rgb_data = rgb_data.resize(img_size)
        rgb_data = img_to_array(rgb_data)
        rgb_data = np.expand_dims(rgb_data, axis=0)
        rgb_data = imagenet_utils.preprocess_input(rgb_data)
        return rgb_data
"io.BytesIO",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.applications.imagenet_utils.preprocess_input",
"PIL.ExifTags.GPSTAGS.get",
"PIL.ExifTags.TAGS.get"
] | [((1439, 1461), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['rgb_data'], {}), '(rgb_data)\n', (1451, 1461), False, 'from keras.preprocessing.image import img_to_array\n'), ((1481, 1513), 'numpy.expand_dims', 'np.expand_dims', (['rgb_data'], {'axis': '(0)'}), '(rgb_data, axis=0)\n', (1495, 1513), True, 'import numpy as np\n'), ((1533, 1574), 'keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', (['rgb_data'], {}), '(rgb_data)\n', (1564, 1574), False, 'from keras.applications import imagenet_utils\n'), ((333, 350), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (343, 350), False, 'import io\n'), ((749, 767), 'PIL.ExifTags.TAGS.get', 'TAGS.get', (['tag', 'tag'], {}), '(tag, tag)\n', (757, 767), False, 'from PIL.ExifTags import TAGS, GPSTAGS\n'), ((933, 950), 'PIL.ExifTags.GPSTAGS.get', 'GPSTAGS.get', (['t', 't'], {}), '(t, t)\n', (944, 950), False, 'from PIL.ExifTags import TAGS, GPSTAGS\n')] |
import imageio
from skimage.transform import resize
import os
import numpy as np
import sys
import h5py
def get_train_data(file_local):
    """Build (frames, one-hot labels) arrays from 30 pig videos and cache to HDF5.

    Reads <file_local>/1.mp4 ... 30.mp4, keeps every second frame (resized to
    72x128x3), writes both arrays to 'jd_pig_train_data.h5', and returns them.
    """
    data_ = []
    labels_ = []
    for pig_id in range(30):
        video_path = os.path.join(file_local, str(pig_id + 1) + '.mp4')
        # frame size is (720, 1280, 3)
        onehot = np.zeros(30)
        onehot[pig_id] = 1  # set the image id
        reader = imageio.get_reader(video_path, 'ffmpeg')
        for frame_idx in range(reader.get_length() - 1):
            if frame_idx % 2 == 0:
                sys.stdout.write('\r>> Read image from video frame ID %d/%d' % (pig_id, frame_idx))
                sys.stdout.flush()
                frame = reader.get_data(frame_idx)
                # resize image to reduce calculate amount
                frame = resize(frame, (72, 128, 3))
                data_.append(frame)
                labels_.append(onehot)
    data_ = np.asarray(data_)
    labels_ = np.asarray(labels_)
    print("\n creating hdf5 file...")
    print("\n data is (IDX,72,128,3) labels is (IDX,30)...")
    f = h5py.File('jd_pig_train_data.h5', "w")
    dst_data = f.create_dataset('Data', data_.shape, np.float32)
    dst_data[:] = data_[:]
    dst_labels = f.create_dataset('labels', labels_.shape, np.float32)
    dst_labels[:] = labels_[:]
    f.close()
    return data_, labels_
if __name__ == "__main__":
    # Directory containing 30 training videos named 1.mp4 ... 30.mp4 (one per pig).
    train_local = 'Pig_Identification_Qualification_Train/train'
    # Directory of ~3000 test JPGs of varying size; defined here but not yet
    # used by this script.
    test_local = 'Pig_Identification_Qualification_Test_A/test_A'
    # Step1--read train data and labels (also caches them to an HDF5 file).
    data, labels = get_train_data(train_local)
    print(data.shape)
| [
"sys.stdout.write",
"h5py.File",
"numpy.asarray",
"numpy.zeros",
"sys.stdout.flush",
"skimage.transform.resize",
"imageio.get_reader"
] | [((968, 985), 'numpy.asarray', 'np.asarray', (['data_'], {}), '(data_)\n', (978, 985), True, 'import numpy as np\n'), ((1000, 1019), 'numpy.asarray', 'np.asarray', (['labels_'], {}), '(labels_)\n', (1010, 1019), True, 'import numpy as np\n'), ((1128, 1166), 'h5py.File', 'h5py.File', (['"""jd_pig_train_data.h5"""', '"""w"""'], {}), "('jd_pig_train_data.h5', 'w')\n", (1137, 1166), False, 'import h5py\n'), ((328, 340), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (336, 340), True, 'import numpy as np\n'), ((410, 449), 'imageio.get_reader', 'imageio.get_reader', (['file_temp', '"""ffmpeg"""'], {}), "(file_temp, 'ffmpeg')\n", (428, 449), False, 'import imageio\n'), ((571, 657), 'sys.stdout.write', 'sys.stdout.write', (["('\\r>> Read image from video frame ID %d/%d' % (idx, video_frame))"], {}), "('\\r>> Read image from video frame ID %d/%d' % (idx,\n video_frame))\n", (587, 657), False, 'import sys\n'), ((670, 688), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (686, 688), False, 'import sys\n'), ((838, 870), 'skimage.transform.resize', 'resize', (['image_temp', '(72, 128, 3)'], {}), '(image_temp, (72, 128, 3))\n', (844, 870), False, 'from skimage.transform import resize\n')] |
import numpy as np
from numba import types
from numba.errors import TypingError
from numba.extending import overload
@overload(np.clip)
def impl_clip(x, a, b):
    """Numba overload of np.clip for scalars and 1D int/float arrays.

    This function runs at numba *compile time*: it inspects the numba types of
    the arguments and returns a plain-Python implementation that numba then
    compiles for those types. Unsupported type combinations raise TypingError.
    """
    # In numba type checking happens at *compile time*. We check the types of
    # the arguments here, and return a proper implementation based on those
    # types (or error accordingly).
    # Check that `a` and `b` are scalars, and at most one of them is None.
    if not isinstance(a, (types.Integer, types.Float, types.NoneType)):
        raise TypingError("a must be a scalar int/float")
    if not isinstance(b, (types.Integer, types.Float, types.NoneType)):
        raise TypingError("b must be a scalar int/float")
    if isinstance(a, types.NoneType) and isinstance(b, types.NoneType):
        raise TypingError("a and b can't both be None")
    if isinstance(x, (types.Integer, types.Float)):
        # x is a scalar with a valid type
        if isinstance(a, types.NoneType):
            # a is None
            def impl(x, a, b):
                return min(x, b)
        elif isinstance(b, types.NoneType):
            # b is None
            def impl(x, a, b):
                return max(x, a)
        else:
            # neither a or b are None
            def impl(x, a, b):
                return min(max(x, a), b)
    elif (
        isinstance(x, types.Array) and
        x.ndim == 1 and
        isinstance(x.dtype, (types.Integer, types.Float))
    ):
        # x is a 1D array of the proper type
        def impl(x, a, b):
            # Allocate an output array using standard numpy functions
            out = np.empty_like(x)
            # Iterate over x, calling `np.clip` on every element
            for i in range(x.size):
                # This will dispatch to the proper scalar implementation (as
                # defined above) at *compile time*. There should have no
                # overhead at runtime.
                out[i] = np.clip(x[i], a, b)
            return out
    else:
        raise TypingError("x must be an int/float or a 1D array of ints/floats")
    # The call to `np.clip` has arguments with valid types, return our
    # numba-compatible implementation
    return impl
| [
"numba.extending.overload",
"numpy.empty_like",
"numpy.clip",
"numba.errors.TypingError"
] | [((120, 137), 'numba.extending.overload', 'overload', (['np.clip'], {}), '(np.clip)\n', (128, 137), False, 'from numba.extending import overload\n'), ((514, 557), 'numba.errors.TypingError', 'TypingError', (['"""a must be a scalar int/float"""'], {}), "('a must be a scalar int/float')\n", (525, 557), False, 'from numba.errors import TypingError\n'), ((644, 687), 'numba.errors.TypingError', 'TypingError', (['"""b must be a scalar int/float"""'], {}), "('b must be a scalar int/float')\n", (655, 687), False, 'from numba.errors import TypingError\n'), ((774, 815), 'numba.errors.TypingError', 'TypingError', (['"""a and b can\'t both be None"""'], {}), '("a and b can\'t both be None")\n', (785, 815), False, 'from numba.errors import TypingError\n'), ((1995, 2061), 'numba.errors.TypingError', 'TypingError', (['"""x must be an int/float or a 1D array of ints/floats"""'], {}), "('x must be an int/float or a 1D array of ints/floats')\n", (2006, 2061), False, 'from numba.errors import TypingError\n'), ((1596, 1612), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (1609, 1612), True, 'import numpy as np\n'), ((1928, 1947), 'numpy.clip', 'np.clip', (['x[i]', 'a', 'b'], {}), '(x[i], a, b)\n', (1935, 1947), True, 'import numpy as np\n')] |
'''
Copyright 2021 OpenDILab. All Rights Reserved:
Description: Carla simulator.
'''
import os
import numpy as np
import random
from typing import Any, Union, Optional, Dict, List
from distutils.version import LooseVersion
import pkg_resources
from collections import defaultdict
from .base_simulator import BaseSimulator
from core.utils.simulator_utils.sensor_utils import SensorHelper, CollisionSensor, TrafficLightHelper
from core.utils.simulator_utils.map_utils import BeVWrapper
from core.simulators.carla_data_provider import CarlaDataProvider
from core.utils.simulator_utils.carla_utils import control_to_signal, get_birdview
from core.utils.planner import BasicPlanner, BehaviorPlanner, LBCPlannerNew
from core.utils.others.tcp_helper import find_traffic_manager_port
import carla
from carla import WeatherParameters
PRESET_WEATHERS = {
1: WeatherParameters.ClearNoon,
2: WeatherParameters.CloudyNoon,
3: WeatherParameters.WetNoon,
4: WeatherParameters.WetCloudyNoon,
5: WeatherParameters.MidRainyNoon,
6: WeatherParameters.HardRainNoon,
7: WeatherParameters.SoftRainNoon,
8: WeatherParameters.ClearSunset,
9: WeatherParameters.CloudySunset,
10: WeatherParameters.WetSunset,
11: WeatherParameters.WetCloudySunset,
12: WeatherParameters.MidRainSunset,
13: WeatherParameters.HardRainSunset,
14: WeatherParameters.SoftRainSunset,
}
VEHICLE_NAME = 'vehicle.tesla.model3'
ROLE_NAME = 'hero'
OBS_TYPE_LIST = ['state', 'depth', 'rgb', 'segmentation', 'bev', 'lidar', 'gnss']
PLANNER_DICT = {
'basic': BasicPlanner,
'behavior': BehaviorPlanner,
'lbc': LBCPlannerNew,
}
class CarlaSimulator(BaseSimulator):
"""
Common Carla Simulator.
The simulator creates a client to Carla server, and is able to get observation, send
control signals to the hero vehicle and record essential data from the simulated world.
In the initialization period, the simulator may change the environment parameters including
maps and weathers and can add actors (including NPC vehicles, pedestrians as well as sensors
mounted on the hero vehicle),
During the running period the simulator will achieve running state and information about
the hero vehicle (such as running speed, angle, navigation goal and reference path), data
from the sensors (such as camera images, lidar points) as well as running status (including
collision, running off road, red light, distance and timeout to end waypoint).
Once it is created, it will set up Carla client and set the parameters in the configuration
dict as its default. When actually calling the ``init`` method to start an episode, some of
the configurations may be changed by the input arguments while others remain by default.
The simulator stores and gets some information from a static class ``CarlaDataProvider``
to avoid frequently sending message to Carla server and speed up.
Up to now, it uses Carla version 0.9.9.
If no traffic manager port is provided, it will find random free port in system.
:Arguments:
- cfg (Dict): Config Dict.
- client (carla.Client, optional): Already established Carla client. Defaults to None.
- host (str, optional): TCP host Carla client link to. Defaults to 'localhost'.
- port (int, optional): TCP port Carla client link to. Defaults to 9000.
- tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None.
- timeout (float, optional): Carla client link timeout. Defaults to 10.0.
:Interfaces:
init, get_state, get_sensor_data, get_navigation, get_information, apply_planner,
apply_control, run_step, clean_up
:Properties:
- town_name (str): Current town name.
- hero_player (carla.Actor): hero actor in simulation.
- collided (bool): Whether collided in current episode.
- ran_light (bool): Whether ran light in current frame.
- off_road (bool): Whether ran off road in current frame.
- wrong_direction (bool): Whether ran in wrong direction in current frame.
- end_distance (float): Distance to target in current frame.
- end_timeout (float): Timeout for entire route provided by planner.
- total_diatance (float): Distance for entire route provided by planner.
"""
config = dict(
town='Town01',
weather='random',
sync_mode=True,
delta_seconds=0.1,
no_rendering=False,
auto_pilot=False,
n_vehicles=0,
n_pedestrians=0,
disable_two_wheels=False,
col_threshold=400,
waypoint_num=20,
obs=list(),
planner=dict(),
aug=None,
verbose=True,
debug=False,
)
def __init__(
    self,
    cfg: Dict,
    client: Optional[carla.Client] = None,
    host: str = 'localhost',
    port: int = 9000,
    tm_port: Optional[int] = None,
    timeout: float = 10.0,
    **kwargs
) -> None:
    """
    Init Carla simulator.

    :Arguments:
        - cfg (Dict): Config dict, merged into the class-level default ``config``.
        - client (carla.Client, optional): Already established Carla client. Defaults to None.
        - host (str, optional): TCP host to connect to when no client is given. Defaults to 'localhost'.
        - port (int, optional): TCP port to connect to when no client is given. Defaults to 9000.
        - tm_port (int, optional): Traffic manager port; a free one is searched when None. Defaults to None.
        - timeout (float, optional): Carla client timeout in seconds. Defaults to 10.0.
    """
    super().__init__(cfg)

    # Check Carla API version
    dist = pkg_resources.get_distribution("carla")
    if LooseVersion(dist.version) < LooseVersion('0.9.8'):
        raise ImportError("CARLA version 0.9.8 or newer required. CARLA version found: {}".format(dist))

    # Create the client that will send the requests to the simulator
    if client is None:
        self._client = carla.Client(host, port)
    else:
        self._client = client

    self._client_timeout = timeout
    self._client.set_timeout(self._client_timeout)

    if tm_port is None:
        # No traffic manager port provided: probe random free ports until one
        # can be bound, giving up after 10 failed attempts.
        print("[SIMULATOR] Not providing TM port, try finding free")
        max_retry = 0
        while True:
            try:
                tm_port = find_traffic_manager_port()
                self._tm = self._client.get_trafficmanager(tm_port)
                print("[SIMULATOR] Using TM port:", tm_port)
                break
            except Exception as e:
                max_retry += 1
                if max_retry > 10:
                    raise e
    else:
        self._tm = self._client.get_trafficmanager(tm_port)
    self._tm.set_global_distance_to_leading_vehicle(2.0)
    # Hybrid physics: full physics only near the hero vehicle, cheaper elsewhere.
    self._tm.set_hybrid_physics_mode(True)

    # World/map are created lazily in init() via _set_town().
    self._world = None
    self._map = None

    self._sync_mode = self._cfg.sync_mode
    self._no_rendering = self._cfg.no_rendering
    self._delta_seconds = self._cfg.delta_seconds
    # Store town/weather/NPC counts from config as the episode defaults.
    self._apply_world_setting(**self._cfg)
    self._col_threshold = self._cfg.col_threshold
    self._waypoint_num = self._cfg.waypoint_num
    self._obs_cfg = self._cfg.obs
    self._planner_cfg = self._cfg.planner
    self._camera_aug_cfg = self._cfg.aug
    self._verbose = self._cfg.verbose

    # Per-episode counters and status flags, reset again in clean_up().
    self._tick = 0
    self._timestamp = 0
    self._end_location = None
    self._collided = False
    self._ran_light = False
    self._off_road = False
    self._wrong_direction = False
    self._end_distance = float('inf')
    self._end_timeout = float('inf')

    # Helpers created per-episode in init()/_prepare_observations().
    self._hero_actor = None
    self._start_location = None
    self._sensor_helper = None
    self._bev_wrapper = None
    self._planner = None
    self._collision_sensor = None
    self._traffic_light_helper = None

    self._actor_map = defaultdict(list)
    self._debug = self._cfg.debug
def _apply_world_setting(self, **world_param) -> None:
    """
    Update world-related episode settings from keyword arguments.

    Entries whose value is ``None`` are ignored, so callers can forward
    unset options without clobbering the current setting.

    :Arguments:
        - world_param: Any of town, weather, n_vehicles, n_pedestrians,
          autopilot (or auto_pilot), disable_two_wheels.
    """
    # Drop unset (None) options. Iterate over a snapshot of the keys:
    # popping from a dict while iterating it raises RuntimeError. The
    # previous code also tested the *key* for None ('if k is None'),
    # which can never match a keyword argument; the intent is clearly
    # to skip None *values*.
    for k in list(world_param):
        if world_param[k] is None:
            world_param.pop(k)
    if 'town' in world_param:
        self._town_name = world_param['town']
    if 'weather' in world_param:
        self._weather = world_param['weather']
    if 'n_vehicles' in world_param:
        self._n_vehicles = world_param['n_vehicles']
    if 'n_pedestrians' in world_param:
        self._n_pedestrians = world_param['n_pedestrians']
    # NOTE(review): the class-level config uses the key 'auto_pilot' while
    # this method only recognised 'autopilot', so the config default was
    # never applied; accept both spellings for backward compatibility.
    if 'autopilot' in world_param:
        self._autopilot = world_param['autopilot']
    elif 'auto_pilot' in world_param:
        self._autopilot = world_param['auto_pilot']
    if 'disable_two_wheels' in world_param:
        self._disable_two_wheels = world_param['disable_two_wheels']
def init(self, start: int = 0, end: int = 1, **kwargs) -> None:
    """
    Init simulator episode with provided args.

    This method takes start and end waypoint indexes to set a navigation goal, and will use planner to build a route
    to generate target waypoint and road options in each tick. It will set world, map, vehicles, pedestrians due to
    default config and provided args, which will be stored to replace old config.
    If no collision happens when creating actors, the init will end and return.

    :Arguments:
        - start (int, optional): Index of start waypoint. Defaults to 0.
        - end (int, optional): Index of end waypoint. Defaults to 1.

    :Optional arguments: town, weather, n_vehicles, n_pedestrians, autopilot, disable_two_wheels
    """
    self._apply_world_setting(**kwargs)
    self._set_town(self._town_name)
    self._set_weather(self._weather)
    self._blueprints = self._world.get_blueprint_library()
    # Retry the whole episode setup until the warm-up ticks in _ready()
    # complete without a collision.
    while True:
        self.clean_up()
        # Refresh the static data provider with the current client/world.
        CarlaDataProvider.set_client(self._client)
        CarlaDataProvider.set_world(self._world)
        CarlaDataProvider.set_traffic_manager_port(self._tm.get_port())
        self._spawn_hero_vehicle(start_pos=start)
        self._prepare_observations()
        self._spawn_vehicles()
        self._spawn_pedestrians()
        CarlaDataProvider.on_carla_tick()
        self.apply_planner(end)

        # Reset per-episode status flags before the readiness check.
        self._collided = False
        self._ran_light = False
        self._off_road = False
        self._wrong_direction = False

        if self._ready():
            if self._debug:
                self._count_actors()
            break
def _set_town(self, town: str) -> None:
    """Load the requested town, re-using the current world when it already
    matches, then refresh the cached map and apply the sync settings."""
    needs_load = self._world is None or self._map.name != town
    if needs_load:
        self._world = self._client.load_world(town)
    else:
        self._world = self._client.get_world()
    self._map = self._world.get_map()
    self._set_sync_mode(self._sync_mode, self._delta_seconds)
def _set_sync_mode(self, sync: bool, delta_seconds: float = 0.1) -> None:
    """Apply synchronous-mode, fixed-step and rendering settings to the world.

    Settings are only pushed to the server when the sync flag actually
    changes, avoiding a redundant apply_settings round-trip.
    """
    settings = self._world.get_settings()
    if settings.synchronous_mode is not sync:
        settings.synchronous_mode = sync
        settings.fixed_delta_seconds = delta_seconds
        settings.no_rendering_mode = self._no_rendering
        self._world.apply_settings(settings)
    #self._tm.set_synchronous_mode(True)
def _set_weather(self, weather_string):
    """Apply a weather preset to the world.

    'random' draws one preset from PRESET_WEATHERS; otherwise the string
    is used as a key into PRESET_WEATHERS (KeyError if unknown).
    """
    if self._verbose:
        print('[SIMULATOR] Setting weather: ', weather_string)
    if weather_string == 'random':
        candidates = list(PRESET_WEATHERS.values())
        chosen = np.random.choice(candidates)
    else:
        chosen = PRESET_WEATHERS[weather_string]
    self._world.set_weather(chosen)
def _spawn_hero_vehicle(self, start_pos: int = 0) -> None:
    """Spawn the hero vehicle at the indexed spawn point and remember
    its start location for later route planning."""
    spawn_transform = CarlaDataProvider.get_spawn_point(start_pos)
    self._start_location = spawn_transform.location
    self._hero_actor = CarlaDataProvider.request_new_actor(VEHICLE_NAME, spawn_transform, ROLE_NAME)
def _spawn_vehicles(self) -> None:
    """Spawn up to ``self._n_vehicles`` NPC vehicles with autopilot enabled
    at random spawn points, registering successes with CarlaDataProvider."""
    blueprints = self._blueprints.filter('vehicle.*')
    if self._disable_two_wheels:
        # Keep four-wheeled vehicles only (exclude bikes/motorcycles).
        blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4]
    spawn_points = self._map.get_spawn_points()
    random.shuffle(spawn_points)

    SpawnActor = carla.command.SpawnActor
    SetAutopilot = carla.command.SetAutopilot
    FutureActor = carla.command.FutureActor

    batch = []
    for n, transform in enumerate(spawn_points):
        if n >= self._n_vehicles:
            break
        blueprint = random.choice(blueprints)
        # Randomize color/driver from the blueprint's recommended values.
        if blueprint.has_attribute('color'):
            color = random.choice(blueprint.get_attribute('color').recommended_values)
            blueprint.set_attribute('color', color)
        if blueprint.has_attribute('driver_id'):
            driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
            blueprint.set_attribute('driver_id', driver_id)
        blueprint.set_attribute('role_name', 'autopilot')

        # spawn the cars and set their autopilot and light state all together
        batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True, self._tm.get_port())))

    for response in self._client.apply_batch_sync(batch, True):
        if response.error:
            # Spawn collisions are expected at busy spawn points; just report.
            print('[SIMULATOR]', response.error)
        else:
            CarlaDataProvider.register_actor(self._world.get_actor(response.actor_id))
def _spawn_pedestrians(self) -> None:
    """Spawn ``self._n_pedestrians`` walkers plus one AI controller each,
    retrying in batches until the requested count has been spawned."""
    blueprints = self._blueprints.filter('walker.pedestrian.*')
    SpawnActor = carla.command.SpawnActor

    # NOTE(review): with pedestrians_running = 30.0, the test
    # 'random.random() > pedestrians_running' below is always False
    # (random() is in [0, 1)), so every walker gets the running speed.
    # Presumably a percentage/fraction mismatch — confirm intent.
    pedestrians_running = 30.0  # how many pedestrians will run
    pedestrians_crossing = 30.0

    peds_spawned = 0

    walkers = []
    controllers = []
    walker_speed = []

    # Keep batching until the requested number of walkers actually spawned
    # (individual spawns can fail, e.g. due to collisions).
    while peds_spawned < self._n_pedestrians:
        spawn_points = []
        _walkers = []
        _controllers = []
        _walker_speed = []

        # 1. take all the random locations to spawn
        for i in range(self._n_pedestrians - peds_spawned):
            spawn_point = carla.Transform()
            loc = self._world.get_random_location_from_navigation()
            if loc is not None:
                spawn_point.location = loc
                spawn_points.append(spawn_point)

        # 2. spawn the walker object
        batch = []
        for spawn_point in spawn_points:
            walker_bp = random.choice(blueprints)

            # set as not invincible
            if walker_bp.has_attribute('is_invincible'):
                walker_bp.set_attribute('is_invincible', 'false')

            # set the max speed
            if walker_bp.has_attribute('speed'):
                if random.random() > pedestrians_running:
                    # walking
                    _walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
                else:
                    # running
                    _walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
            else:
                if self._verbose:
                    print("[SIMULATOR] Walker has no speed")
                _walker_speed.append(0.0)
            batch.append(SpawnActor(walker_bp, spawn_point))

        results = self._client.apply_batch_sync(batch, True)

        # Keep speeds aligned with the walkers that actually spawned.
        _walker_speed2 = []
        for i in range(len(results)):
            if results[i].error:
                if self._verbose:
                    print('[SIMULATOR] Walker ', results[i].error)
            else:
                peds_spawned += 1
                _walkers.append(results[i].actor_id)
                _walker_speed2.append(_walker_speed[i])
        _walker_speed = _walker_speed2

        # 3. spawn the walker controller
        walker_controller_bp = self._blueprints.find('controller.ai.walker')
        batch = [SpawnActor(walker_controller_bp, carla.Transform(), walker) for walker in _walkers]

        for result in self._client.apply_batch_sync(batch, True):
            if result.error:
                if self._verbose:
                    print('[SIMULATOR] walker controller ', result.error)
            else:
                _controllers.append(result.actor_id)

        # 4. add peds and controllers into actor dict
        controllers.extend(_controllers)
        walkers.extend(_walkers)
        walker_speed.extend(_walker_speed)

    CarlaDataProvider.register_actors(self._world.get_actors(walkers))
    # CarlaDataProvider.register_actors(self._world.get_actors(controllers))

    # wait for a tick to ensure client receives the last transform of the walkers we have just created
    self._world.tick()

    # 5. initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...])
    # set how many pedestrians can cross the road
    # NOTE(review): carla expects the cross factor as a fraction in [0, 1];
    # 30.0 effectively lets all pedestrians cross — confirm intent.
    self._world.set_pedestrians_cross_factor(pedestrians_crossing)
    for i, controller_id in enumerate(controllers):
        controller = self._world.get_actor(controller_id)
        #controller = CarlaDataProvider.get_actor_by_id(controller_id)
        controller.start()
        controller.go_to_location(self._world.get_random_location_from_navigation())
        controller.set_max_speed(float(walker_speed[i]))
        # Controllers are tracked so clean_up() can stop and destroy them.
        self._actor_map['walker_controller'].append(controller)

    # example of how to use parameters
    self._tm.global_percentage_speed_difference(30.0)

    self._world.tick()
def _prepare_observations(self) -> None:
    """Set up sensors, the optional bird-eye-view wrapper, the route planner
    and the collision/traffic-light helpers for the hero vehicle."""
    self._sensor_helper = SensorHelper(self._obs_cfg, self._camera_aug_cfg)
    self._sensor_helper.setup_sensors(self._world, self._hero_actor)
    # Tick the world until every sensor has produced its first frame.
    while not self._sensor_helper.all_sensors_ready():
        self._world.tick()

    for obs_item in self._obs_cfg:
        if obs_item.type == 'bev':
            self._bev_wrapper = BeVWrapper(obs_item)
            self._bev_wrapper.init(self._client, self._world, self._map, self._hero_actor)

    # Planner type defaults to 'basic' when unspecified in config.
    planner_cls = PLANNER_DICT[self._planner_cfg.get('type', 'basic')]
    self._planner = planner_cls(self._planner_cfg)

    self._collision_sensor = CollisionSensor(self._hero_actor, self._col_threshold)
    self._traffic_light_helper = TrafficLightHelper(self._hero_actor)
def _ready(self, ticks: int = 30) -> bool:
    """Warm the simulation up for ``ticks`` steps, reset the episode clock,
    and report whether the start was collision-free."""
    remaining = ticks
    while remaining > 0:
        self.run_step()
        self.get_state()
        remaining -= 1

    self._tick = 0
    self._timestamp = 0

    return not self._collided
def _count_actors(self) -> None:
    """Print a debug summary of all actors currently in the world,
    grouped by actor category."""
    buckets = {
        'vehicle': [],
        'traffic_light': [],
        'speed_limit': [],
        'walker': [],
        'sensor': [],
        'other': [],
    }

    for actor in self._world.get_actors():
        pair = (actor, actor.get_transform())
        type_id = actor.type_id
        # Check order matters: 'vehicle' first, then the more specific ids.
        if 'vehicle' in type_id:
            buckets['vehicle'].append(pair)
        elif 'traffic_light' in type_id:
            buckets['traffic_light'].append(pair)
        elif 'speed_limit' in type_id:
            buckets['speed_limit'].append(pair)
        elif 'walker.pedestrian' in type_id:
            buckets['walker'].append(pair)
        elif 'sensor' in type_id:
            buckets['sensor'].append(pair)
        else:
            buckets['other'].append(pair)

    print("[SIMULATOR] vehicles:")
    for veh in buckets['vehicle']:
        print('\t', veh[0].id, veh[0].type_id, veh[0].attributes['role_name'])
    print("[SIMULATOR] walkers:", len(buckets['walker']))
    print("[SIMULATOR] lights:", len(buckets['traffic_light']))
    print("[SIMULATOR] speed limits:", len(buckets['speed_limit']))
    print("[SIMULATOR] sensors:")
    for ss in buckets['sensor']:
        print('\t', ss[0])
    print("[SIMULATOR] others:", len(buckets['other']))
def apply_planner(self, end_idx: int) -> None:
    """
    Apply goal waypoint to planner in simulator. The start point is set to the hero
    vehicle's spawn location, so _spawn_hero_vehicle must have run first.

    :Arguments:
        - end_idx (int): Index of end waypoint.
    """
    assert self._start_location is not None
    self._end_location = CarlaDataProvider.get_spawn_point(end_idx).location
    self._planner.set_destination(self._start_location, self._end_location, clean=True)
    # Cache route totals; timeout/distance are refreshed each tick in get_navigation().
    self._total_distance = self._planner.distance_to_goal
    self._end_timeout = self._planner.timeout
def get_state(self) -> Dict:
    """
    Get running state from current world. It contains location, orientation, speed, acc,
    and the state of surrounding road info such as traffic light and junction.

    :Returns:
        Dict: State dict.
    """
    # CarlaDataProvider caches per-tick measurements to avoid extra server calls.
    speed = CarlaDataProvider.get_speed(self._hero_actor) * 3.6  # *3.6: presumably m/s -> km/h — confirm with get_speed
    transform = CarlaDataProvider.get_transform(self._hero_actor)
    location = transform.location
    forward_vector = transform.get_forward_vector()
    acceleration = CarlaDataProvider.get_acceleration(self._hero_actor)
    angular_velocity = CarlaDataProvider.get_angular_velocity(self._hero_actor)
    velocity = CarlaDataProvider.get_speed_vector(self._hero_actor)

    light_state = self._traffic_light_helper.active_light_state.value

    # Without projection a waypoint only exists when the vehicle is on a road,
    # so a None result is used as the off-road signal.
    drive_waypoint = self._map.get_waypoint(
        location,
        project_to_road=False,
    )
    is_junction = False
    if drive_waypoint is not None:
        is_junction = drive_waypoint.is_junction
        self._off_road = False
    else:
        self._off_road = True

    lane_waypoint = self._map.get_waypoint(location, project_to_road=True, lane_type=carla.LaneType.Driving)
    lane_location = lane_waypoint.transform.location
    lane_forward_vector = lane_waypoint.transform.rotation.get_forward_vector()

    state = {
        'speed': speed,
        'location': np.array([location.x, location.y, location.z]),
        'forward_vector': np.array([forward_vector.x, forward_vector.y]),
        'acceleration': np.array([acceleration.x, acceleration.y, acceleration.z]),
        'velocity': np.array([velocity.x, velocity.y, velocity.z]),
        'angular_velocity': np.array([angular_velocity.x, angular_velocity.y, angular_velocity.z]),
        'rotation': np.array([transform.rotation.pitch, transform.rotation.yaw, transform.rotation.roll]),
        'is_junction': is_junction,
        'lane_location': np.array([lane_location.x, lane_location.y]),
        'lane_forward': np.array([lane_forward_vector.x, lane_forward_vector.y]),
        'tl_state': light_state,
        'tl_dis': self._traffic_light_helper.active_light_dis,
    }

    # NOTE(review): 'lane_forward' is unconditionally overwritten below, so the
    # value stored in the dict above is dead. Also, if lane_waypoint were None the
    # attribute accesses above would already have raised AttributeError, making
    # the None branch unreachable — confirm before relying on it.
    if lane_waypoint is None:
        state['lane_forward'] = None
    else:
        lane_forward_vector = lane_waypoint.transform.get_forward_vector()
        state['lane_forward'] = np.array([lane_forward_vector.x, lane_forward_vector.y])

    return state
def get_sensor_data(self) -> Dict:
    """
    Get all sensor data and bird-eye view data if exist in current world. Bird-eye view will be
    converted to an multi-channel image.

    :Returns:
        Dict: Sensor and Bev data dict.
    """
    data = self._sensor_helper.get_sensors_data()

    for obs_item in self._obs_cfg:
        obs_type = obs_item.type
        if obs_type not in OBS_TYPE_LIST:
            raise NotImplementedError("observation type %s not implemented" % obs_type)
        if obs_type == 'bev':
            data[obs_item.name] = get_birdview(self._bev_wrapper.get_bev_data())

    return data
def get_information(self) -> Dict:
    """
    Get running information including time and ran-light counts in current world.

    :Returns:
        Dict: Information dict.
    """
    light_helper = self._traffic_light_helper
    return {
        'tick': self._tick,
        'timestamp': self._timestamp,
        'total_lights': light_helper.total_lights,
        'total_lights_ran': light_helper.total_lights_ran,
    }
def get_navigation(self) -> Dict:
    """
    Get navigation info in current world. Most of the contents come from planner.

    :Returns:
        Dict: Navigation dict.
    """
    command = self._planner.node_road_option
    node_location = self._planner.node_waypoint.transform.location
    node_forward = self._planner.node_waypoint.transform.rotation.get_forward_vector()
    target_location = self._planner.target_waypoint.transform.location
    target_forward = self._planner.target_waypoint.transform.rotation.get_forward_vector()
    waypoint_list = self._planner.get_waypoints_list(self._waypoint_num)
    direction_list = self._planner.get_direction_list(self._waypoint_num)
    agent_state = self._planner.agent_state
    speed_limit = self._planner.speed_limit
    # Refresh end-of-route bookkeeping from the planner every call.
    self._end_distance = self._planner.distance_to_goal
    self._end_timeout = self._planner.timeout

    if self._bev_wrapper is not None:
        self._bev_wrapper.update_waypoints(waypoint_list)

    # Flatten each waypoint to [x, y, forward_x, forward_y].
    waypoint_location_list = []
    for wp in waypoint_list:
        wp_loc = wp.transform.location
        wp_vec = wp.transform.rotation.get_forward_vector()
        waypoint_location_list.append([wp_loc.x, wp_loc.y, wp_vec.x, wp_vec.y])

    # Wrong-direction detection: compare vehicle-lane yaw against route yaw.
    if not self._off_road:
        current_waypoint = self._planner.current_waypoint
        node_waypoint = self._planner.node_waypoint

        # Lanes and roads are too chaotic at junctions
        if current_waypoint.is_junction or node_waypoint.is_junction:
            self._wrong_direction = False
        else:
            node_yaw = node_waypoint.transform.rotation.yaw % 360
            cur_yaw = current_waypoint.transform.rotation.yaw % 360

            wp_angle = (node_yaw - cur_yaw) % 360
            # Heading off by more than 150 degrees counts as driving against the route.
            if 150 <= wp_angle <= (360 - 150):
                self._wrong_direction = True
            else:
                # Changing to a lane with the same direction
                self._wrong_direction = False

    navigation = {
        'agent_state': agent_state.value,
        'command': command.value,
        'node': np.array([node_location.x, node_location.y]),
        'node_forward': np.array([node_forward.x, node_forward.y]),
        'target': np.array([target_location.x, target_location.y]),
        'target_forward': np.array([target_forward.x, target_forward.y]),
        'waypoint_list': np.array(waypoint_location_list),
        'speed_limit': np.array(speed_limit),
        'direction_list': np.array(direction_list)
    }
    return navigation
def run_step(self) -> None:
    """
    Run one step simulation.
    This will tick Carla world and update informations for all sensors and measurement.
    """
    self._world.tick()
    self._tick += 1
    world_snapshot = self._world.get_snapshot()
    self._timestamp = world_snapshot.timestamp.elapsed_seconds

    # Refresh cached per-tick actor measurements.
    CarlaDataProvider.on_carla_tick()

    if self._planner is not None:
        self._planner.run_step()

    self._collided = self._collision_sensor.collided

    self._traffic_light_helper.tick()
    self._ran_light = self._traffic_light_helper.ran_light

    if self._bev_wrapper is not None:
        # Only tick the BeV once a route has been registered with the provider.
        if CarlaDataProvider._hero_vehicle_route is not None:
            self._bev_wrapper.tick()
def apply_control(self, control: Dict = None) -> None:
    """
    Apply control signal for hero player in simulator.
    This will send message to the client and the control takes effect in next tick

    :Arguments:
        - control (dict, optional): Control signal dict. Default to None.
    """
    if control is None:
        return
    self._hero_actor.apply_control(control_to_signal(control))
def clean_up(self) -> None:
    """
    Destroy all actors and sensors in current world. Clear all messages saved in simulator and data provider.
    This will NOT destroy the Carla client, so simulator can use same carla client to start next episode.
    """
    # Walker AI controllers must be stopped before being destroyed.
    for actor in self._actor_map['walker_controller']:
        actor.stop()
        actor.destroy()
    self._actor_map['walker_controller'].clear()
    self._actor_map.clear()

    if self._sensor_helper is not None:
        self._sensor_helper.clean_up()
    if self._bev_wrapper is not None:
        self._bev_wrapper.clear()
    if self._collision_sensor is not None:
        self._collision_sensor.clear()
    if self._planner is not None:
        self._planner.clean_up()

    # Reset per-episode counters and status flags.
    self._tick = 0
    self._timestamp = 0
    self._collided = False
    self._ran_light = False
    self._off_road = False
    self._wrong_direction = False
    self._end_distance = float('inf')
    self._end_timeout = float('inf')

    CarlaDataProvider.clean_up()
@property
def town_name(self) -> str:
    """Name of the currently loaded town."""
    return self._town_name

@property
def hero_player(self) -> carla.Actor:
    """Hero actor in simulation (None before init())."""
    return self._hero_actor

@property
def collided(self) -> bool:
    """Whether a collision happened in the current episode."""
    return self._collided

@property
def ran_light(self) -> bool:
    """Whether the hero ran a red light in the current frame."""
    return self._ran_light

@property
def off_road(self) -> bool:
    """Whether the hero is off the road in the current frame."""
    return self._off_road

@property
def wrong_direction(self) -> bool:
    """Whether the hero is driving against the route direction."""
    return self._wrong_direction

@property
def end_distance(self) -> float:
    """Remaining distance to the navigation target."""
    return self._end_distance

@property
def end_timeout(self) -> float:
    """Timeout for the entire route provided by the planner."""
    return self._end_timeout
@property
def total_distance(self) -> float:
    """Total route distance reported by the planner (set in apply_planner)."""
    return self._total_distance

@property
def total_diatance(self) -> float:
    """Deprecated misspelled alias of ``total_distance``, kept so existing
    callers that use the original (typo'd) name keep working."""
    return self._total_distance
| [
"core.utils.simulator_utils.carla_utils.control_to_signal",
"core.utils.simulator_utils.sensor_utils.TrafficLightHelper",
"random.shuffle",
"core.simulators.carla_data_provider.CarlaDataProvider.get_acceleration",
"core.simulators.carla_data_provider.CarlaDataProvider.get_speed_vector",
"collections.defau... | [((5195, 5234), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""carla"""'], {}), "('carla')\n", (5225, 5234), False, 'import pkg_resources\n'), ((7531, 7548), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7542, 7548), False, 'from collections import defaultdict\n'), ((11389, 11433), 'core.simulators.carla_data_provider.CarlaDataProvider.get_spawn_point', 'CarlaDataProvider.get_spawn_point', (['start_pos'], {}), '(start_pos)\n', (11422, 11433), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((11516, 11592), 'core.simulators.carla_data_provider.CarlaDataProvider.request_new_actor', 'CarlaDataProvider.request_new_actor', (['VEHICLE_NAME', 'start_waypoint', 'ROLE_NAME'], {}), '(VEHICLE_NAME, start_waypoint, ROLE_NAME)\n', (11551, 11592), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((11886, 11914), 'random.shuffle', 'random.shuffle', (['spawn_points'], {}), '(spawn_points)\n', (11900, 11914), False, 'import random\n'), ((17514, 17563), 'core.utils.simulator_utils.sensor_utils.SensorHelper', 'SensorHelper', (['self._obs_cfg', 'self._camera_aug_cfg'], {}), '(self._obs_cfg, self._camera_aug_cfg)\n', (17526, 17563), False, 'from core.utils.simulator_utils.sensor_utils import SensorHelper, CollisionSensor, TrafficLightHelper\n'), ((18122, 18176), 'core.utils.simulator_utils.sensor_utils.CollisionSensor', 'CollisionSensor', (['self._hero_actor', 'self._col_threshold'], {}), '(self._hero_actor, self._col_threshold)\n', (18137, 18176), False, 'from core.utils.simulator_utils.sensor_utils import SensorHelper, CollisionSensor, TrafficLightHelper\n'), ((18214, 18250), 'core.utils.simulator_utils.sensor_utils.TrafficLightHelper', 'TrafficLightHelper', (['self._hero_actor'], {}), '(self._hero_actor)\n', (18232, 18250), False, 'from core.utils.simulator_utils.sensor_utils import SensorHelper, CollisionSensor, TrafficLightHelper\n'), ((20957, 
21006), 'core.simulators.carla_data_provider.CarlaDataProvider.get_transform', 'CarlaDataProvider.get_transform', (['self._hero_actor'], {}), '(self._hero_actor)\n', (20988, 21006), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((21124, 21176), 'core.simulators.carla_data_provider.CarlaDataProvider.get_acceleration', 'CarlaDataProvider.get_acceleration', (['self._hero_actor'], {}), '(self._hero_actor)\n', (21158, 21176), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((21204, 21260), 'core.simulators.carla_data_provider.CarlaDataProvider.get_angular_velocity', 'CarlaDataProvider.get_angular_velocity', (['self._hero_actor'], {}), '(self._hero_actor)\n', (21242, 21260), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((21280, 21332), 'core.simulators.carla_data_provider.CarlaDataProvider.get_speed_vector', 'CarlaDataProvider.get_speed_vector', (['self._hero_actor'], {}), '(self._hero_actor)\n', (21314, 21332), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((27409, 27442), 'core.simulators.carla_data_provider.CarlaDataProvider.on_carla_tick', 'CarlaDataProvider.on_carla_tick', ([], {}), '()\n', (27440, 27442), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((29396, 29424), 'core.simulators.carla_data_provider.CarlaDataProvider.clean_up', 'CarlaDataProvider.clean_up', ([], {}), '()\n', (29422, 29424), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((5246, 5272), 'distutils.version.LooseVersion', 'LooseVersion', (['dist.version'], {}), '(dist.version)\n', (5258, 5272), False, 'from distutils.version import LooseVersion\n'), ((5275, 5296), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.9.8"""'], {}), "('0.9.8')\n", (5287, 5296), False, 'from distutils.version import LooseVersion\n'), ((5535, 5559), 'carla.Client', 'carla.Client', (['host', 'port'], {}), '(host, port)\n', 
(5547, 5559), False, 'import carla\n'), ((9407, 9449), 'core.simulators.carla_data_provider.CarlaDataProvider.set_client', 'CarlaDataProvider.set_client', (['self._client'], {}), '(self._client)\n', (9435, 9449), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((9462, 9502), 'core.simulators.carla_data_provider.CarlaDataProvider.set_world', 'CarlaDataProvider.set_world', (['self._world'], {}), '(self._world)\n', (9489, 9502), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((9762, 9795), 'core.simulators.carla_data_provider.CarlaDataProvider.on_carla_tick', 'CarlaDataProvider.on_carla_tick', ([], {}), '()\n', (9793, 9795), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((12217, 12242), 'random.choice', 'random.choice', (['blueprints'], {}), '(blueprints)\n', (12230, 12242), False, 'import random\n'), ((20331, 20373), 'core.simulators.carla_data_provider.CarlaDataProvider.get_spawn_point', 'CarlaDataProvider.get_spawn_point', (['end_idx'], {}), '(end_idx)\n', (20364, 20373), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((20885, 20930), 'core.simulators.carla_data_provider.CarlaDataProvider.get_speed', 'CarlaDataProvider.get_speed', (['self._hero_actor'], {}), '(self._hero_actor)\n', (20912, 20930), False, 'from core.simulators.carla_data_provider import CarlaDataProvider\n'), ((22053, 22099), 'numpy.array', 'np.array', (['[location.x, location.y, location.z]'], {}), '([location.x, location.y, location.z])\n', (22061, 22099), True, 'import numpy as np\n'), ((22131, 22177), 'numpy.array', 'np.array', (['[forward_vector.x, forward_vector.y]'], {}), '([forward_vector.x, forward_vector.y])\n', (22139, 22177), True, 'import numpy as np\n'), ((22207, 22265), 'numpy.array', 'np.array', (['[acceleration.x, acceleration.y, acceleration.z]'], {}), '([acceleration.x, acceleration.y, acceleration.z])\n', (22215, 22265), True, 'import numpy as np\n'), 
((22291, 22337), 'numpy.array', 'np.array', (['[velocity.x, velocity.y, velocity.z]'], {}), '([velocity.x, velocity.y, velocity.z])\n', (22299, 22337), True, 'import numpy as np\n'), ((22371, 22441), 'numpy.array', 'np.array', (['[angular_velocity.x, angular_velocity.y, angular_velocity.z]'], {}), '([angular_velocity.x, angular_velocity.y, angular_velocity.z])\n', (22379, 22441), True, 'import numpy as np\n'), ((22467, 22557), 'numpy.array', 'np.array', (['[transform.rotation.pitch, transform.rotation.yaw, transform.rotation.roll]'], {}), '([transform.rotation.pitch, transform.rotation.yaw, transform.\n rotation.roll])\n', (22475, 22557), True, 'import numpy as np\n'), ((22623, 22667), 'numpy.array', 'np.array', (['[lane_location.x, lane_location.y]'], {}), '([lane_location.x, lane_location.y])\n', (22631, 22667), True, 'import numpy as np\n'), ((22697, 22753), 'numpy.array', 'np.array', (['[lane_forward_vector.x, lane_forward_vector.y]'], {}), '([lane_forward_vector.x, lane_forward_vector.y])\n', (22705, 22753), True, 'import numpy as np\n'), ((23073, 23129), 'numpy.array', 'np.array', (['[lane_forward_vector.x, lane_forward_vector.y]'], {}), '([lane_forward_vector.x, lane_forward_vector.y])\n', (23081, 23129), True, 'import numpy as np\n'), ((26576, 26620), 'numpy.array', 'np.array', (['[node_location.x, node_location.y]'], {}), '([node_location.x, node_location.y])\n', (26584, 26620), True, 'import numpy as np\n'), ((26650, 26692), 'numpy.array', 'np.array', (['[node_forward.x, node_forward.y]'], {}), '([node_forward.x, node_forward.y])\n', (26658, 26692), True, 'import numpy as np\n'), ((26716, 26764), 'numpy.array', 'np.array', (['[target_location.x, target_location.y]'], {}), '([target_location.x, target_location.y])\n', (26724, 26764), True, 'import numpy as np\n'), ((26796, 26842), 'numpy.array', 'np.array', (['[target_forward.x, target_forward.y]'], {}), '([target_forward.x, target_forward.y])\n', (26804, 26842), True, 'import numpy as np\n'), ((26873, 
26905), 'numpy.array', 'np.array', (['waypoint_location_list'], {}), '(waypoint_location_list)\n', (26881, 26905), True, 'import numpy as np\n'), ((26934, 26955), 'numpy.array', 'np.array', (['speed_limit'], {}), '(speed_limit)\n', (26942, 26955), True, 'import numpy as np\n'), ((26987, 27011), 'numpy.array', 'np.array', (['direction_list'], {}), '(direction_list)\n', (26995, 27011), True, 'import numpy as np\n'), ((28223, 28249), 'core.utils.simulator_utils.carla_utils.control_to_signal', 'control_to_signal', (['control'], {}), '(control)\n', (28240, 28249), False, 'from core.utils.simulator_utils.carla_utils import control_to_signal, get_birdview\n'), ((13859, 13876), 'carla.Transform', 'carla.Transform', ([], {}), '()\n', (13874, 13876), False, 'import carla\n'), ((14224, 14249), 'random.choice', 'random.choice', (['blueprints'], {}), '(blueprints)\n', (14237, 14249), False, 'import random\n'), ((17842, 17862), 'core.utils.simulator_utils.map_utils.BeVWrapper', 'BeVWrapper', (['obs_item'], {}), '(obs_item)\n', (17852, 17862), False, 'from core.utils.simulator_utils.map_utils import BeVWrapper\n'), ((5905, 5932), 'core.utils.others.tcp_helper.find_traffic_manager_port', 'find_traffic_manager_port', ([], {}), '()\n', (5930, 5932), False, 'from core.utils.others.tcp_helper import find_traffic_manager_port\n'), ((15791, 15808), 'carla.Transform', 'carla.Transform', ([], {}), '()\n', (15806, 15808), False, 'import carla\n'), ((14533, 14548), 'random.random', 'random.random', ([], {}), '()\n', (14546, 14548), False, 'import random\n')] |
# Copyright 2014 <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME> and <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions to calculate the z-projection matrices.
Calculates the full size matrices for the projection on the quantization
axis for electron spin, orbital angular momentum, nuclear spin, and the
coupled angular momentum F. Essentially takes results from jz and puts
them in the full Hilbert space.
Calls jz from ang_mom.
Last updated 2018-07-04 MAZ
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
from numpy import identity
from scipy.linalg import kron
from elecsus.libs.ang_mom import jz
def sz(L, S, I):
    """Electron-spin z projection embedded in the full product space.

    Builds identity(2L+1) (x) jz(S) (x) identity(2I+1) so that the spin
    operator acts on the whole |L, S, I> Hilbert space.
    """
    spin_z = jz(S)
    orbital_eye = identity(int(2 * L + 1))
    nuclear_eye = identity(int(2 * I + 1))
    return kron(kron(orbital_eye, spin_z), nuclear_eye)
def lz(L, S, I):
    """Orbital angular momentum z projection in the full product space.

    Builds jz(L) (x) identity(2S+1) (x) identity(2I+1).
    """
    orbital_z = jz(L)
    spin_eye = identity(int(2 * S + 1))
    nuclear_eye = identity(int(2 * I + 1))
    return kron(kron(orbital_z, spin_eye), nuclear_eye)
def Iz(L, S, I):
    """Nuclear-spin z projection in the full product space.

    Builds identity(2L+1) (x) identity(2S+1) (x) jz(I).
    """
    orbital_eye = identity(int(2 * L + 1))
    spin_eye = identity(int(2 * S + 1))
    nuclear_z = jz(I)
    return kron(kron(orbital_eye, spin_eye), nuclear_z)
def fz(L, S, I):
    """Coupled angular momentum F_z in the full product space.

    F_z = J_z (x) 1_I + 1_J (x) I_z, where the electronic part is
    J_z = L_z (x) 1_S + 1_L (x) S_z.
    """
    dim_s = int(2 * S + 1)
    dim_l = int(2 * L + 1)
    dim_i = int(2 * I + 1)
    # Electronic J_z on the (L x S) subspace.
    electronic_jz = kron(jz(L), identity(dim_s)) + kron(identity(dim_l), jz(S))
    electronic_eye = identity(dim_l * dim_s)
    # Extend to include the nuclear-spin factor.
    return kron(electronic_jz, identity(dim_i)) + kron(electronic_eye, jz(I))
| [
"scipy.linalg.kron",
"numpy.identity",
"elecsus.libs.ang_mom.jz"
] | [((1182, 1187), 'elecsus.libs.ang_mom.jz', 'jz', (['S'], {}), '(S)\n', (1184, 1187), False, 'from elecsus.libs.ang_mom import jz\n'), ((1213, 1225), 'numpy.identity', 'identity', (['gL'], {}), '(gL)\n', (1221, 1225), False, 'from numpy import identity\n'), ((1251, 1263), 'numpy.identity', 'identity', (['gI'], {}), '(gI)\n', (1259, 1263), False, 'from numpy import identity\n'), ((1347, 1359), 'numpy.identity', 'identity', (['gS'], {}), '(gS)\n', (1355, 1359), False, 'from numpy import identity\n'), ((1367, 1372), 'elecsus.libs.ang_mom.jz', 'jz', (['L'], {}), '(L)\n', (1369, 1372), False, 'from elecsus.libs.ang_mom import jz\n'), ((1398, 1410), 'numpy.identity', 'identity', (['gI'], {}), '(gI)\n', (1406, 1410), False, 'from numpy import identity\n'), ((1512, 1524), 'numpy.identity', 'identity', (['gS'], {}), '(gS)\n', (1520, 1524), False, 'from numpy import identity\n'), ((1532, 1544), 'numpy.identity', 'identity', (['gL'], {}), '(gL)\n', (1540, 1544), False, 'from numpy import identity\n'), ((1556, 1561), 'elecsus.libs.ang_mom.jz', 'jz', (['I'], {}), '(I)\n', (1558, 1561), False, 'from elecsus.libs.ang_mom import jz\n'), ((1649, 1654), 'elecsus.libs.ang_mom.jz', 'jz', (['S'], {}), '(S)\n', (1651, 1654), False, 'from elecsus.libs.ang_mom import jz\n'), ((1662, 1674), 'numpy.identity', 'identity', (['gS'], {}), '(gS)\n', (1670, 1674), False, 'from numpy import identity\n'), ((1700, 1705), 'elecsus.libs.ang_mom.jz', 'jz', (['L'], {}), '(L)\n', (1702, 1705), False, 'from elecsus.libs.ang_mom import jz\n'), ((1713, 1725), 'numpy.identity', 'identity', (['gL'], {}), '(gL)\n', (1721, 1725), False, 'from numpy import identity\n'), ((1777, 1789), 'numpy.identity', 'identity', (['gJ'], {}), '(gJ)\n', (1785, 1789), False, 'from numpy import identity\n'), ((1815, 1820), 'elecsus.libs.ang_mom.jz', 'jz', (['I'], {}), '(I)\n', (1817, 1820), False, 'from elecsus.libs.ang_mom import jz\n'), ((1828, 1840), 'numpy.identity', 'identity', (['gI'], {}), '(gI)\n', (1836, 1840), False, 
'from numpy import identity\n'), ((1276, 1288), 'scipy.linalg.kron', 'kron', (['Li', 'Sz'], {}), '(Li, Sz)\n', (1280, 1288), False, 'from scipy.linalg import kron\n'), ((1423, 1435), 'scipy.linalg.kron', 'kron', (['Lz', 'Si'], {}), '(Lz, Si)\n', (1427, 1435), False, 'from scipy.linalg import kron\n'), ((1574, 1586), 'scipy.linalg.kron', 'kron', (['Li', 'Si'], {}), '(Li, Si)\n', (1578, 1586), False, 'from scipy.linalg import kron\n'), ((1746, 1758), 'scipy.linalg.kron', 'kron', (['Lz', 'Si'], {}), '(Lz, Si)\n', (1750, 1758), False, 'from scipy.linalg import kron\n'), ((1758, 1770), 'scipy.linalg.kron', 'kron', (['Li', 'Sz'], {}), '(Li, Sz)\n', (1762, 1770), False, 'from scipy.linalg import kron\n'), ((1848, 1860), 'scipy.linalg.kron', 'kron', (['Jz', 'Ii'], {}), '(Jz, Ii)\n', (1852, 1860), False, 'from scipy.linalg import kron\n'), ((1860, 1872), 'scipy.linalg.kron', 'kron', (['Ji', 'Iz'], {}), '(Ji, Iz)\n', (1864, 1872), False, 'from scipy.linalg import kron\n')] |
from typing import Any, Tuple
import numpy as np
from hlrl.core.common.wrappers import MethodWrapper
class VectorizedEnv(MethodWrapper):
    """Wrapper over vectorized environments that normalizes step outputs:
    rewards become column vectors and the terminal flag is replicated
    once per sub-environment state.
    """

    def step(
        self,
        action: Tuple[Any]
    ) -> Tuple[Tuple[Any], Tuple[Any], Tuple[Any], Any]:
        """Advance the wrapped environment one step with the given action.

        Args:
            action: The action to take in the environment.

        Returns:
            The next state, the reward reshaped to a trailing singleton
            axis, the terminal flag repeated for each state entry, and the
            extra info from the underlying step.
        """
        state, reward, terminal, info = self.om.step(action)
        reward_col = np.expand_dims(reward, axis=-1)
        # Broadcast the single terminal flag across every sub-environment.
        terminal_rep = np.array([[terminal] for _ in state])
        return state, reward_col, terminal_rep, info
| [
"numpy.expand_dims"
] | [((737, 768), 'numpy.expand_dims', 'np.expand_dims', (['reward'], {'axis': '(-1)'}), '(reward, axis=-1)\n', (751, 768), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys, os
import cv2
import argparse
import numpy as np
import warnings
from keras import backend as K
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import util, utilFit, utilDataGenerator, utilModelREDNet
# Global one-time setup: project init, quiet warnings, Keras data layout.
util.init()
warnings.filterwarnings('ignore')
K.set_image_data_format('channels_last')
if K.backend() == 'tensorflow':
    import tensorflow as tf  # Memory control with Tensorflow
    # Grow GPU memory on demand instead of grabbing it all up front.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth=True
    sess = tf.compat.v1.Session(config=config)
# ----------------------------------------------------------------------------
def load_dataset_folds(dbname, dbparam):
    """Return (train_folds, test_folds) lists of dataset sub-folders.

    `dbparam` is database dependent: the held-out year index for 'dibco',
    the ground-truth index (0/1) for 'palm'; unused for the rest.
    Raises Exception for an unknown database name.
    """
    train_folds = []
    test_folds = []
    # One entry per DIBCO competition year (index 0 = 2009 ... 6 = 2016).
    DIBCO = [ ['Dibco/2009/handwritten_GR', 'Dibco/2009/printed_GR'],
              ['Dibco/2010/handwritten_GR'],
              ['Dibco/2011/handwritten_GR', 'Dibco/2011/printed_GR'],
              ['Dibco/2012/handwritten_GR'],
              ['Dibco/2013/handwritten_GR', 'Dibco/2013/printed_GR'],
              ['Dibco/2014/handwritten_GR'],
              ['Dibco/2016/handwritten_GR'] ]
    PALM_train = [ ['Palm/Challenge-1-ForTrain/gt1_GR'], ['Palm/Challenge-1-ForTrain/gt2_GR'] ]
    PALM_test = [ ['Palm/Challenge-1-ForTest/gt1_GR'], ['Palm/Challenge-1-ForTest/gt2_GR'] ]
    PHI_train = ['PHI/train/phi_GR']
    PHI_test = ['PHI/test/phi_GR']
    EINSIELDELN_train = ['Einsieldeln/train/ein_GR']
    EINSIELDELN_test = ['Einsieldeln/test/ein_GR']
    SALZINNES_train = ['Salzinnes/train/sal_GR']
    SALZINNES_test = ['Salzinnes/test/sal_GR']
    VOYNICH_test = ['Voynich/voy_GR']
    BDI_train = ['BDI/train/bdi11_GR']
    BDI_test = ['BDI/test/bdi11_GR']
    if dbname == 'dibco':
        # Leave-one-year-out: test on year `dbparam`, train on the rest.
        dbparam = int(dbparam)
        test_folds = DIBCO[dbparam]
        DIBCO.pop(dbparam)
        train_folds = [val for sublist in DIBCO for val in sublist]
    elif dbname == 'palm':
        # dbparam selects which ground truth (gt1 / gt2) to use.
        dbparam = int(dbparam)
        train_folds = PALM_train[dbparam]
        test_folds = PALM_test[dbparam]
    elif dbname == 'phi':
        train_folds = PHI_train
        test_folds = PHI_test
    elif dbname == 'ein':
        train_folds = EINSIELDELN_train
        test_folds = EINSIELDELN_test
    elif dbname == 'sal':
        train_folds = SALZINNES_train
        test_folds = SALZINNES_test
    elif dbname == 'voy':
        # Voynich has no training set: train on all DIBCO years.
        train_folds = [val for sublist in DIBCO for val in sublist]
        test_folds = VOYNICH_test
    elif dbname == 'bdi':
        train_folds = BDI_train
        test_folds = BDI_test
    elif dbname == 'all':
        # Cross-database setup: DIBCO 2014/2016 plus every test split.
        test_folds = [DIBCO[5], DIBCO[6]]
        test_folds.append(PALM_test[0])
        test_folds.append(PALM_test[1])
        test_folds.append(PHI_test)
        test_folds.append(EINSIELDELN_test)
        test_folds.append(SALZINNES_test)
        # Pop the higher index first so the lower one stays valid.
        DIBCO.pop(6)
        DIBCO.pop(5)
        train_folds = [[val for sublist in DIBCO for val in sublist]]
        train_folds.append(PALM_train[0])
        train_folds.append(PALM_train[1])
        train_folds.append(PHI_train)
        train_folds.append(EINSIELDELN_train)
        train_folds.append(SALZINNES_train)
        test_folds = [val for sublist in test_folds for val in sublist] # transform to flat lists
        train_folds = [val for sublist in train_folds for val in sublist]
    else:
        raise Exception('Unknown database name')
    return train_folds, test_folds
# ----------------------------------------------------------------------------
def save_images(autoencoder, args, test_folds):
    """Binarize every test image with the trained SAE and write the results.

    Slides a window over each (inverted) grayscale image, thresholds the
    model prediction at args.threshold, and saves the binary image to a
    '_PR-<model>' folder mirroring the '_GR' input folder.
    """
    assert(args.threshold != -1)
    array_files = util.load_array_of_files(args.path, test_folds)
    for fname in array_files:
        print('Processing image', fname)
        img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
        img = np.asarray(img)
        # Remember the original size so the output can be restored later.
        rows = img.shape[0]
        cols = img.shape[1]
        # Upscale images smaller than the window so at least one patch fits.
        if img.shape[0] < args.window or img.shape[1] < args.window:
            new_rows = args.window if img.shape[0] < args.window else img.shape[0]
            new_cols = args.window if img.shape[1] < args.window else img.shape[1]
            img = cv2.resize(img, (new_cols, new_rows), interpolation = cv2.INTER_CUBIC)
        # Invert: the network expects ink as high values.
        img = np.asarray(img).astype('float32')
        img = 255. - img
        finalImg = np.zeros(img.shape, dtype=bool)
        for (x, y, window) in utilDataGenerator.sliding_window(img, stepSize=args.step, windowSize=(args.window, args.window)):
            # Skip partial windows at the right/bottom borders.
            if window.shape[0] != args.window or window.shape[1] != args.window:
                continue
            roi = img[y:(y + args.window), x:(x + args.window)].copy()
            roi = roi.reshape(1, args.window, args.window, 1)
            roi = roi.astype('float32') #/ 255.
            prediction = autoencoder.predict(roi)
            # Threshold the activation map into a boolean ink mask.
            prediction = (prediction > args.threshold)
            finalImg[y:(y + args.window), x:(x + args.window)] = prediction[0].reshape(args.window, args.window)
        # Invert back so ink is black (0) on white (255) and convert to uint8.
        finalImg = 1 - finalImg
        finalImg *= 255
        finalImg = finalImg.astype('uint8')
        # Undo the earlier upscaling, if any.
        if finalImg.shape[0] != rows or finalImg.shape[1] != cols:
            finalImg = cv2.resize(finalImg, (cols, rows), interpolation = cv2.INTER_CUBIC)
        outFilename = fname.replace('_GR/', '_PR-' + args.modelpath + '/')
        util.mkdirp( os.path.dirname(outFilename) )
        cv2.imwrite(outFilename, finalImg)
# ----------------------------------------------------------------------------
def parse_menu():
    """Define and parse the command-line arguments for training/testing.

    Returns the parsed argparse.Namespace; -s (step) of -1 is normalized
    to the window size (non-overlapping patches).
    """
    parser = argparse.ArgumentParser(description='A selectional auto-encoder approach for document image binarization')
    parser.add_argument('-path', required=True, help='base path to datasets')
    parser.add_argument('-db', required=True, choices=['dibco','palm','phi','ein','sal','voy','bdi','all'], help='Database name')
    parser.add_argument('-dbp', help='Database dependent parameters [dibco fold, palm gt]')
    parser.add_argument('--aug', action='store_true', help='Load augmentation folders')
    parser.add_argument('-w', default=256, dest='window', type=int, help='window size')
    parser.add_argument('-s', default=-1, dest='step', type=int, help='step size. -1 to use window size')
    parser.add_argument('-f', default=64, dest='nb_filters', type=int, help='nb_filters')
    parser.add_argument('-k', default=5, dest='kernel', type=int, help='kernel size')
    parser.add_argument('-drop', default=0, dest='dropout', type=float, help='dropout value')
    parser.add_argument('-page', default=-1, type=int, help='Page size to divide the training set. -1 to load all')
    parser.add_argument('-start_from', default=0, type=int, help='Start from this page')
    parser.add_argument('-super', default=1, dest='nb_super_epoch', type=int, help='nb_super_epoch')
    parser.add_argument('-th', default=-1, dest='threshold', type=float, help='threshold. -1 to test from 0 to 1')
    parser.add_argument('-e', default=200, dest='nb_epoch', type=int, help='nb_epoch')
    parser.add_argument('-b', default=10, dest='batch', type=int, help='batch size')
    parser.add_argument('-esmode', default='p', dest='early_stopping_mode', help="early_stopping_mode. g='global', p='per page'")
    parser.add_argument('-espat', default=10, dest='early_stopping_patience',type=int,help="early_stopping_patience")
    parser.add_argument('-verbose', default=1, type=int, help='1=show batch increment, other=mute')
    parser.add_argument('-stride', default=2, type=int, help='RED-Net stride')
    parser.add_argument('-every', default=1, type=int, help='RED-Net shortcuts every x layers')
    parser.add_argument('--test', action='store_true', help='Only run test')
    parser.add_argument('-loadmodel', type=str, help='Weights filename to load for test')
    args = parser.parse_args()
    # -1 means "stride a full window": non-overlapping patches.
    if args.step == -1:
        args.step = args.window
    return args
# ----------------------------------------------------------------------------
def define_weights_filename(config):
    """Build the .h5 weights filename that identifies this run.

    When fine-tuning from an existing model (config.loadmodel is set) the
    name derives from that model plus the target database; otherwise it
    encodes every hyper-parameter of the training configuration.
    """
    if config.loadmodel is not None:
        return config.loadmodel + '_ftune' + str(config.db) + str(config.dbp) + '.h5'
    # Encode the run configuration piece by piece; optional markers are
    # only appended when the corresponding option is active.
    base = '{}_{}_{}x{}_s{}'.format(config.db, config.dbp,
                                    config.window, config.window, config.step)
    if config.aug:
        base += '_aug'
    if config.dropout > 0:
        base += '_drop' + str(config.dropout)
    base += '_f{}_k{}'.format(config.nb_filters, config.kernel)
    if config.stride > 1:
        base += '_s' + str(config.stride)
    base += '_se{}_e{}_b{}_es{}'.format(config.nb_super_epoch, config.nb_epoch,
                                        config.batch, config.early_stopping_mode)
    return 'model_weights_' + base + '.h5'
# ----------------------------------------------------------------------------
def build_SAE_network(config, weights_filename):
    """Build, compile and optionally warm-start the RED-Net auto-encoder.

    Loads config.loadmodel weights when given; otherwise, in test mode,
    loads the previously trained weights from `weights_filename`.
    """
    model, _encoder, _decoder = utilModelREDNet.build_REDNet(
        5,  # number of encoder/decoder layers
        config.window, config.nb_filters,
        config.kernel, config.dropout,
        config.stride, config.every)
    model.compile(optimizer='adam', loss=util.micro_fm, metrics=['mse'])
    print(model.summary())
    if config.loadmodel is not None:
        print('Loading initial weights from', config.loadmodel)
        model.load_weights(config.loadmodel)
    elif config.test:
        print('Loading test weights from', weights_filename)
        model.load_weights(weights_filename)
    return model
# ----------------------------------------------------------------------------
def main(args=None):
    """Entry point: load data, build the SAE, train (unless --test) and
    write the binarized output images for the test set."""
    args = parse_menu()
    # Folder suffixes: '_GR' grayscale inputs, '_GT' ground-truth targets.
    x_sufix = '_GR'
    y_sufix = '_GT'
    weights_filename = define_weights_filename(args)
    print('Loading data...')
    train_folds, test_folds = load_dataset_folds(args.db, args.dbp)
    # Run data augmentation ?
    if args.aug == True: # Add the augmented folders
        for f in list(train_folds):
            train_folds.append( util.rreplace(f, '/', '/aug_', 1) )
    array_test_files = util.load_array_of_files(args.path, test_folds)
    x_test, y_test = utilDataGenerator.generate_chunks(array_test_files, x_sufix, y_sufix, args.window, args.window)
    # Training data is only loaded (lazily, by pages) when not in test mode.
    if args.test == False:
        array_train_files = util.load_array_of_files(args.path, train_folds)
        train_data_generator = utilDataGenerator.LazyChunkGenerator(array_train_files, x_sufix, y_sufix, args.page, args.window, args.step)
        train_data_generator.shuffle()
        if args.start_from > 0:
            train_data_generator.set_pos(args.start_from)
    # Echo the effective configuration.
    print('# Processing path:', args.path)
    print('# Database:', args.db)
    print('# Db param:', args.dbp)
    print('# Train data:', len(train_data_generator) if args.test == False else '--')
    print('# Test data:', x_test.shape)
    print('# Augmentation:', args.aug)
    print('# Window size:', args.window)
    print('# Step size:', args.step)
    print('# Init weights:', args.loadmodel)
    print('# nb_filters:', args.nb_filters)
    print('# kernel size:', args.kernel)
    print('# Dropout:', args.dropout)
    print('# nb_super_epoch:', args.nb_super_epoch)
    print('# nb_pages:', args.page)
    print('# nb_epoch:', args.nb_epoch)
    print('# batch:', args.batch)
    print('# early_stopping_mode:', args.early_stopping_mode)
    print('# early_stopping_patience:', args.early_stopping_patience)
    print('# Threshold:', args.threshold)
    print('# Weights filename:', weights_filename)
    autoencoder = build_SAE_network(args, weights_filename)
    best_th = args.threshold
    if args.test == False:
        args.monitor='min'
        # Training also searches for the best binarization threshold.
        best_th = utilFit.batch_fit_with_data_generator(autoencoder,
                        train_data_generator, x_test, y_test, args, weights_filename)
        # Re-Load last weights
        autoencoder.load_weights( weights_filename )
    # Save output images
    args.modelpath = weights_filename
    args.threshold = best_th
    save_images(autoencoder, args, test_folds)
# ----------------------------------------------------------------------------
# Script entry point.
if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"keras.backend.set_image_data_format",
"utilFit.batch_fit_with_data_generator",
"os.path.abspath",
"cv2.imwrite",
"os.path.dirname",
"tensorflow.compat.v1.Session",
"cv2.resize",
"utilDataGenerator.LazyChunkGenerator",
"keras.backend.backend",
"util.load_array_of_files... | [((307, 318), 'util.init', 'util.init', ([], {}), '()\n', (316, 318), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((319, 352), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (342, 352), False, 'import warnings\n'), ((353, 393), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (376, 393), True, 'from keras import backend as K\n'), ((398, 409), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (407, 409), True, 'from keras import backend as K\n'), ((504, 530), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (528, 530), True, 'import tensorflow as tf\n'), ((583, 618), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (603, 618), True, 'import tensorflow as tf\n'), ((3662, 3709), 'util.load_array_of_files', 'util.load_array_of_files', (['args.path', 'test_folds'], {}), '(args.path, test_folds)\n', (3686, 3709), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((5557, 5668), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A selectional auto-encoder approach for document image binarization"""'}), "(description=\n 'A selectional auto-encoder approach for document image binarization')\n", (5580, 5668), False, 'import argparse\n'), ((9715, 9852), 'utilModelREDNet.build_REDNet', 'utilModelREDNet.build_REDNet', (['nb_layers', 'config.window', 'config.nb_filters', 'config.kernel', 'config.dropout', 'config.stride', 'config.every'], {}), '(nb_layers, config.window, config.nb_filters,\n config.kernel, config.dropout, config.stride, config.every)\n', (9743, 9852), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((10953, 11000), 'util.load_array_of_files', 'util.load_array_of_files', (['args.path', 'test_folds'], {}), '(args.path, 
test_folds)\n', (10977, 11000), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((11022, 11122), 'utilDataGenerator.generate_chunks', 'utilDataGenerator.generate_chunks', (['array_test_files', 'x_sufix', 'y_sufix', 'args.window', 'args.window'], {}), '(array_test_files, x_sufix, y_sufix, args.\n window, args.window)\n', (11055, 11122), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((221, 246), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (236, 246), False, 'import sys, os\n'), ((3797, 3836), 'cv2.imread', 'cv2.imread', (['fname', 'cv2.IMREAD_GRAYSCALE'], {}), '(fname, cv2.IMREAD_GRAYSCALE)\n', (3807, 3836), False, 'import cv2\n'), ((3851, 3866), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (3861, 3866), True, 'import numpy as np\n'), ((4342, 4373), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'bool'}), '(img.shape, dtype=bool)\n', (4350, 4373), True, 'import numpy as np\n'), ((4405, 4506), 'utilDataGenerator.sliding_window', 'utilDataGenerator.sliding_window', (['img'], {'stepSize': 'args.step', 'windowSize': '(args.window, args.window)'}), '(img, stepSize=args.step, windowSize=(args.\n window, args.window))\n', (4437, 4506), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((5410, 5444), 'cv2.imwrite', 'cv2.imwrite', (['outFilename', 'finalImg'], {}), '(outFilename, finalImg)\n', (5421, 5444), False, 'import cv2\n'), ((11174, 11222), 'util.load_array_of_files', 'util.load_array_of_files', (['args.path', 'train_folds'], {}), '(args.path, train_folds)\n', (11198, 11222), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((11254, 11366), 'utilDataGenerator.LazyChunkGenerator', 'utilDataGenerator.LazyChunkGenerator', (['array_train_files', 'x_sufix', 'y_sufix', 'args.page', 'args.window', 'args.step'], {}), '(array_train_files, x_sufix, y_sufix,\n args.page, args.window, args.step)\n', (11290, 11366), False, 'import util, utilFit, 
utilDataGenerator, utilModelREDNet\n'), ((12570, 12686), 'utilFit.batch_fit_with_data_generator', 'utilFit.batch_fit_with_data_generator', (['autoencoder', 'train_data_generator', 'x_test', 'y_test', 'args', 'weights_filename'], {}), '(autoencoder, train_data_generator,\n x_test, y_test, args, weights_filename)\n', (12607, 12686), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n'), ((4177, 4245), 'cv2.resize', 'cv2.resize', (['img', '(new_cols, new_rows)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (new_cols, new_rows), interpolation=cv2.INTER_CUBIC)\n', (4187, 4245), False, 'import cv2\n'), ((5204, 5269), 'cv2.resize', 'cv2.resize', (['finalImg', '(cols, rows)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(finalImg, (cols, rows), interpolation=cv2.INTER_CUBIC)\n', (5214, 5269), False, 'import cv2\n'), ((5370, 5398), 'os.path.dirname', 'os.path.dirname', (['outFilename'], {}), '(outFilename)\n', (5385, 5398), False, 'import sys, os\n'), ((4263, 4278), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (4273, 4278), True, 'import numpy as np\n'), ((10893, 10926), 'util.rreplace', 'util.rreplace', (['f', '"""/"""', '"""/aug_"""', '(1)'], {}), "(f, '/', '/aug_', 1)\n", (10906, 10926), False, 'import util, utilFit, utilDataGenerator, utilModelREDNet\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import statistics
from datetime import datetime
import copy
import json
import numpy as np
from os import listdir
from os.path import isfile, join
import unknown
import baseline
import known_approx as kapprox
import mary_optimal as mary
# Raw per-year DOT flight CSV directories (inputs to merge_files()).
dirs = ['/localdisk1/DOT/flights/2018', '/localdisk1/DOT/flights/2019', '/localdisk1/DOT/flights/2020']
# Per-airline CSVs produced by split_into_sources().
sourcesdir = '/localdisk1/DOT/csv/'
# DDT JSON inputs produced by ddt_input(); note trailing slash (used as a string prefix).
outputjsondir = '/localdisk1/DOT/json/'
outputdir = '/localdisk1/DOT'
# Merged CSV read back by split_into_sources().
alldata = '/localdisk1/DOT/allflights.csv'
# number of runs per repetition block (each experiment performs 2*nr runs)
nr = 15
# data set size
n = 5000
def merge_files():
    """Concatenate every CSV found under the flight directories into one file.

    Reads all files under the module-level ``dirs`` list and writes the merged
    frame to ``<outputdir>/allflights.csv`` (index dropped), printing its size.
    """
    fs = [join(d, f) for d in dirs for f in listdir(d) if isfile(join(d, f))]
    dfs = [pd.read_csv(f) for f in fs]
    onedf = pd.concat(dfs)
    # join() fixes the original `outputdir + 'allflights.csv'`, which produced
    # '/localdisk1/DOTallflights.csv' (missing separator) and therefore never
    # matched the `alldata` path read back by split_into_sources().
    onedf.to_csv(join(outputdir, 'allflights.csv'), index=False)
    print(onedf.size)
def split_into_sources():
    """Split the merged flight data into one CSV per airline.

    Each airline's rows gain a ``Demographics`` column encoding the origin
    state as an integer id; the first two columns of every output CSV are
    ``id`` and ``Demographics``.

    Returns:
        (airlines, states): the unique airline ids and the state-name list
        whose indices define the demographic encoding.
    """
    onedf = pd.read_csv(alldata)
    # getting airlines
    airlines = onedf['OP_CARRIER_AIRLINE_ID'].unique()
    states = onedf['ORIGIN_STATE_NM'].unique()
    states = list(set(states))
    values = [i for i in range(len(states))]
    for a in airlines:
        # .copy() avoids pandas' SettingWithCopyWarning: the original code
        # assigned a new column on a view of `onedf`.
        df = onedf[(onedf['OP_CARRIER_AIRLINE_ID'] == a)].copy()
        conditions = [df['ORIGIN_STATE_NM'] == s for s in states]
        df["DEMOGRAPHICS"] = np.select(conditions, values)
        # making the first two columns id and demographics
        df = df.reset_index()
        df.insert(loc=0, column='id', value=df.index)
        df.insert(loc=1, column='Demographics', value=df["DEMOGRAPHICS"])
        f = sourcesdir + str(a) + '.csv'
        df.to_csv(f, index=False)
        print('%d has %d tuples' % (a, df.size))
    return airlines, states
def read_datasets_unknown(ads):
    """Load each JSON source file into an ``unknown.MaryDataset``.

    Args:
        ads: paths to JSON files with keys 'id', 'data' and 'groups'.
    Returns:
        list of unknown.MaryDataset objects (priority fixed at 1).
    """
    bds = []
    for a in ads:
        # `with` closes the handle; the original leaked one open file per source
        with open(a) as fh:
            d = json.load(fh)
        bds.append(unknown.MaryDataset(d['id'], d['data'], d['groups'], 1))
    return bds
def read_datasets_known(ads):
    """Load each JSON source file into a ``kapprox.MaryDataset``.

    Args:
        ads: paths to JSON files with keys 'id', 'data' and 'groups'.
    Returns:
        list of kapprox.MaryDataset objects (priority fixed at 1).
    """
    bds = []
    for a in ads:
        # `with` closes the handle; the original leaked one open file per source
        with open(a, 'r') as fh:
            d = json.load(fh)
        bds.append(kapprox.MaryDataset(d['id'], d['data'], d['groups'], 1))
    return bds
def read_datasets_baseline(ads):
    """Load each JSON source file into a ``baseline.MaryDataset``.

    Args:
        ads: paths to JSON files with keys 'id', 'data' and 'groups'.
    Returns:
        list of baseline.MaryDataset objects (priority fixed at 1).
    """
    bds = []
    for a in ads:
        # `with` closes the handle; the original leaked one open file per source
        with open(a) as fh:
            d = json.load(fh)
        bds.append(baseline.MaryDataset(d['id'], d['data'], d['groups'], 1))
    return bds
def ddt_input(sourcesdir, Gs, outputdir):
    """Convert every CSV under `sourcesdir` into a DDT JSON input file.

    Each output file is named ``<outputdir><name>.json`` (`outputdir` is used
    as a plain string prefix, so it should end with a path separator) and
    holds ``{'id': i, 'groups': Gs, 'data': <list of rows>}``.

    Args:
        sourcesdir: directory containing the per-source CSV files.
        Gs: group id list stored verbatim in every output file.
        outputdir: prefix for the output JSON paths.
    """
    sfs = [f for f in listdir(sourcesdir) if isfile(join(sourcesdir, f))]
    for i, a in enumerate(sfs):
        df = pd.read_csv(join(sourcesdir, a))
        fname = outputdir + a.replace('csv', 'json', -1)
        # `with` closes the handle; the original leaked one open file per output
        with open(fname, 'w') as fh:
            json.dump({'id': i, 'groups': Gs, 'data': df.values.tolist()}, fh)
    print('done creating ddt input')
def run_exploration(sources, Gs, Qs):
    """Run the exploration-only strategy 2*nr times over the flight sources.

    Successful runs (cost != -1) are aggregated and dumped to
    results/flights_exploration.json.  Returns the results dict.
    """
    print('run_exploration')
    explore_is, explore_cs, ts = [], [], []
    ads = [join(sources, f) for f in listdir(sources) if isfile(join(sources, f))]
    # NOTE(review): datasets are loaded with read_datasets_known (kapprox) but
    # consumed by unknown.UnknownAlg — confirm the two classes are interchangeable.
    bds_raw = read_datasets_known(ads)
    for j in range(2):
        for i in range(nr):
            print('nr %d' % (j*nr+i))
            bds = copy.deepcopy(bds_raw)  # fresh copy: the algorithm mutates its datasets
            t = unknown.MaryTarget(Gs, Qs)
            alg = unknown.UnknownAlg(bds, t, Gs, None, budget)
            st = datetime.now()
            cost, iteras, rews, progs = alg.run_exploration_only()
            et = datetime.now()
            elt = (et - st).total_seconds() * 1000  # elapsed ms
            if cost != -1:  # -1 signals a failed run
                explore_cs.append(cost)
                explore_is.append(iteras)
                ts.append(elt)
    if len(explore_is) == 0:
        print('no successful run')
    results = {'time': ts, 'cost': explore_cs, 'iters': explore_is}
    json.dump(results, open('results/flights_exploration.json', 'w'))
    print('%d out of %d runs are successful.' % (len(explore_cs), nr))
    if explore_cs:
        # guard: the original divided by len(explore_cs) unconditionally and
        # raised ZeroDivisionError when every run failed
        print('explore - cost: %f iters: %f' % (sum(explore_cs)/float(len(explore_cs)),
                                                sum(explore_is)/float(len(explore_is))))
    return results
def run_ucb(sources, Gs, Qs):
    """Run the UCB strategy 2*nr times over the flight sources.

    Successful runs (cost != -1) are aggregated and dumped to
    results/flights_ucb.json.  Returns the results dict.
    """
    print('run_ucb')
    ucb_cs, ucb_is, ts = [], [], []
    ads = [join(sources, f) for f in listdir(sources) if isfile(join(sources, f))]
    # NOTE(review): datasets are loaded with read_datasets_known (kapprox) but
    # consumed by unknown.UnknownAlg — confirm the two classes are interchangeable.
    bds_raw = read_datasets_known(ads)
    for j in range(2):
        for i in range(nr):
            print('nr %d' % (j*nr+i))
            bds = copy.deepcopy(bds_raw)  # fresh copy: the algorithm mutates its datasets
            t = unknown.MaryTarget(Gs, Qs)
            alg = unknown.UnknownAlg(bds, t, Gs, None, budget)
            st = datetime.now()
            cost, iteras, rews, progs = alg.run_ucb()
            et = datetime.now()
            elt = (et - st).total_seconds() * 1000  # elapsed ms
            if cost != -1:  # -1 signals a failed run
                ucb_cs.append(cost)
                ucb_is.append(iteras)
                ts.append(elt)
    results = {'time': ts, 'cost': ucb_cs, 'iters': ucb_is}
    json.dump(results, open('results/flights_ucb.json', 'w'))
    print('%d out of %d runs are successful.' % (len(ucb_cs), nr))
    if ucb_cs:
        # guard: the original divided by len(ucb_cs) unconditionally and
        # raised ZeroDivisionError when every run failed
        print('ucb - cost: %f iters: %f' % (sum(ucb_cs)/float(len(ucb_cs)),
                                            sum(ucb_is)/float(len(ucb_is))))
    return results
def run_exploitation(sources, Gs, Qs):
    """Run the exploitation-only strategy 2*nr times over the flight sources.

    Successful runs (cost != -1) are aggregated and dumped to
    results/flights_exploitation.json.  Returns the results dict.
    """
    print('run_exploitation')
    exploit_cs, exploit_is, ts = [], [], []
    ads = [join(sources, f) for f in listdir(sources) if isfile(join(sources, f))]
    # NOTE(review): datasets are loaded with read_datasets_known (kapprox) but
    # consumed by unknown.UnknownAlg — confirm the two classes are interchangeable.
    bds_raw = read_datasets_known(ads)
    for j in range(2):
        for i in range(nr):
            print('nr %d' % (j*nr+i))
            bds = copy.deepcopy(bds_raw)  # fresh copy: the algorithm mutates its datasets
            t = unknown.MaryTarget(Gs, Qs)
            alg = unknown.UnknownAlg(bds, t, Gs, None, budget)
            st = datetime.now()
            cost, iteras, rews, progs = alg.run_exploitation_only()
            et = datetime.now()
            elt = (et - st).total_seconds() * 1000  # elapsed ms
            if cost != -1:  # -1 signals a failed run
                exploit_cs.append(cost)
                exploit_is.append(iteras)
                ts.append(elt)
    results = {'time': ts, 'cost': exploit_cs, 'iters': exploit_is}
    json.dump(results, open('results/flights_exploitation.json', 'w'))
    print('%d out of %d runs are successful.' % (len(exploit_cs), nr))
    if exploit_cs:
        # guard: the original divided by len(exploit_cs) unconditionally and
        # raised ZeroDivisionError when every run failed
        print('exploite - cost: %f iters: %f' % (sum(exploit_cs)/float(len(exploit_cs)),
                                                 sum(exploit_is)/float(len(exploit_is))))
    return results
def run_known_ddt(sources, Gs, Qs):
    """Run the coupon-collector (CC) algorithm 2*nr times over the sources.

    Successful runs (cost != -1) are aggregated and dumped to
    results/flights_cc.json.  Returns the results dict.
    """
    print('run_known_ddt')
    cc_cs, cc_is, ts = [], [], []
    ads = [join(sources, f) for f in listdir(sources) if isfile(join(sources, f))]
    bds_raw = read_datasets_known(ads)
    for j in range(2):
        for i in range(nr):
            print('nr %d' % (j*nr+i))
            t = kapprox.MaryTarget(Gs, Qs)
            bds = copy.deepcopy(bds_raw)  # fresh copy: the algorithm mutates its datasets
            alg = kapprox.ApproxAlg(bds, t, Gs, budget)
            st = datetime.now()
            cost, iteras, rews = alg.run_CC()
            et = datetime.now()
            elt = (et - st).total_seconds() * 1000  # elapsed ms
            print('cost %d iters %d' % (cost, iteras))
            if cost != -1:  # -1 signals a failed run
                cc_cs.append(cost)
                cc_is.append(iteras)
                ts.append(elt)
    results = {'time': ts, 'cost': cc_cs, 'iters': cc_is}
    json.dump(results, open('results/flights_cc.json', 'w'))
    print('%d out of %d runs are successful.' % (len(cc_cs), nr))
    if cc_cs:
        # guard: the original divided by len(cc_cs) unconditionally and
        # raised ZeroDivisionError when every run failed
        print('cc - cost: %f iters: %f' % (sum(cc_cs)/float(len(cc_cs)),
                                           sum(cc_is)/float(len(cc_is))))
    return results
def run_baseline(sources, Gs, Qs):
    """Run the baseline algorithm 2*nr times over the sources.

    Successful runs (cost != -1) are aggregated and dumped to
    results/flights_baseline.json.  Returns the results dict.
    """
    print('run_baseline')
    baseline_cs, baseline_is, ts = [], [], []
    ads = [join(sources, f) for f in listdir(sources) if isfile(join(sources, f))]
    # NOTE(review): datasets are loaded with read_datasets_known (kapprox) but
    # consumed by baseline.BaselineAlg — confirm the two classes are interchangeable.
    bds_raw = read_datasets_known(ads)
    # baseline
    for j in range(2):
        for i in range(nr):
            print('nr %d' % (j*nr+i))
            t = baseline.MaryTarget(Gs, Qs)
            bds = copy.deepcopy(bds_raw)  # fresh copy: the algorithm mutates its datasets
            alg = baseline.BaselineAlg(bds, t, Gs, budget)
            st = datetime.now()
            cost, iteras = alg.run_Baseline()
            et = datetime.now()
            elt = (et - st).total_seconds() * 1000  # elapsed ms
            if cost != -1:  # -1 signals a failed run
                baseline_cs.append(cost)
                baseline_is.append(iteras)
                ts.append(elt)
    print('%d out of %d runs are successful.' % (len(baseline_cs), nr))
    if baseline_cs:
        # guard: the original divided by len(baseline_cs) unconditionally and
        # raised ZeroDivisionError when every run failed
        print('baseline - cost: %f iters: %f' % (sum(baseline_cs)/float(len(baseline_cs)),
                                                 sum(baseline_is)/float(len(baseline_is))))
    results = {'time': ts, 'cost': baseline_cs, 'iters': baseline_is}
    json.dump(results, open('results/flights_baseline.json', 'w'))
    return results
def plot():
    """Bar-plot mean cost and line-plot mean #samples per algorithm.

    Reads the per-algorithm result JSONs from results/ and writes
    plots/flights.pdf.  Algorithms with fewer than 3 successful runs are
    plotted as NaN.
    """
    cc_results = json.load(open('results/flights_cc.json', 'r'))
    baseline_results = json.load(open('results/flights_baseline.json', 'r'))
    #exploit_results = json.load(open('results/flights_exploitation.json', 'r'))
    explore_results = json.load(open('results/flights_exploration.json', 'r'))
    ucb_results = json.load(open('results/flights_ucb.json', 'r'))
    algs = ['CouponColl', 'Baseline', 'UCB', 'Explore']#, 'Exploit-Only']
    costs, iters = [], []
    # replaces four copy-pasted stat blocks from the original with one loop
    for name, res in zip(algs, (cc_results, baseline_results, ucb_results, explore_results)):
        cs = [c for c in res['cost'] if c > -1]
        its = [c for c in res['iters'] if c > -1]
        if name == 'Baseline' and len(cs) < 20:
            print('insufficient data points')
        if len(cs) < 3:
            # too few successful runs to report a meaningful mean
            costs.append(np.nan)
            iters.append(np.nan)
        else:
            costs.append(statistics.mean(cs))
            iters.append(statistics.mean(its))
    #plot for n's
    font = {'size' : 15}
    plt.rc('font', **font)
    fig, ax1 = plt.subplots()
    width = 0.25
    xs = [i for i in range(len(algs))]
    ax1.set_xticks(xs)
    ax1.set_xticklabels(algs)
    #ax1.set_xlabel('Algorithm')
    ax1.set_ylabel('Cost')
    ax1.set_yscale('log')
    palette = plt.get_cmap('Set2')
    plt1 = ax1.bar(xs, np.array(costs), width, color=palette(2))
    ax2 = ax1.twinx()  # second y-axis for the sample counts
    ax2.set_yscale('log')
    ax2.set_ylabel('#Samples')
    plt2 = ax2.plot(xs, np.array(iters), width, color=palette(1), linestyle='--')
    fig.tight_layout()
    plt.savefig('plots/flights.pdf')
    print('plots/flights.pdf')
    plt.clf()
    plt.close()
def stats():
    """Print the size (total cell count) of every per-airline CSV file."""
    for fname in listdir(sourcesdir):
        full_path = join(sourcesdir, fname)
        if not isfile(full_path):
            continue
        frame = pd.read_csv(full_path)
        print('%s: %d' % (fname, frame.size))
#merge_files()
#airlines, states = split_into_sources()
#print('split into %d files' % len(airlines))
#json.dump(states, open('/localdisk1/DOT/states', 'w'))
# Load the origin-state names previously written by the (commented-out)
# split_into_sources() pipeline step above.
states = json.load(open('/localdisk1/DOT/states', 'r'))
# Even per-state quota splitting the data set size n across all states.
Qs = [int(n/len(states)) for s in states]
# Group ids 0..len(states)-1, matching the Demographics encoding.
Gs = [i for i in range(len(states))]
# Total sampling budget shared by all algorithms.
budget = 400 * sum(Qs)
#ddt_input(sourcesdir, Gs, outputjsondir)
#run_known_ddt(outputjsondir, Gs, Qs)
#run_baseline(outputjsondir, Gs, Qs)
#run_exploitation(outputjsondir, Gs, Qs)
#run_exploration(outputjsondir, Gs, Qs)
#run_ucb(outputjsondir, Gs, Qs)
plot()
#stats()
| [
"known_approx.ApproxAlg",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"os.path.join",
"known_approx.MaryTarget",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rc",
"datetime.datetime.now",
"matplotlib.pyplot.subplots",
"pandas.concat",
"copy.deepcopy",
"matplotlib.pyplot.get_cmap",
"baseline... | [((810, 824), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (819, 824), True, 'import pandas as pd\n'), ((947, 967), 'pandas.read_csv', 'pd.read_csv', (['alldata'], {}), '(alldata)\n', (958, 967), True, 'import pandas as pd\n'), ((10740, 10762), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (10746, 10762), True, 'import matplotlib.pyplot as plt\n'), ((10778, 10792), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10790, 10792), True, 'import matplotlib.pyplot as plt\n'), ((11005, 11025), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Set2"""'], {}), "('Set2')\n", (11017, 11025), True, 'import matplotlib.pyplot as plt\n'), ((11292, 11324), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/flights.pdf"""'], {}), "('plots/flights.pdf')\n", (11303, 11324), True, 'import matplotlib.pyplot as plt\n'), ((11360, 11369), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11367, 11369), True, 'import matplotlib.pyplot as plt\n'), ((11374, 11385), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11383, 11385), True, 'import matplotlib.pyplot as plt\n'), ((11414, 11433), 'os.listdir', 'listdir', (['sourcesdir'], {}), '(sourcesdir)\n', (11421, 11433), False, 'from os import listdir\n'), ((625, 635), 'os.path.join', 'join', (['d', 'f'], {}), '(d, f)\n', (629, 635), False, 'from os.path import isfile, join\n'), ((1467, 1496), 'numpy.select', 'np.select', (['conditions', 'values'], {}), '(conditions, values)\n', (1476, 1496), True, 'import numpy as np\n'), ((2016, 2071), 'unknown.MaryDataset', 'unknown.MaryDataset', (["d['id']", "d['data']", "d['groups']", '(1)'], {}), "(d['id'], d['data'], d['groups'], 1)\n", (2035, 2071), False, 'import unknown\n'), ((2222, 2277), 'known_approx.MaryDataset', 'kapprox.MaryDataset', (["d['id']", "d['data']", "d['groups']", '(1)'], {}), "(d['id'], d['data'], d['groups'], 1)\n", (2241, 2277), True, 'import known_approx as kapprox\n'), ((2426, 2482), 
'baseline.MaryDataset', 'baseline.MaryDataset', (["d['id']", "d['data']", "d['groups']", '(1)'], {}), "(d['id'], d['data'], d['groups'], 1)\n", (2446, 2482), False, 'import baseline\n'), ((3046, 3062), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (3050, 3062), False, 'from os.path import isfile, join\n'), ((4247, 4263), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (4251, 4263), False, 'from os.path import isfile, join\n'), ((5347, 5363), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (5351, 5363), False, 'from os.path import isfile, join\n'), ((6489, 6505), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (6493, 6505), False, 'from os.path import isfile, join\n'), ((7605, 7621), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (7609, 7621), False, 'from os.path import isfile, join\n'), ((11049, 11064), 'numpy.array', 'np.array', (['costs'], {}), '(costs)\n', (11057, 11064), True, 'import numpy as np\n'), ((11203, 11218), 'numpy.array', 'np.array', (['iters'], {}), '(iters)\n', (11211, 11218), True, 'import numpy as np\n'), ((659, 669), 'os.listdir', 'listdir', (['d'], {}), '(d)\n', (666, 669), False, 'from os import listdir\n'), ((782, 796), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (793, 796), True, 'import pandas as pd\n'), ((2591, 2610), 'os.listdir', 'listdir', (['sourcesdir'], {}), '(sourcesdir)\n', (2598, 2610), False, 'from os import listdir\n'), ((2718, 2737), 'os.path.join', 'join', (['sourcesdir', 'a'], {}), '(sourcesdir, a)\n', (2722, 2737), False, 'from os.path import isfile, join\n'), ((3072, 3088), 'os.listdir', 'listdir', (['sources'], {}), '(sources)\n', (3079, 3088), False, 'from os import listdir\n'), ((3265, 3287), 'copy.deepcopy', 'copy.deepcopy', (['bds_raw'], {}), '(bds_raw)\n', (3278, 3287), False, 'import copy\n'), ((3304, 3330), 'unknown.MaryTarget', 'unknown.MaryTarget', (['Gs', 'Qs'], {}), '(Gs, Qs)\n', (3322, 3330), False, 'import 
unknown\n'), ((3349, 3393), 'unknown.UnknownAlg', 'unknown.UnknownAlg', (['bds', 't', 'Gs', 'None', 'budget'], {}), '(bds, t, Gs, None, budget)\n', (3367, 3393), False, 'import unknown\n'), ((3411, 3425), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3423, 3425), False, 'from datetime import datetime\n'), ((3510, 3524), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3522, 3524), False, 'from datetime import datetime\n'), ((4273, 4289), 'os.listdir', 'listdir', (['sources'], {}), '(sources)\n', (4280, 4289), False, 'from os import listdir\n'), ((4466, 4488), 'copy.deepcopy', 'copy.deepcopy', (['bds_raw'], {}), '(bds_raw)\n', (4479, 4488), False, 'import copy\n'), ((4505, 4531), 'unknown.MaryTarget', 'unknown.MaryTarget', (['Gs', 'Qs'], {}), '(Gs, Qs)\n', (4523, 4531), False, 'import unknown\n'), ((4550, 4594), 'unknown.UnknownAlg', 'unknown.UnknownAlg', (['bds', 't', 'Gs', 'None', 'budget'], {}), '(bds, t, Gs, None, budget)\n', (4568, 4594), False, 'import unknown\n'), ((4612, 4626), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4624, 4626), False, 'from datetime import datetime\n'), ((4698, 4712), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4710, 4712), False, 'from datetime import datetime\n'), ((5373, 5389), 'os.listdir', 'listdir', (['sources'], {}), '(sources)\n', (5380, 5389), False, 'from os import listdir\n'), ((5566, 5588), 'copy.deepcopy', 'copy.deepcopy', (['bds_raw'], {}), '(bds_raw)\n', (5579, 5588), False, 'import copy\n'), ((5605, 5631), 'unknown.MaryTarget', 'unknown.MaryTarget', (['Gs', 'Qs'], {}), '(Gs, Qs)\n', (5623, 5631), False, 'import unknown\n'), ((5650, 5694), 'unknown.UnknownAlg', 'unknown.UnknownAlg', (['bds', 't', 'Gs', 'None', 'budget'], {}), '(bds, t, Gs, None, budget)\n', (5668, 5694), False, 'import unknown\n'), ((5712, 5726), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5724, 5726), False, 'from datetime import datetime\n'), ((5812, 5826), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5824, 5826), False, 'from datetime import datetime\n'), ((6515, 6531), 'os.listdir', 'listdir', (['sources'], {}), '(sources)\n', (6522, 6531), False, 'from os import listdir\n'), ((6706, 6732), 'known_approx.MaryTarget', 'kapprox.MaryTarget', (['Gs', 'Qs'], {}), '(Gs, Qs)\n', (6724, 6732), True, 'import known_approx as kapprox\n'), ((6751, 6773), 'copy.deepcopy', 'copy.deepcopy', (['bds_raw'], {}), '(bds_raw)\n', (6764, 6773), False, 'import copy\n'), ((6793, 6830), 'known_approx.ApproxAlg', 'kapprox.ApproxAlg', (['bds', 't', 'Gs', 'budget'], {}), '(bds, t, Gs, budget)\n', (6810, 6830), True, 'import known_approx as kapprox\n'), ((6848, 6862), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6860, 6862), False, 'from datetime import datetime\n'), ((6926, 6940), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6938, 6940), False, 'from datetime import datetime\n'), ((7631, 7647), 'os.listdir', 'listdir', (['sources'], {}), '(sources)\n', (7638, 7647), False, 'from os import listdir\n'), ((7842, 7869), 'baseline.MaryTarget', 'baseline.MaryTarget', (['Gs', 'Qs'], {}), '(Gs, Qs)\n', (7861, 7869), False, 'import baseline\n'), ((7888, 7910), 'copy.deepcopy', 'copy.deepcopy', (['bds_raw'], {}), '(bds_raw)\n', (7901, 7910), False, 'import copy\n'), ((7929, 7969), 'baseline.BaselineAlg', 'baseline.BaselineAlg', (['bds', 't', 'Gs', 'budget'], {}), '(bds, t, Gs, budget)\n', (7949, 7969), False, 'import baseline\n'), ((7987, 8001), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7999, 8001), False, 'from datetime import datetime\n'), ((8065, 8079), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8077, 8079), False, 'from datetime import datetime\n'), ((9707, 9726), 'statistics.mean', 'statistics.mean', (['cs'], {}), '(cs)\n', (9722, 9726), False, 'import statistics\n'), ((9749, 9769), 'statistics.mean', 'statistics.mean', (['its'], {}), '(its)\n', (9764, 9769), False, 
'import statistics\n'), ((10059, 10078), 'statistics.mean', 'statistics.mean', (['cs'], {}), '(cs)\n', (10074, 10078), False, 'import statistics\n'), ((10101, 10121), 'statistics.mean', 'statistics.mean', (['its'], {}), '(its)\n', (10116, 10121), False, 'import statistics\n'), ((10338, 10357), 'statistics.mean', 'statistics.mean', (['cs'], {}), '(cs)\n', (10353, 10357), False, 'import statistics\n'), ((10380, 10400), 'statistics.mean', 'statistics.mean', (['its'], {}), '(its)\n', (10395, 10400), False, 'import statistics\n'), ((10625, 10644), 'statistics.mean', 'statistics.mean', (['cs'], {}), '(cs)\n', (10640, 10644), False, 'import statistics\n'), ((10667, 10687), 'statistics.mean', 'statistics.mean', (['its'], {}), '(its)\n', (10682, 10687), False, 'import statistics\n'), ((11454, 11473), 'os.path.join', 'join', (['sourcesdir', 'f'], {}), '(sourcesdir, f)\n', (11458, 11473), False, 'from os.path import isfile, join\n'), ((680, 690), 'os.path.join', 'join', (['d', 'f'], {}), '(d, f)\n', (684, 690), False, 'from os.path import isfile, join\n'), ((2621, 2640), 'os.path.join', 'join', (['sourcesdir', 'f'], {}), '(sourcesdir, f)\n', (2625, 2640), False, 'from os.path import isfile, join\n'), ((3099, 3115), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (3103, 3115), False, 'from os.path import isfile, join\n'), ((4300, 4316), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (4304, 4316), False, 'from os.path import isfile, join\n'), ((5400, 5416), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (5404, 5416), False, 'from os.path import isfile, join\n'), ((6542, 6558), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (6546, 6558), False, 'from os.path import isfile, join\n'), ((7658, 7674), 'os.path.join', 'join', (['sources', 'f'], {}), '(sources, f)\n', (7662, 7674), False, 'from os.path import isfile, join\n'), ((11505, 11524), 'os.path.join', 'join', (['sourcesdir', 'f'], {}), '(sourcesdir, 
f)\n', (11509, 11524), False, 'from os.path import isfile, join\n')] |
import os
import numpy as np
import sys

# Source spectrogram directories and the directory for combined outputs.
LOAD_PATH = ['/cache/rmishra/cc16_366a_converted/spectro',
             '/cache/rmishra/cc16_352a_converted/spectro',
             '/cache/rmishra/cc16_352b_converted/spectro']
SAVE_PATH = '/cache/rmishra/combined'
if not os.path.exists(SAVE_PATH):
    os.mkdir(SAVE_PATH)

# Pair every *LABEL*.npy file with its spectrogram and keep only windows
# whose first 8 label rows contain at least one positive (1) entry.
for d in LOAD_PATH:  # renamed from `dir`, which shadowed the builtin
    for f in os.listdir(d):
        if 'LABEL' in f and os.path.splitext(f)[-1].lower() == ".npy":
            label = np.load(os.path.join(d, f))
            if 1 in label[0:8, :]:
                spectro = np.load(os.path.join(d, f.split('LABEL')[0] + '.npy'))
                combined = np.array((spectro, label))
                np.save(os.path.join(SAVE_PATH, f.split('LABEL')[0] + 'SPEC_LAB.npy'), combined)

LOAD_PATH = '/cache/rmishra/combined'
SAVE_PATH = '/cache/rmishra/datasets'
SAVE_FILE = '1plus_dataset.npy'

# Bundle every combined file into one dataset array of [name, array] pairs.
dataset = []
for fname in os.listdir(LOAD_PATH):
    dataset.append([fname, np.load(os.path.join(LOAD_PATH, fname))])
dataset = np.array(dataset)
if not os.path.exists(SAVE_PATH):
    os.mkdir(SAVE_PATH)
np.save(os.path.join(SAVE_PATH, SAVE_FILE), dataset)
"os.mkdir",
"os.path.exists",
"numpy.array",
"os.path.splitext",
"os.path.join",
"os.listdir"
] | [((871, 892), 'os.listdir', 'os.listdir', (['LOAD_PATH'], {}), '(LOAD_PATH)\n', (881, 892), False, 'import os\n'), ((1052, 1069), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (1060, 1069), True, 'import numpy as np\n'), ((265, 290), 'os.path.exists', 'os.path.exists', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (279, 290), False, 'import os\n'), ((296, 315), 'os.mkdir', 'os.mkdir', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (304, 315), False, 'import os\n'), ((352, 367), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (362, 367), False, 'import os\n'), ((1077, 1102), 'os.path.exists', 'os.path.exists', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (1091, 1102), False, 'import os\n'), ((1108, 1127), 'os.mkdir', 'os.mkdir', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (1116, 1127), False, 'import os\n'), ((1136, 1170), 'os.path.join', 'os.path.join', (['SAVE_PATH', 'SAVE_FILE'], {}), '(SAVE_PATH, SAVE_FILE)\n', (1148, 1170), False, 'import os\n'), ((962, 995), 'os.path.join', 'os.path.join', (['LOAD_PATH', 'files[i]'], {}), '(LOAD_PATH, files[i])\n', (974, 995), False, 'import os\n'), ((468, 488), 'os.path.join', 'os.path.join', (['dir', 'f'], {}), '(dir, f)\n', (480, 488), False, 'import os\n'), ((631, 657), 'numpy.array', 'np.array', (['(spectro, label)'], {}), '((spectro, label))\n', (639, 657), True, 'import numpy as np\n'), ((397, 416), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (413, 416), False, 'import os\n')] |
"""
Utilities for randomness.
Complete docstrings.
"""
import ctypes
import multiprocessing
import os
import random
import subprocess
import time
import traceback
from timeit import default_timer as timer
from typing import Any, Dict, List, Tuple
import GPUtil
import numpy as np
import psutil
import torch as th
import torch.backends.cudnn as cudnn
import tqdm
from torch import cuda
# ---------- Multiprocessing ----------
# Mapping from dtype key to the ctypes element type used for shared buffers.
MAP_TYPES: Dict[str, Any] = {
    'int': ctypes.c_int,
    'long': ctypes.c_long,
    'float': ctypes.c_float,
    'double': ctypes.c_double
}


def create_shared_array(arr: np.ndarray, dtype: str = "float") -> np.ndarray:
    """
    Converts an existing numpy array into a shared numpy array, such that
    this array can be used by multiple CPUs. Used e.g. for preloading the
    entire feature dataset into memory and then making it available to multiple
    dataloaders.

    Args:
        arr (np.ndarray): Array to be converted to shared array
        dtype (str): Key into MAP_TYPES selecting the ctypes element type
            ('int', 'long', 'float' or 'double').

    Returns:
        np.ndarray: numpy view onto the shared-memory buffer, with the same
            shape and contents as `arr`.
    """
    # Original annotated the return as `np.array` (a function, not a type)
    # and documented the return as a multiprocessing.Array; both corrected.
    shape = arr.shape
    flat_shape = int(np.prod(np.array(shape)))
    c_type = MAP_TYPES[dtype]
    shared_array_base = multiprocessing.Array(c_type, flat_shape)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(shape)
    shared_array[:] = arr[:]
    return shared_array
# ---------- Random ----------
def set_seed(seed: int, set_deterministic: bool = True):
    """
    Seed the torch, numpy and python RNGs; optionally force deterministic cudnn.

    Args:
        seed: int seed
        set_deterministic: Guarantee deterministic training, possibly at the cost of performance.
    """
    # seed every RNG the training stack draws from
    for seed_fn in (th.manual_seed, cuda.manual_seed, cuda.manual_seed_all,
                    np.random.seed, random.seed):
        seed_fn(seed)
    if set_deterministic:
        cudnn.benchmark = False
        cudnn.deterministic = True
    elif cudnn.benchmark or not cudnn.deterministic:
        print(f"WARNING: Despite fixed seed {seed}, training may not be deterministic with {cudnn.benchmark=} "
              f"(must be False for deterministic training) and {cudnn.deterministic=} (must be True for deterministic "
              f"training)")
def get_truncnorm_tensor(shape: Tuple[int], *, mean: float = 0, std: float = 1, limit: float = 2) -> th.Tensor:
    """
    Create and return normally distributed tensor, except values with too much deviation are discarded.

    Args:
        shape: tensor shape (tuple or list of ints)
        mean: normal mean
        std: normal std
        limit: which values to discard (in units of the unit normal, before scaling)

    Returns:
        Filled tensor with shape (*shape)
    """
    assert isinstance(shape, (tuple, list)), f"shape {shape} is not a tuple or list of ints"
    num_examples = 8
    # tuple(shape): the original `shape + (num_examples,)` raised TypeError
    # for list inputs even though the assert above explicitly allows lists.
    tmp = th.empty(tuple(shape) + (num_examples,)).normal_()
    # keep draws within +/- limit; each output element takes the first valid
    # draw among its num_examples candidates
    valid = (tmp < limit) & (tmp > -limit)
    _, ind = valid.max(-1, keepdim=True)
    return tmp.gather(-1, ind).squeeze(-1).mul_(std).add_(mean)
def fill_tensor_with_truncnorm(input_tensor: th.Tensor, *, mean: float = 0, std: float = 1, limit: float = 2) -> None:
    """
    Fill the given tensor in place with truncated-normal values.

    Args:
        input_tensor: tensor to be filled
        mean: normal mean
        std: normal std
        limit: which values to discard
    """
    # draw the values, then copy them into the caller's tensor in place
    drawn = get_truncnorm_tensor(input_tensor.shape, mean=mean, std=std, limit=limit)
    input_tensor[...] = drawn[...]
# ---------- Profiling ----------
def profile_gpu_and_ram() -> Tuple[List[str], List[float], List[float], List[float], float, float, float]:
    """
    Profile GPU and RAM.

    Returns:
        GPU names, total / used memory per GPU, load per GPU, total / used / available RAM.
    """
    _str, dct_ = _get_gputil_info()
    dev_num = os.getenv("CUDA_VISIBLE_DEVICES")
    # restrict to the single GPU selected via the OS flag, if set;
    # otherwise aggregate over all visible devices
    if dev_num is not None:
        gpu_info = [dct_[int(dev_num)]]
    else:
        gpu_info = list(dct_)
    # GPUtil reports memory in MB — convert to GB; load in percent — to fraction
    gpu_names: List[str] = [gpu["name"] for gpu in gpu_info]
    total_memory_per: List[float] = [gpu["memoryTotal"] / 1024 for gpu in gpu_info]
    used_memory_per: List[float] = [gpu["memoryUsed"] / 1024 for gpu in gpu_info]
    load_per: List[float] = [gpu["load"] / 100 for gpu in gpu_info]
    # psutil reports RAM in bytes — convert to GB
    mem = psutil.virtual_memory()
    ram_total: float = mem.total / 1024 ** 3
    ram_used: float = mem.used / 1024 ** 3
    ram_avail: float = mem.available / 1024 ** 3
    return gpu_names, total_memory_per, used_memory_per, load_per, ram_total, ram_used, ram_avail
def _get_gputil_info():
    """
    Returns info string for printing and list with gpu infos. Better formatting than the original GPUtil.

    Returns:
        gpu info string, List[Dict()] of values. dict example:
            ('id', 1),
            ('name', 'GeForce GTX TITAN X'),
            ('temperature', 41.0),
            ('load', 0.0),
            ('memoryUtil', 0.10645266950540452),
            ('memoryTotal', 12212.0)])]
    """
    gpus = GPUtil.getGPUs()
    attr_list = [
        {'attr': 'id', 'name': 'ID'}, {'attr': 'name', 'name': 'Name'},
        {'attr': 'temperature', 'name': 'Temp', 'suffix': 'C', 'transform': lambda x: x, 'precision': 0},
        {'attr': 'load', 'name': 'GPU util.', 'suffix': '% GPU', 'transform': lambda x: x * 100,
         'precision': 1},
        {'attr': 'memoryUtil', 'name': 'Memory util.', 'suffix': '% MEM', 'transform': lambda x: x * 100,
         'precision': 1}, {'attr': 'memoryTotal', 'name': 'Memory total', 'suffix': 'MB', 'precision': 0},
        {'attr': 'memoryUsed', 'name': 'Memory used', 'suffix': 'MB', 'precision': 0}
    ]
    gpu_strings = [''] * len(gpus)
    gpu_info = [{} for _ in gpus]
    # NOTE: the original nested the attribute loop inside the per-GPU init loop
    # (duplicating every formatted field len(gpus) times) and additionally ran a
    # first formatting pass whose results were thrown away; both removed.
    for attrDict in attr_list:
        attr_precision = '.' + str(attrDict['precision']) if (
            'precision' in attrDict.keys()) else ''
        attr_suffix = str(attrDict['suffix']) if (
            'suffix' in attrDict.keys()) else ''
        attr_transform = attrDict['transform'] if (
            'transform' in attrDict.keys()) else lambda x: x
        for gpuIdx, gpu in enumerate(gpus):
            attr_name = attrDict['attr']
            attr = attr_transform(getattr(gpu, attr_name))
            if isinstance(attr, float):
                attr_str = ('{0:' + attr_precision + 'f}').format(attr)
            elif isinstance(attr, int):
                attr_str = '{0:d}'.format(attr)
            elif isinstance(attr, str):
                attr_str = attr
            else:
                raise TypeError(
                    'Unhandled object type (' + str(
                        type(attr)) + ') for attribute \'' + attrDict[
                        'name'] + '\'')
            attr_str += attr_suffix
            gpu_info[gpuIdx][attr_name] = attr
            gpu_strings[gpuIdx] += '| ' + attr_str + ' '
    return "\n".join(gpu_strings), gpu_info
| [
"psutil.virtual_memory",
"GPUtil.getGPUs",
"numpy.random.seed",
"multiprocessing.Array",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.empty",
"torch.cuda.manual_seed_all",
"random.seed",
"numpy.array",
"os.getenv"
] | [((1289, 1330), 'multiprocessing.Array', 'multiprocessing.Array', (['c_type', 'flat_shape'], {}), '(c_type, flat_shape)\n', (1310, 1330), False, 'import multiprocessing\n'), ((1816, 1836), 'torch.manual_seed', 'th.manual_seed', (['seed'], {}), '(seed)\n', (1830, 1836), True, 'import torch as th\n'), ((1842, 1864), 'torch.cuda.manual_seed', 'cuda.manual_seed', (['seed'], {}), '(seed)\n', (1858, 1864), False, 'from torch import cuda\n'), ((1870, 1896), 'torch.cuda.manual_seed_all', 'cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1890, 1896), False, 'from torch import cuda\n'), ((1902, 1922), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1916, 1922), True, 'import numpy as np\n'), ((1928, 1945), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1939, 1945), False, 'import random\n'), ((4019, 4052), 'os.getenv', 'os.getenv', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (4028, 4052), False, 'import os\n'), ((4710, 4733), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (4731, 4733), False, 'import psutil\n'), ((5448, 5464), 'GPUtil.getGPUs', 'GPUtil.getGPUs', ([], {}), '()\n', (5462, 5464), False, 'import GPUtil\n'), ((1215, 1230), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (1223, 1230), True, 'import numpy as np\n'), ((2919, 2952), 'torch.empty', 'th.empty', (['(shape + (num_examples,))'], {}), '(shape + (num_examples,))\n', (2927, 2952), True, 'import torch as th\n')] |
'''
Created on Feb 27, 2015
@author: <NAME>
'''
from . import const
import numpy as np
from . import cosmology as cm
def noise_error_ps(nu_c, k, t, **kwargs):
    '''
    Calculate the system noise error on the
    power spectrum, using the analytical expression in
    Mellema et al 2013 (equation 11). If no arguments are given,
    the noise is calculated for LOFAR-like parameters
    with delta k = k, and a bandwidth of 10 MHz.

    Parameters:
        * nu_c (float): the central observing frequency
        * k (float or array-like): the k mode(s)
        * t (float): the observing time in hours

    Valid kwargs:
        * Rmax (float): the radius of the array in meters
        * Aeff (float or function): the effective area. Can be a
            function of nu.
        * Nstat (int): the number of stations
        * Tsys (float or function): the system temperature. Can be a
            function of nu.
        * B (float): the bandwidth in MHz
        * epsilon (float): the width of the k bins in terms of k
        * multipole (int): if this is zero (default), the
            noise is calculated for the monopole (spherially-averaged).
            Otherwise it is calculated for the given multipole moment
            of the power spectrum.

    Returns:
        The system noise error in mK^2
    '''
    # Observing wavelength. NOTE(review): assumes const.c and nu_c are in units
    # such that const.c/nu_c*1.e-3 yields the wavelength the FoV formula below
    # expects — confirm in the `const` module.
    wavel = const.c/nu_c*1.e-3
    t = t*60.*60. #s  (observing time converted from hours to seconds)
    # LOFAR-like defaults: 1500 m core radius, Aeff(nu)=526*(nu/150)^-2,
    # 48 stations, 10 MHz bandwidth.
    Rmax = kwargs.get('Rmax', 1500.)
    Acore = Rmax**2*np.pi #m^2 (collecting area of the array core)
    Aeff = kwargs.get('Aeff', lambda nu: 526.*(nu/150.)**(-2))
    # Aeff may be a scalar or a function of frequency
    if hasattr(Aeff, '__call__'):
        Aeff_val = Aeff(nu_c)
    else:
        Aeff_val = Aeff
    Nstat = kwargs.get('Nstat', 48)
    Acoll = Nstat*Aeff_val  # total collecting area of all stations
    B = kwargs.get('B', 10.)
    # comoving distance to the central frequency and comoving depth of the band
    Dc = cm.nu_to_cdist(nu_c)
    deltaDc = np.abs(Dc - cm.nu_to_cdist(nu_c+B))
    OmegaFoV = wavel**2/Aeff_val  # field-of-view solid angle
    # Default Tsys(nu): receiver + sky temperature; the *1000 factor
    # presumably converts K to mK — TODO confirm.
    Tsys = kwargs.get('Tsys', lambda nu: (140. + 60.*(nu/300.)**(-2.55))*1000.)
    # Tsys may also be a scalar or a function of frequency
    if hasattr(Tsys, '__call__'):
        Tsys_val = Tsys(nu_c)
    else:
        Tsys_val = Tsys
    epsilon = kwargs.get('epsilon', 1)
    multipole = kwargs.get('multipole', 0)
    # sqrt(2l+1) scaling for the l-th multipole (1 for the monopole)
    multipole_factor = np.sqrt(2*multipole+1)
    # Mellema et al. (2013), Eq. 11; B converted from MHz to Hz
    Delta_noise = (2./np.pi)*k**(3./2.)*np.sqrt(Dc**2*deltaDc*OmegaFoV)*(Tsys_val/np.sqrt(B*1.e6*t))**2*(Acore*Aeff_val/(Acoll**2))/np.sqrt(epsilon)
    return Delta_noise*multipole_factor
| [
"numpy.sqrt"
] | [((2112, 2138), 'numpy.sqrt', 'np.sqrt', (['(2 * multipole + 1)'], {}), '(2 * multipole + 1)\n', (2119, 2138), True, 'import numpy as np\n'), ((2267, 2283), 'numpy.sqrt', 'np.sqrt', (['epsilon'], {}), '(epsilon)\n', (2274, 2283), True, 'import numpy as np\n'), ((2175, 2212), 'numpy.sqrt', 'np.sqrt', (['(Dc ** 2 * deltaDc * OmegaFoV)'], {}), '(Dc ** 2 * deltaDc * OmegaFoV)\n', (2182, 2212), True, 'import numpy as np\n'), ((2217, 2243), 'numpy.sqrt', 'np.sqrt', (['(B * 1000000.0 * t)'], {}), '(B * 1000000.0 * t)\n', (2224, 2243), True, 'import numpy as np\n')] |
import distutils.version as vers
import pytest
from numpy.testing import assert_allclose
import astropy.version as astrov
from astropy.utils.data import get_pkg_data_filename
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
from ..read import FITSRegionParser, read_fits_region
from ..write import fits_region_objects_to_table
from ..core import FITSRegionParserError
from ...core import to_shape_list
from ....shapes.circle import CircleSkyRegion
# FITS region shape types supported by the parser/writer round trip.
# NOTE(review): this tuple is not referenced anywhere in this module —
# presumably kept for documentation/export; confirm before removing.
implemented_region_types = ('ellipse', 'circle', 'box', 'polygon', 'point',
                            'annulus', 'elliptannulus')
@pytest.mark.parametrize('filename', ['data/fits_region.fits'])
def test_file_fits(filename):
    """Round-trip the sample FITS region file: parse the shapes, rewrite
    them to a table, parse again, and also convert to sky regions and back
    to pixel shapes, checking types and coordinates each time."""
    filename = get_pkg_data_filename(filename)
    table = Table.read(filename)
    shapes = FITSRegionParser(table, 'warn').shapes
    # Expected region type for each row of the test file.
    assert shapes[0].region_type == 'circle'
    assert shapes[1].region_type == 'rectangle'
    assert shapes[2].region_type == 'ellipse'
    assert shapes[3].region_type == 'rectangle'
    assert shapes[4].region_type == 'circleannulus'
    assert shapes[5].region_type == 'point'
    assert shapes[6].region_type == 'point'
    assert shapes[7].region_type == 'polygon'
    # Centers of the first 7 shapes come from the table's X/Y columns.
    for x in range(7):
        assert_allclose(shapes[x].coord[:2],
                        [table['X'][x][0], table['Y'][x][0]])
    # The polygon stores its vertices as a flat coordinate list.
    assert_allclose(shapes[7].coord, [1, 5, 2, 6, 3, 7, 4, 8])
    # The circle radius comes from the R column.
    assert_allclose(shapes[0].coord[2:], [table['R'][0][0]])
    # Shapes 1-3 carry two radii plus a rotation angle.
    for x in range(1, 4):
        assert_allclose([val.value for val in shapes[x].coord[2:]],
                        list(table['R'][x][:2]) + [table['ROTANG'][x]])
    # shapes -> regions -> table -> shapes must reproduce every field.
    regs = shapes.to_regions()
    table_ouput = fits_region_objects_to_table(regs)
    shape_ouput = FITSRegionParser(table_ouput).shapes
    for i in range(len(shapes)):
        assert shapes[i].region_type == shape_ouput[i].region_type
        assert shapes[i].coord == shape_ouput[i].coord
        assert shapes[i].meta == shape_ouput[i].meta
    # Reading the regions directly from file and converting to sky regions.
    regs_sky = read_fits_region(filename)
    header = fits.open(filename)[1].header
    wcs = WCS(header, keysel=['image', 'binary', 'pixel'])
    regs_pix = [reg.to_pixel(wcs) for reg in regs_sky]
    shapes_roundtrip = to_shape_list(regs_pix, 'image')
    for i in range(len(shapes)):
        assert shapes[i].region_type == shapes_roundtrip[i].region_type
        # Skip the last coordinate entry (metadata-like); compare the rest.
        assert_allclose(shapes[i].coord[:-1], shapes_roundtrip[i].coord[:-1])
def test_only_pixel_regions():
    """Serializing a sky region must fail: only pixel regions are allowed."""
    center = SkyCoord(1, 2, unit='deg')
    sky_region = CircleSkyRegion(center, 5 * u.deg)
    with pytest.raises(TypeError) as err:
        fits_region_objects_to_table([sky_region])
    print(str(err))
    assert 'Every region must be a pixel region' in str(err)
def test_valid_columns():
    """A table with an unrecognized column name must raise
    FITSRegionParserError."""
    # BUG FIX: names=('a') is just the string 'a' — without a trailing comma
    # the parentheses are grouping, not a tuple. It only worked because a
    # one-character string iterates to ['a']; use a real one-element tuple.
    t = Table([[1, 2, 3]], names=('a',))
    with pytest.raises(FITSRegionParserError) as err:
        FITSRegionParser(t)
    assert "This table has an invalid column name: 'a'" in str(err)
def test_valid_row():
    """Rows with a missing required column, an unknown SHAPE, or a valid
    but unsupported SHAPE must raise FITSRegionParserError."""
    tbl = Table([[1], [2], ['CIRCLE']], names=('X', 'Y', 'SHAPE'))
    tbl['X'].unit = 'pix'
    tbl['Y'].unit = 'pix'

    # A CIRCLE row without a radius column is invalid.
    with pytest.raises(FITSRegionParserError) as excinfo:
        FITSRegionParser(tbl)
    assert "The column: 'R' is missing in the table" in str(excinfo)

    # 'PONT' is not a recognized FITS region type at all.
    tbl[0]['SHAPE'] = 'PONT'
    with pytest.raises(FITSRegionParserError) as excinfo:
        FITSRegionParser(tbl)
    assert "'PONT' is not a valid FITS Region type" in str(excinfo)

    # 'PIE' is a valid FITS type but not supported by this package.
    tbl['ROTANG'] = [[20, 30]]
    tbl['ROTANG'].unit = 'deg'
    tbl[0]['SHAPE'] = 'PIE'
    with pytest.raises(FITSRegionParserError) as excinfo:
        FITSRegionParser(tbl)
    assert "'PIE' is currently not supported in regions" in str(excinfo)
| [
"astropy.table.Table",
"numpy.testing.assert_allclose",
"astropy.utils.data.get_pkg_data_filename",
"astropy.wcs.WCS",
"pytest.raises",
"astropy.io.fits.open",
"pytest.mark.parametrize",
"astropy.coordinates.SkyCoord",
"astropy.table.Table.read"
] | [((699, 761), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filename"""', "['data/fits_region.fits']"], {}), "('filename', ['data/fits_region.fits'])\n", (722, 761), False, 'import pytest\n'), ((808, 839), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['filename'], {}), '(filename)\n', (829, 839), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((852, 872), 'astropy.table.Table.read', 'Table.read', (['filename'], {}), '(filename)\n', (862, 872), False, 'from astropy.table import Table\n'), ((1436, 1494), 'numpy.testing.assert_allclose', 'assert_allclose', (['shapes[7].coord', '[1, 5, 2, 6, 3, 7, 4, 8]'], {}), '(shapes[7].coord, [1, 5, 2, 6, 3, 7, 4, 8])\n', (1451, 1494), False, 'from numpy.testing import assert_allclose\n'), ((1500, 1556), 'numpy.testing.assert_allclose', 'assert_allclose', (['shapes[0].coord[2:]', "[table['R'][0][0]]"], {}), "(shapes[0].coord[2:], [table['R'][0][0]])\n", (1515, 1556), False, 'from numpy.testing import assert_allclose\n'), ((2246, 2294), 'astropy.wcs.WCS', 'WCS', (['header'], {'keysel': "['image', 'binary', 'pixel']"}), "(header, keysel=['image', 'binary', 'pixel'])\n", (2249, 2294), False, 'from astropy.wcs import WCS\n'), ((2903, 2932), 'astropy.table.Table', 'Table', (['[[1, 2, 3]]'], {'names': '"""a"""'}), "([[1, 2, 3]], names='a')\n", (2908, 2932), False, 'from astropy.table import Table\n'), ((3168, 3216), 'astropy.table.Table', 'Table', (['[x, y, shapes]'], {'names': "('X', 'Y', 'SHAPE')"}), "([x, y, shapes], names=('X', 'Y', 'SHAPE'))\n", (3173, 3216), False, 'from astropy.table import Table\n'), ((1332, 1406), 'numpy.testing.assert_allclose', 'assert_allclose', (['shapes[x].coord[:2]', "[table['X'][x][0], table['Y'][x][0]]"], {}), "(shapes[x].coord[:2], [table['X'][x][0], table['Y'][x][0]])\n", (1347, 1406), False, 'from numpy.testing import assert_allclose\n'), ((2520, 2589), 'numpy.testing.assert_allclose', 'assert_allclose', (['shapes[i].coord[:-1]', 
'shapes_roundtrip[i].coord[:-1]'], {}), '(shapes[i].coord[:-1], shapes_roundtrip[i].coord[:-1])\n', (2535, 2589), False, 'from numpy.testing import assert_allclose\n'), ((2654, 2680), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(1)', '(2)'], {'unit': '"""deg"""'}), "(1, 2, unit='deg')\n", (2662, 2680), False, 'from astropy.coordinates import SkyCoord\n'), ((2703, 2727), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2716, 2727), False, 'import pytest\n'), ((2945, 2981), 'pytest.raises', 'pytest.raises', (['FITSRegionParserError'], {}), '(FITSRegionParserError)\n', (2958, 2981), False, 'import pytest\n'), ((3275, 3311), 'pytest.raises', 'pytest.raises', (['FITSRegionParserError'], {}), '(FITSRegionParserError)\n', (3288, 3311), False, 'import pytest\n'), ((3452, 3488), 'pytest.raises', 'pytest.raises', (['FITSRegionParserError'], {}), '(FITSRegionParserError)\n', (3465, 3488), False, 'import pytest\n'), ((3685, 3721), 'pytest.raises', 'pytest.raises', (['FITSRegionParserError'], {}), '(FITSRegionParserError)\n', (3698, 3721), False, 'import pytest\n'), ((2206, 2225), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (2215, 2225), False, 'from astropy.io import fits\n')] |
"""
Estimates the CPU time required for a phosim simulation of a 30-s visit. The
inputs are filter, moonalt, and moonphase, or obsHistID (an Opsim ID from
a specified (hard coded) Opsim sqlite database.
The random forest is generated (and saved as a pickle file) by
run1_cpu_generate_rf.py, using only the filter, moonalt, and moonphase
features. This script needs to be run only once.
"""
from __future__ import print_function, absolute_import
import os
import pickle
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab
from .sqlite_tools import SqliteDataFrameFactory
__all__ = ['CpuPred']
class CpuPred(object):
    """
    Predict the fell-class CPU time (seconds) of a phosim 30-s visit.

    The prediction comes from a pre-trained random forest (written by
    run1_cpu_generate_rf.py) whose features are the filter index (0-5 for
    ugrizy), the moon altitude (deg), and the moon phase (0-100).
    Conditions may be supplied directly to `cputime`, or looked up by
    obsHistID from an Opsim database (kraken_1042 at a fixed SLAC path by
    default), restricted to one field via `fieldID` (1427 = Twinkles Run 1).
    """
    def __init__(self, rf_pickle_file='RF_pickle.p',
                 opsim_db_file='/nfs/farm/g/lsst/u1/DESC/Twinkles/kraken_1042_sqlite.db',
                 opsim_df=None,
                 fieldID=1427):
        """
        Parameters
        ----------
        rf_pickle_file: pickle file holding the trained random forest.
        opsim_db_file: Opsim sqlite database; read only when opsim_df is None.
        opsim_df: optional pre-loaded Opsim Summary table containing the
            obsHistID, filter, moonAlt and moonPhase columns.
        fieldID: Opsim fieldID used to select visits from the database.
        """
        # BUG FIX: fieldID was not stored, so conditions() referenced an
        # undefined name when reporting an unknown obsHistID.
        self.fieldID = fieldID
        self.RFbest = pickle.load(open(rf_pickle_file, 'rb'))
        if opsim_df is None:
            factory = SqliteDataFrameFactory(opsim_db_file)
            self.obs_conditions = factory.create(
                'obsHistID filter moonAlt moonPhase'.split(), 'Summary',
                condition='where fieldID=%d' % fieldID)
        else:
            self.obs_conditions = opsim_df['obsHistID filter moonAlt moonPhase'.split()]

    def __call__(self, obsid):
        """
        Return the predicted CPU time given an obsHistID. The obsHistID
        must be in the Opsim database file and fieldID specified on
        initialization of the instance.
        """
        filter_index, moonalt, moonphase = self.conditions(obsid)
        return self.cputime(filter_index, moonalt, moonphase)

    def conditions(self, obsid):
        """
        Return the observing conditions (filter index 0-5, moon altitude
        in degrees, moon phase) that feed the Random Forest predictor.

        Raises
        ------
        RuntimeError if obsid is not found in the selected field.
        """
        rec = self.obs_conditions[self.obs_conditions['obsHistID'] == obsid]
        if rec.size != 0:
            # Translate the filter string into an index 0-5
            filter_index = 'ugrizy'.find(rec['filter'].values[0])
            moonalt = math.degrees(rec['moonAlt'].values[0])
            moonphase = rec['moonPhase'].values[0]
        else:
            # BUG FIX: the original applied % to obsid only (TypeError) and
            # referenced the undefined name fieldID (NameError), so the
            # intended RuntimeError was never raised.
            raise RuntimeError('%d is not a Run 1 obsHistID in field %d'
                               % (obsid, self.fieldID))
        return filter_index, moonalt, moonphase

    def cputime(self, filter_index, moonalt, moonphase):
        """Return 10**(RF prediction) for the given conditions (seconds)."""
        return 10.**self.RFbest.predict(np.array([[filter_index, moonalt,
                                                   moonphase]]))
if __name__ == '__main__':
    # Quick smoke tests of the predictor (requires RF_pickle.p and the
    # default Opsim database to be present).
    pred = CpuPred()
    print(pred(210))
    print(pred.cputime(3., 10., 50.))
    # An obsHistID outside the field raises:
    # pred(-999)

    # Compare predicted vs. actual fell-class CPU times for Run 1.
    metadata_file = os.path.join(os.environ['TWINKLES_DIR'], 'data',
                                 'run1_metadata_v6.csv')
    run1meta = pd.read_csv(metadata_file,
                           usecols=['filter', 'moonalt', 'moonphase',
                                    'cputime_fell'])
    bands = np.array(run1meta['filter'])
    moonalts = np.array(run1meta['moonalt'])
    moonphases = np.array(run1meta['moonphase'])
    actual = np.array(run1meta['cputime_fell'])
    predicted = np.zeros(bands.size, dtype=float)
    for i in range(bands.size):
        predicted[i] = pred.cputime(bands[i], moonalts[i], moonphases[i])

    plt.scatter(np.log10(actual), np.log10(predicted))
    plt.plot([4, 6.5], [4, 6.5])
    pylab.ylim([4, 6.5])
    pylab.xlim([4, 6.5])
    plt.xlabel('log10(Actual Fell CPU time, s)')
    plt.ylabel('log10(Predicted Fell CPU time, s)')
    plt.title('Run 1 CPU Times Predicted vs. Actual')
    pylab.savefig('predicted_vs_actual.png', bbox_inches='tight')
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"math.degrees",
"pylab.savefig",
"numpy.array",
"pylab.ylim",
"pylab.xlim",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"os.path.join"
] | [((3447, 3475), 'numpy.array', 'np.array', (["run1meta['filter']"], {}), "(run1meta['filter'])\n", (3455, 3475), True, 'import numpy as np\n'), ((3490, 3519), 'numpy.array', 'np.array', (["run1meta['moonalt']"], {}), "(run1meta['moonalt'])\n", (3498, 3519), True, 'import numpy as np\n'), ((3536, 3567), 'numpy.array', 'np.array', (["run1meta['moonphase']"], {}), "(run1meta['moonphase'])\n", (3544, 3567), True, 'import numpy as np\n'), ((3581, 3615), 'numpy.array', 'np.array', (["run1meta['cputime_fell']"], {}), "(run1meta['cputime_fell'])\n", (3589, 3615), True, 'import numpy as np\n'), ((3633, 3667), 'numpy.zeros', 'np.zeros', (['filter.size'], {'dtype': 'float'}), '(filter.size, dtype=float)\n', (3641, 3667), True, 'import numpy as np\n'), ((3832, 3860), 'matplotlib.pyplot.plot', 'plt.plot', (['[4, 6.5]', '[4, 6.5]'], {}), '([4, 6.5], [4, 6.5])\n', (3840, 3860), True, 'import matplotlib.pyplot as plt\n'), ((3862, 3882), 'pylab.ylim', 'pylab.ylim', (['[4, 6.5]'], {}), '([4, 6.5])\n', (3872, 3882), False, 'import pylab\n'), ((3886, 3906), 'pylab.xlim', 'pylab.xlim', (['[4, 6.5]'], {}), '([4, 6.5])\n', (3896, 3906), False, 'import pylab\n'), ((3910, 3954), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log10(Actual Fell CPU time, s)"""'], {}), "('log10(Actual Fell CPU time, s)')\n", (3920, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3959, 4006), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log10(Predicted Fell CPU time, s)"""'], {}), "('log10(Predicted Fell CPU time, s)')\n", (3969, 4006), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4060), 'matplotlib.pyplot.title', 'plt.title', (['"""Run 1 CPU Times Predicted vs. Actual"""'], {}), "('Run 1 CPU Times Predicted vs. 
Actual')\n", (4020, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4065, 4126), 'pylab.savefig', 'pylab.savefig', (['"""predicted_vs_actual.png"""'], {'bbox_inches': '"""tight"""'}), "('predicted_vs_actual.png', bbox_inches='tight')\n", (4078, 4126), False, 'import pylab\n'), ((4130, 4140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4138, 4140), True, 'import matplotlib.pyplot as plt\n'), ((3285, 3357), 'os.path.join', 'os.path.join', (["os.environ['TWINKLES_DIR']", '"""data"""', '"""run1_metadata_v6.csv"""'], {}), "(os.environ['TWINKLES_DIR'], 'data', 'run1_metadata_v6.csv')\n", (3297, 3357), False, 'import os\n'), ((3789, 3805), 'numpy.log10', 'np.log10', (['actual'], {}), '(actual)\n', (3797, 3805), True, 'import numpy as np\n'), ((3807, 3826), 'numpy.log10', 'np.log10', (['predicted'], {}), '(predicted)\n', (3815, 3826), True, 'import numpy as np\n'), ((2604, 2642), 'math.degrees', 'math.degrees', (["rec['moonAlt'].values[0]"], {}), "(rec['moonAlt'].values[0])\n", (2616, 2642), False, 'import math\n'), ((2943, 2989), 'numpy.array', 'np.array', (['[[filter_index, moonalt, moonphase]]'], {}), '([[filter_index, moonalt, moonphase]])\n', (2951, 2989), True, 'import numpy as np\n')] |
import warnings
warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
from os import path
import numpy as np
import pandas as pd
from tqdm import tqdm
import pickle
from deepface.commons import functions, distance as dst
from deepface.DeepFace import represent, build_model
def find(img_path, db_path = "data/__local_dataset__", model_name ='VGG-Face', distance_metric = 'cosine', model = None, enforce_detection = False, detector_backend = 'opencv', align = True, prog_bar = True):
	"""
	Search a local face database for the identity (or identities) shown in
	the input image(s) by comparing facial embeddings.
	Parameters:
		img_path: exact image path, numpy array or base64 encoded image.
			To look up several identities, pass a list of paths instead of
			calling find in a loop, e.g. img_path = ["img1.jpg", "img2.jpg"]
		db_path (string): folder holding .jpg/.png images of known identities
		model_name (string): VGG-Face, Facenet, OpenFace, DeepFace, DeepID,
			Dlib or Ensemble
		distance_metric (string): cosine, euclidean, euclidean_l2
		model: optional pre-built deepface model (a dict of models for
			Ensemble); passing one avoids rebuilding on every call, e.g.
			model = DeepFace.build_model('VGG-Face')
		enforce_detection (boolean): raise if no face could be detected;
			set False for low resolution images
		detector_backend (string): retinaface, mtcnn, opencv, ssd or dlib
		align (boolean): apply face alignment before embedding
		prog_bar (boolean): enable/disable the tqdm progress bar
	Returns:
		A pandas DataFrame of candidate identities; a list of DataFrames
		when img_path is a list.
	"""
	tic = time.time()
	img_paths, bulkProcess = functions.initialize_input(img_path)
	#-------------------------------
	if os.path.isdir(db_path) == True:
		# Build (or validate) the recognition model(s).
		if model == None:
			if model_name == 'Ensemble':
				# NOTE(review): Boosting is not imported in this module, so the
				# Ensemble branches would raise NameError — confirm the import.
				print("Ensemble learning enabled")
				models = Boosting.loadModel()
			else: #model is not ensemble
				model = build_model(model_name)
				models = {}
				models[model_name] = model
		else: #model != None
			print("Already built model is passed")
			if model_name == 'Ensemble':
				Boosting.validate_model(model)
				models = model.copy()
			else:
				models = {}
				models[model_name] = model
		#---------------------------------------
		# Ensemble scans every model/metric pair; otherwise only the
		# requested pair is evaluated.
		if model_name == 'Ensemble':
			model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
			metric_names = ['cosine', 'euclidean', 'euclidean_l2']
		elif model_name != 'Ensemble':
			model_names = []; metric_names = []
			model_names.append(model_name)
			metric_names.append(distance_metric)
		#---------------------------------------
		# Database embeddings are cached in a pickle next to the images;
		# delete that file after adding identities to force a rebuild.
		file_name = "representations_%s.pkl" % (model_name)
		file_name = file_name.replace("-", "_").lower()
		if path.exists(db_path+"/"+file_name):
			#print("WARNING: Representations for images in ",db_path," folder were previously stored in ", file_name, ". If you added new instances after this file creation, then please delete this file and call find function again. It will create it again.")
			f = open(db_path+'/'+file_name, 'rb')
			representations = pickle.load(f)
			print("There are ", len(representations)," representations found in ",file_name)
		else: #create representation.pkl from scratch
			# Collect every .jpg/.png under db_path (recursively).
			employees = []
			for r, d, f in os.walk(db_path): # r=root, d=directories, f = files
				for file in f:
					if ('.jpg' in file.lower()) or ('.png' in file.lower()):
						exact_path = r + "/" + file
						employees.append(exact_path)
			if len(employees) == 0:
				raise ValueError("There is no image in ", db_path," folder! Validate .jpg or .png files exist in this path.")
			#------------------------
			#find representations for db images
			representations = []
			pbar = tqdm(range(0,len(employees)), desc='Finding representations', disable = prog_bar)
			#for employee in employees:
			for index in pbar:
				employee = employees[index]
				# One row per image: [path, embedding per model].
				instance = []
				instance.append(employee)
				for j in model_names:
					custom_model = models[j]
					representation = represent(img_path = employee
						, model_name = model_name, model = custom_model
						, enforce_detection = enforce_detection, detector_backend = detector_backend
						, align = align)
					instance.append(representation)
				#-------------------------------
				representations.append(instance)
			# Cache the embeddings for the next call.
			f = open(db_path+'/'+file_name, "wb")
			pickle.dump(representations, f)
			f.close()
			print("Representations stored in ",db_path,"/",file_name," file. Please delete this file when you add new identities in your database.")
		#----------------------------
		#now, we got representations for facial database
		if model_name != 'Ensemble':
			df = pd.DataFrame(representations, columns = ["identity", "%s_representation" % (model_name)])
		else: #ensemble learning
			columns = ['identity']
			[columns.append('%s_representation' % i) for i in model_names]
			df = pd.DataFrame(representations, columns = columns)
		df_base = df.copy() #df will be filtered in each img. we will restore it for the next item.
		resp_obj = []
		global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing', disable = prog_bar)
		for j in global_pbar:
			img_path = img_paths[j]
			#find representation for passed image
			for j in model_names:
				custom_model = models[j]
				target_representation = represent(img_path = img_path
					, model_name = model_name, model = custom_model
					, enforce_detection = enforce_detection, detector_backend = detector_backend
					, align = align)
				for k in metric_names:
					# Distance of the query embedding to every database row.
					distances = []
					for index, instance in df.iterrows():
						source_representation = instance["%s_representation" % (j)]
						if k == 'cosine':
							distance = dst.findCosineDistance(source_representation, target_representation)
						elif k == 'euclidean':
							distance = dst.findEuclideanDistance(source_representation, target_representation)
						elif k == 'euclidean_l2':
							distance = dst.findEuclideanDistance(dst.l2_normalize(source_representation), dst.l2_normalize(target_representation))
						distances.append(distance)
					#---------------------------
					# OpenFace + plain euclidean is excluded from the ensemble.
					if model_name == 'Ensemble' and j == 'OpenFace' and k == 'euclidean':
						continue
					else:
						df["%s_%s" % (j, k)] = distances
						if model_name != 'Ensemble':
							# Keep only matches under the per-model/metric
							# threshold, sorted by increasing distance.
							threshold = dst.findThreshold(j, k)
							df = df.drop(columns = ["%s_representation" % (j)])
							df = df[df["%s_%s" % (j, k)] <= threshold]
							df = df.sort_values(by = ["%s_%s" % (j, k)], ascending=True).reset_index(drop=True)
							resp_obj.append(df)
							df = df_base.copy() #restore df for the next iteration
			#----------------------------------
			if model_name == 'Ensemble':
				# Feed all distance columns to the boosted-tree verifier.
				feature_names = []
				for j in model_names:
					for k in metric_names:
						if model_name == 'Ensemble' and j == 'OpenFace' and k == 'euclidean':
							continue
						else:
							feature = '%s_%s' % (j, k)
							feature_names.append(feature)
				#print(df.head())
				x = df[feature_names].values
				#--------------------------------------
				boosted_tree = Boosting.build_gbm()
				y = boosted_tree.predict(x)
				verified_labels = []; scores = []
				for i in y:
					# Class 1 means "same person"; keep its probability as score.
					verified = np.argmax(i) == 1
					score = i[np.argmax(i)]
					verified_labels.append(verified)
					scores.append(score)
				df['verified'] = verified_labels
				df['score'] = scores
				df = df[df.verified == True]
				#df = df[df.score > 0.99] #confidence score
				df = df.sort_values(by = ["score"], ascending=False).reset_index(drop=True)
				df = df[['identity', 'verified', 'score']]
				resp_obj.append(df)
				df = df_base.copy() #restore df for the next iteration
		#----------------------------------
		toc = time.time()
		print("find function lasts ",toc-tic," seconds")
		# Single query image -> single DataFrame; list -> list of DataFrames.
		if len(resp_obj) == 1:
			return resp_obj[0]
		return resp_obj
	else:
		raise ValueError("Passed db_path does not exist!")
return None | [
"pandas.DataFrame",
"pickle.dump",
"deepface.commons.distance.findThreshold",
"warnings.filterwarnings",
"os.path.isdir",
"numpy.argmax",
"os.walk",
"os.path.exists",
"deepface.commons.distance.l2_normalize",
"time.time",
"deepface.DeepFace.build_model",
"pickle.load",
"deepface.DeepFace.rep... | [((17, 50), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (40, 50), False, 'import warnings\n'), ((1833, 1844), 'time.time', 'time.time', ([], {}), '()\n', (1842, 1844), False, 'import time\n'), ((1872, 1908), 'deepface.commons.functions.initialize_input', 'functions.initialize_input', (['img_path'], {}), '(img_path)\n', (1898, 1908), False, 'from deepface.commons import functions, distance as dst\n'), ((1949, 1971), 'os.path.isdir', 'os.path.isdir', (['db_path'], {}), '(db_path)\n', (1962, 1971), False, 'import os\n'), ((2940, 2978), 'os.path.exists', 'path.exists', (["(db_path + '/' + file_name)"], {}), "(db_path + '/' + file_name)\n", (2951, 2978), False, 'from os import path\n'), ((7929, 7940), 'time.time', 'time.time', ([], {}), '()\n', (7938, 7940), False, 'import time\n'), ((3291, 3305), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3302, 3305), False, 'import pickle\n'), ((3477, 3493), 'os.walk', 'os.walk', (['db_path'], {}), '(db_path)\n', (3484, 3493), False, 'import os\n'), ((4573, 4604), 'pickle.dump', 'pickle.dump', (['representations', 'f'], {}), '(representations, f)\n', (4584, 4604), False, 'import pickle\n'), ((4883, 4972), 'pandas.DataFrame', 'pd.DataFrame', (['representations'], {'columns': "['identity', '%s_representation' % model_name]"}), "(representations, columns=['identity', '%s_representation' %\n model_name])\n", (4895, 4972), True, 'import pandas as pd\n'), ((5102, 5148), 'pandas.DataFrame', 'pd.DataFrame', (['representations'], {'columns': 'columns'}), '(representations, columns=columns)\n', (5114, 5148), True, 'import pandas as pd\n'), ((2153, 2176), 'deepface.DeepFace.build_model', 'build_model', (['model_name'], {}), '(model_name)\n', (2164, 2176), False, 'from deepface.DeepFace import represent, build_model\n'), ((5526, 5690), 'deepface.DeepFace.represent', 'represent', ([], {'img_path': 'img_path', 'model_name': 'model_name', 'model': 'custom_model', 
'enforce_detection': 'enforce_detection', 'detector_backend': 'detector_backend', 'align': 'align'}), '(img_path=img_path, model_name=model_name, model=custom_model,\n enforce_detection=enforce_detection, detector_backend=detector_backend,\n align=align)\n', (5535, 5690), False, 'from deepface.DeepFace import represent, build_model\n'), ((4224, 4388), 'deepface.DeepFace.represent', 'represent', ([], {'img_path': 'employee', 'model_name': 'model_name', 'model': 'custom_model', 'enforce_detection': 'enforce_detection', 'detector_backend': 'detector_backend', 'align': 'align'}), '(img_path=employee, model_name=model_name, model=custom_model,\n enforce_detection=enforce_detection, detector_backend=detector_backend,\n align=align)\n', (4233, 4388), False, 'from deepface.DeepFace import represent, build_model\n'), ((7412, 7424), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (7421, 7424), True, 'import numpy as np\n'), ((7445, 7457), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (7454, 7457), True, 'import numpy as np\n'), ((5913, 5981), 'deepface.commons.distance.findCosineDistance', 'dst.findCosineDistance', (['source_representation', 'target_representation'], {}), '(source_representation, target_representation)\n', (5935, 5981), True, 'from deepface.commons import functions, distance as dst\n'), ((6524, 6547), 'deepface.commons.distance.findThreshold', 'dst.findThreshold', (['j', 'k'], {}), '(j, k)\n', (6541, 6547), True, 'from deepface.commons import functions, distance as dst\n'), ((6029, 6100), 'deepface.commons.distance.findEuclideanDistance', 'dst.findEuclideanDistance', (['source_representation', 'target_representation'], {}), '(source_representation, target_representation)\n', (6054, 6100), True, 'from deepface.commons import functions, distance as dst\n'), ((6177, 6216), 'deepface.commons.distance.l2_normalize', 'dst.l2_normalize', (['source_representation'], {}), '(source_representation)\n', (6193, 6216), True, 'from deepface.commons import 
functions, distance as dst\n'), ((6218, 6257), 'deepface.commons.distance.l2_normalize', 'dst.l2_normalize', (['target_representation'], {}), '(target_representation)\n', (6234, 6257), True, 'from deepface.commons import functions, distance as dst\n')] |
"""
A unit test for the pyxsim analysis module.
"""
from pyxsim import \
TableApecModel, TBabsModel, \
ThermalSourceModel, PhotonList
from pyxsim.instruments import \
Lynx_Calorimeter
from pyxsim.tests.utils import \
BetaModelSource, ParticleBetaModelSource
from yt.testing import requires_module
import numpy as np
from yt.utilities.physical_constants import clight
import os
import tempfile
import shutil
from sherpa.astro.ui import load_user_model, add_user_pars, \
load_pha, ignore, fit, set_model, set_stat, set_method, \
get_fit_results
from six import string_types
from soxs.instrument import RedistributionMatrixFile, \
AuxiliaryResponseFile, instrument_simulator
from soxs.events import write_spectrum
from soxs.instrument_registry import get_instrument_from_registry
# Speed of light in km/s; used below to turn a bulk velocity into a redshift.
ckms = clight.in_units("km/s").v
def setup():
    """Mark the yt configuration as running inside the test suite."""
    from yt.config import ytcfg
    ytcfg["yt", "__withintesting"] = "True"
# Load the "mucal" instrument specification and its response files once at
# import time; the APEC fit models below share the RMF's energy grid.
# NOTE(review): if "mucal" is missing from the registry, the KeyError is
# swallowed and mucal_spec stays undefined, so the next line raises
# NameError — confirm whether a cleaner skip is intended here.
try:
    mucal_spec = get_instrument_from_registry("mucal")
except KeyError:
    pass
rmf = RedistributionMatrixFile(mucal_spec["rmf"])
arf = AuxiliaryResponseFile(mucal_spec['arf'])
fit_model = TableApecModel(rmf.elo[0], rmf.ehi[-1], rmf.n_e)
# Variant with free O and Ca abundances and thermal line broadening.
agen_var = TableApecModel(rmf.elo[0], rmf.ehi[-1], rmf.n_e,
                          var_elem=["O", "Ca"], thermal_broad=True)
def mymodel(pars, x, xhi=None):
    """Sherpa user model: TBabs-absorbed APEC spectrum with a velocity shift.

    pars = (nH, kT, metallicity, redshift, norm, velocity); x holds the
    lower edges of the fit bins on the RMF energy grid.
    """
    bin_width = x[1] - x[0]
    absorption = TBabsModel(pars[0]).get_absorb(x + 0.5 * bin_width)
    spectrum = fit_model.return_spectrum(pars[1], pars[2], pars[3], pars[4],
                                         velocity=pars[5])
    in_range = np.logical_and(rmf.elo >= x[0] - 0.5 * bin_width,
                              rmf.elo <= x[-1] + 0.5 * bin_width)
    return absorption * spectrum[in_range]
def mymodel_var(pars, x, xhi=None):
    """Sherpa user model with free O and Ca abundances (no velocity shift).

    pars = (nH, kT, abund, redshift, norm, O, Ca); x holds the lower edges
    of the fit bins on the RMF energy grid.
    """
    bin_width = x[1] - x[0]
    absorption = TBabsModel(pars[0]).get_absorb(x + 0.5 * bin_width)
    spectrum = agen_var.return_spectrum(pars[1], pars[2], pars[3], pars[4],
                                        elem_abund={"O": pars[5], "Ca": pars[6]})
    in_range = np.logical_and(rmf.elo >= x[0] - 0.5 * bin_width,
                              rmf.elo <= x[-1] + 0.5 * bin_width)
    return absorption * spectrum[in_range]
@requires_module("sherpa")
def test_beta_model():
    # Grid-based source, default (z) line of sight with Doppler shifting.
    do_beta_model(BetaModelSource(), "velocity_z", "emission_measure")
@requires_module("sherpa")
def test_beta_model_nomove():
    # Project along x so the z bulk velocity has no line-of-sight component.
    do_beta_model(BetaModelSource(), "velocity_z", "emission_measure",
                  axis="x", prng=89)
@requires_module("sherpa")
def test_beta_model_offaxis():
    # Off-axis projection along an arbitrary direction vector.
    do_beta_model(BetaModelSource(), "velocity_z", "emission_measure",
                  axis=[1.0, -2.0, 5.0], prng=78)
@requires_module("sherpa")
def test_particle_beta_model():
    # Particle-based source, default (z) line of sight.
    do_beta_model(ParticleBetaModelSource(), "particle_velocity_z",
                  ("io", "emission_measure"), prng=29)
@requires_module("sherpa")
def test_particle_beta_model_nomove():
    # Particle-based source projected along x (no line-of-sight motion).
    do_beta_model(ParticleBetaModelSource(), "particle_velocity_z",
                  ("io", "emission_measure"), axis="x",
                  prng=72)
@requires_module("sherpa")
def test_particle_beta_model_offaxis():
    # Particle-based source along an arbitrary off-axis direction.
    do_beta_model(ParticleBetaModelSource(), "particle_velocity_z",
                  ("io", "emission_measure"), prng=67,
                  axis=[1.0, -2.0, 5.0])
def do_beta_model(source, v_field, em_field, axis="z",
                  prng=None):
    """End-to-end check: generate X-ray photons from a beta-model source,
    observe them with the "mucal" instrument, fit the spectrum in sherpa,
    and verify the recovered kT, Z, redshift, norm and velocity offset
    against the simulation inputs.

    Parameters
    ----------
    source : test source providing ds, kT, Z and a default prng.
    v_field : field used for the line-of-sight velocity moments.
    em_field : emission-measure field used for weighting and the norm.
    axis : "x"/"z" or a 3-vector giving the projection direction.
    prng : random seed/state; defaults to the source's own.
    """
    # Work inside a scratch directory; restored and removed at the end.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    if prng is None:
        prng = source.prng
    ds = source.ds
    A = 30000.
    exp_time = 1.0e4
    redshift = 0.05
    nH_sim = 0.02
    sphere = ds.sphere("c", (0.5, "Mpc"))
    kT_sim = source.kT
    Z_sim = source.Z
    thermal_model = ThermalSourceModel("apec", 0.1, 11.5, 20000,
                                       Zmet=Z_sim, prng=prng)
    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          thermal_model)
    D_A = photons.parameters["fid_d_a"]
    # Convert the emission-measure integral to the APEC norm convention.
    norm_sim = sphere.quantities.total_quantity(em_field)
    norm_sim *= 1.0e-14/(4*np.pi*D_A*D_A*(1.+redshift)*(1.+redshift))
    norm_sim = float(norm_sim.in_cgs())
    v1, v2 = sphere.quantities.weighted_variance(v_field, em_field)
    # fac: fraction of the z velocity lying along the line of sight.
    if isinstance(axis, string_types):
        if axis == "z":
            fac = 1.0
        else:
            fac = 0.0
    else:
        axis /= np.sqrt(np.dot(axis, axis))
        fac = np.dot(axis, [0.0, 0.0, 1.0])
    sigma_sim = fac*float(v1.in_units("km/s"))
    mu_sim = -fac*float(v2.in_units("km/s"))
    events = photons.project_photons(axis, [30.0, 45.0], absorb_model="tbabs",
                                     nH=nH_sim, prng=prng)
    events.write_simput_file("beta_model", overwrite=True)
    # Run the idealized instrument simulation (no backgrounds).
    instrument_simulator("beta_model_simput.fits", "beta_model_evt.fits",
                         exp_time, "mucal", [30.0, 45.0],
                         overwrite=True, foreground=False, ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         prng=prng)
    write_spectrum("beta_model_evt.fits", "beta_model_evt.pi", overwrite=True)
    # The response files must sit next to the spectrum for sherpa to find.
    os.system("cp %s %s ." % (arf.filename, rmf.filename))
    load_user_model(mymodel, "tbapec")
    add_user_pars("tbapec", ["nH", "kT", "metallicity", "redshift", "norm", "velocity"],
                  [0.02, 4.0, 0.2, 0.04, norm_sim*0.8, 300.0],
                  parmins=[0.0, 0.1, 0.0, -200.0, 0.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 200.0, 1.0e9, 20000.0],
                  parfrozen=[True, False, False, False, False, False])
    load_pha("beta_model_evt.pi")
    set_stat("cstat")
    set_method("levmar")
    ignore(":0.6, 8.0:")
    set_model("tbapec")
    fit()
    res = get_fit_results()
    # Expected fitted redshift includes the Doppler shift of the bulk motion.
    redshift_sim = (1.0+mu_sim/ckms)*(1.0+redshift) - 1.0
    # Fitted parameters must agree with the simulation inputs.
    assert np.abs(res.parvals[0]-kT_sim)/kT_sim < 0.05
    assert np.abs(res.parvals[1]-Z_sim)/Z_sim < 0.05
    assert np.abs(res.parvals[2]-redshift_sim)/redshift_sim < 0.05
    assert np.abs(res.parvals[3]-norm_sim) < 0.05
    assert np.abs(res.parvals[4]-sigma_sim) < 30.0
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
def test_vapec_beta_model():
    """End-to-end check of a beta-model mock observation with variable
    O and Ca abundances: generate photons, simulate a Lynx calorimeter
    observation, fit the spectrum with Sherpa, and verify the recovered
    parameters against the input model."""
    source = BetaModelSource()

    # Work in a throwaway directory so all output files are isolated.
    workdir = tempfile.mkdtemp()
    startdir = os.getcwd()
    os.chdir(workdir)

    # Fixed RNG seed for reproducibility.
    prng = 45

    ds = source.ds

    # Mock-observation parameters: collecting area (cm^2), exposure (s),
    # source redshift, and foreground absorption column.
    area = 30000.
    t_exp = 1.0e4
    z_sim = 0.05
    nH_sim = 0.02

    sphere = ds.sphere("c", (0.5, "Mpc"))

    # Ground-truth values from the source model.
    kT_sim = source.kT
    Z_sim = source.Z
    O_sim = source.O
    Ca_sim = source.Ca

    var_elem = {"O": ("stream", "oxygen"),
                "Ca": ("stream", "calcium")}

    spectral_model = ThermalSourceModel("apec", 0.1, 11.5, 20000,
                                       var_elem=var_elem,
                                       Zmet=("gas", "metallicity"),
                                       prng=prng)

    photons = PhotonList.from_data_source(sphere, z_sim, area, t_exp,
                                          spectral_model)

    # Expected APEC normalization from the emission measure of the sphere.
    d_a = photons.parameters["fid_d_a"]
    norm_sim = sphere.quantities.total_quantity("emission_measure")
    norm_sim *= 1.0e-14 / (4.0 * np.pi * d_a * d_a * (1.0 + z_sim) ** 2)
    norm_sim = float(norm_sim.in_cgs())

    events = photons.project_photons("z", [30.0, 45.0], absorb_model="tbabs",
                                     nH=nH_sim, prng=prng, no_shifting=True)

    new_events = Lynx_Calorimeter(events, prng=prng)

    # The response files must sit in the working directory for the fit.
    os.system("cp %s %s ." % (arf.filename, rmf.filename))

    new_events.write_channel_spectrum("var_abund_beta_model_evt.pha", overwrite=True)

    # Fit with Sherpa using the user-defined absorbed vapec model.
    load_user_model(mymodel_var, "tbapec")
    add_user_pars("tbapec", ["nH", "kT", "abund", "redshift", "norm", "O", "Ca"],
                  [nH_sim, 4.0, Z_sim, z_sim, norm_sim * 0.8, 0.3, 0.5],
                  parmins=[0.0, 0.1, 0.0, -20.0, 0.0, 0.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 20.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[True, False, True, True, False, False, False])

    load_pha("var_abund_beta_model_evt.pha")
    set_stat("cstat")
    set_method("levmar")
    ignore(":0.6, 8.0:")
    set_model("tbapec")
    fit()
    res = get_fit_results()

    # Recovered parameters must agree with the input model.
    assert np.abs(res.parvals[0] - kT_sim) / kT_sim < 0.05
    assert np.abs(res.parvals[1] - norm_sim) / norm_sim < 0.05
    assert np.abs(res.parvals[2] - O_sim) / O_sim < 0.05
    assert np.abs(res.parvals[3] - Ca_sim) / Ca_sim < 0.15

    os.chdir(startdir)
    shutil.rmtree(workdir)
# Running this module directly executes the full test suite (mock
# observation -> instrument simulation -> spectral fit) in order.
if __name__ == "__main__":
    test_beta_model_nomove()
    test_beta_model_offaxis()
    test_particle_beta_model_nomove()
    test_particle_beta_model_offaxis()
    test_beta_model()
    test_particle_beta_model()
    test_vapec_beta_model()
| [
"numpy.abs",
"soxs.instrument.AuxiliaryResponseFile",
"shutil.rmtree",
"os.chdir",
"pyxsim.instruments.Lynx_Calorimeter",
"yt.utilities.physical_constants.clight.in_units",
"soxs.instrument.instrument_simulator",
"sherpa.astro.ui.load_user_model",
"sherpa.astro.ui.set_model",
"sherpa.astro.ui.get_... | [((1024, 1067), 'soxs.instrument.RedistributionMatrixFile', 'RedistributionMatrixFile', (["mucal_spec['rmf']"], {}), "(mucal_spec['rmf'])\n", (1048, 1067), False, 'from soxs.instrument import RedistributionMatrixFile, AuxiliaryResponseFile, instrument_simulator\n'), ((1074, 1114), 'soxs.instrument.AuxiliaryResponseFile', 'AuxiliaryResponseFile', (["mucal_spec['arf']"], {}), "(mucal_spec['arf'])\n", (1095, 1114), False, 'from soxs.instrument import RedistributionMatrixFile, AuxiliaryResponseFile, instrument_simulator\n'), ((1127, 1175), 'pyxsim.TableApecModel', 'TableApecModel', (['rmf.elo[0]', 'rmf.ehi[-1]', 'rmf.n_e'], {}), '(rmf.elo[0], rmf.ehi[-1], rmf.n_e)\n', (1141, 1175), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((1187, 1281), 'pyxsim.TableApecModel', 'TableApecModel', (['rmf.elo[0]', 'rmf.ehi[-1]', 'rmf.n_e'], {'var_elem': "['O', 'Ca']", 'thermal_broad': '(True)'}), "(rmf.elo[0], rmf.ehi[-1], rmf.n_e, var_elem=['O', 'Ca'],\n thermal_broad=True)\n", (1201, 1281), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((2003, 2028), 'yt.testing.requires_module', 'requires_module', (['"""sherpa"""'], {}), "('sherpa')\n", (2018, 2028), False, 'from yt.testing import requires_module\n'), ((2140, 2165), 'yt.testing.requires_module', 'requires_module', (['"""sherpa"""'], {}), "('sherpa')\n", (2155, 2165), False, 'from yt.testing import requires_module\n'), ((2321, 2346), 'yt.testing.requires_module', 'requires_module', (['"""sherpa"""'], {}), "('sherpa')\n", (2336, 2346), False, 'from yt.testing import requires_module\n'), ((2516, 2541), 'yt.testing.requires_module', 'requires_module', (['"""sherpa"""'], {}), "('sherpa')\n", (2531, 2541), False, 'from yt.testing import requires_module\n'), ((2714, 2739), 'yt.testing.requires_module', 'requires_module', (['"""sherpa"""'], {}), "('sherpa')\n", (2729, 2739), False, 'from yt.testing import 
requires_module\n'), ((2947, 2972), 'yt.testing.requires_module', 'requires_module', (['"""sherpa"""'], {}), "('sherpa')\n", (2962, 2972), False, 'from yt.testing import requires_module\n'), ((814, 837), 'yt.utilities.physical_constants.clight.in_units', 'clight.in_units', (['"""km/s"""'], {}), "('km/s')\n", (829, 837), False, 'from yt.utilities.physical_constants import clight\n'), ((953, 990), 'soxs.instrument_registry.get_instrument_from_registry', 'get_instrument_from_registry', (['"""mucal"""'], {}), "('mucal')\n", (981, 990), False, 'from soxs.instrument_registry import get_instrument_from_registry\n'), ((1366, 1385), 'pyxsim.TBabsModel', 'TBabsModel', (['pars[0]'], {}), '(pars[0])\n', (1376, 1385), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((1526, 1597), 'numpy.logical_and', 'np.logical_and', (['(rmf.elo >= x[0] - 0.5 * dx)', '(rmf.elo <= x[-1] + 0.5 * dx)'], {}), '(rmf.elo >= x[0] - 0.5 * dx, rmf.elo <= x[-1] + 0.5 * dx)\n', (1540, 1597), True, 'import numpy as np\n'), ((1686, 1705), 'pyxsim.TBabsModel', 'TBabsModel', (['pars[0]'], {}), '(pars[0])\n', (1696, 1705), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((1906, 1977), 'numpy.logical_and', 'np.logical_and', (['(rmf.elo >= x[0] - 0.5 * dx)', '(rmf.elo <= x[-1] + 0.5 * dx)'], {}), '(rmf.elo >= x[0] - 0.5 * dx, rmf.elo <= x[-1] + 0.5 * dx)\n', (1920, 1977), True, 'import numpy as np\n'), ((2062, 2079), 'pyxsim.tests.utils.BetaModelSource', 'BetaModelSource', ([], {}), '()\n', (2077, 2079), False, 'from pyxsim.tests.utils import BetaModelSource, ParticleBetaModelSource\n'), ((2206, 2223), 'pyxsim.tests.utils.BetaModelSource', 'BetaModelSource', ([], {}), '()\n', (2221, 2223), False, 'from pyxsim.tests.utils import BetaModelSource, ParticleBetaModelSource\n'), ((2388, 2405), 'pyxsim.tests.utils.BetaModelSource', 'BetaModelSource', ([], {}), '()\n', (2403, 2405), False, 'from pyxsim.tests.utils import 
BetaModelSource, ParticleBetaModelSource\n'), ((2584, 2609), 'pyxsim.tests.utils.ParticleBetaModelSource', 'ParticleBetaModelSource', ([], {}), '()\n', (2607, 2609), False, 'from pyxsim.tests.utils import BetaModelSource, ParticleBetaModelSource\n'), ((2789, 2814), 'pyxsim.tests.utils.ParticleBetaModelSource', 'ParticleBetaModelSource', ([], {}), '()\n', (2812, 2814), False, 'from pyxsim.tests.utils import BetaModelSource, ParticleBetaModelSource\n'), ((3023, 3048), 'pyxsim.tests.utils.ParticleBetaModelSource', 'ParticleBetaModelSource', ([], {}), '()\n', (3046, 3048), False, 'from pyxsim.tests.utils import BetaModelSource, ParticleBetaModelSource\n'), ((3293, 3311), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3309, 3311), False, 'import tempfile\n'), ((3325, 3336), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3334, 3336), False, 'import os\n'), ((3341, 3357), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (3349, 3357), False, 'import os\n'), ((3611, 3678), 'pyxsim.ThermalSourceModel', 'ThermalSourceModel', (['"""apec"""', '(0.1)', '(11.5)', '(20000)'], {'Zmet': 'Z_sim', 'prng': 'prng'}), "('apec', 0.1, 11.5, 20000, Zmet=Z_sim, prng=prng)\n", (3629, 3678), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((3732, 3805), 'pyxsim.PhotonList.from_data_source', 'PhotonList.from_data_source', (['sphere', 'redshift', 'A', 'exp_time', 'thermal_model'], {}), '(sphere, redshift, A, exp_time, thermal_model)\n', (3759, 3805), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((4644, 4837), 'soxs.instrument.instrument_simulator', 'instrument_simulator', (['"""beta_model_simput.fits"""', '"""beta_model_evt.fits"""', 'exp_time', '"""mucal"""', '[30.0, 45.0]'], {'overwrite': '(True)', 'foreground': '(False)', 'ptsrc_bkgnd': '(False)', 'instr_bkgnd': '(False)', 'prng': 'prng'}), "('beta_model_simput.fits', 'beta_model_evt.fits',\n exp_time, 'mucal', [30.0, 45.0], 
overwrite=True, foreground=False,\n ptsrc_bkgnd=False, instr_bkgnd=False, prng=prng)\n", (4664, 4837), False, 'from soxs.instrument import RedistributionMatrixFile, AuxiliaryResponseFile, instrument_simulator\n'), ((4936, 5010), 'soxs.events.write_spectrum', 'write_spectrum', (['"""beta_model_evt.fits"""', '"""beta_model_evt.pi"""'], {'overwrite': '(True)'}), "('beta_model_evt.fits', 'beta_model_evt.pi', overwrite=True)\n", (4950, 5010), False, 'from soxs.events import write_spectrum\n'), ((5016, 5070), 'os.system', 'os.system', (["('cp %s %s .' % (arf.filename, rmf.filename))"], {}), "('cp %s %s .' % (arf.filename, rmf.filename))\n", (5025, 5070), False, 'import os\n'), ((5076, 5110), 'sherpa.astro.ui.load_user_model', 'load_user_model', (['mymodel', '"""tbapec"""'], {}), "(mymodel, 'tbapec')\n", (5091, 5110), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5115, 5419), 'sherpa.astro.ui.add_user_pars', 'add_user_pars', (['"""tbapec"""', "['nH', 'kT', 'metallicity', 'redshift', 'norm', 'velocity']", '[0.02, 4.0, 0.2, 0.04, norm_sim * 0.8, 300.0]'], {'parmins': '[0.0, 0.1, 0.0, -200.0, 0.0, 0.0]', 'parmaxs': '[10.0, 20.0, 10.0, 200.0, 1000000000.0, 20000.0]', 'parfrozen': '[True, False, False, False, False, False]'}), "('tbapec', ['nH', 'kT', 'metallicity', 'redshift', 'norm',\n 'velocity'], [0.02, 4.0, 0.2, 0.04, norm_sim * 0.8, 300.0], parmins=[\n 0.0, 0.1, 0.0, -200.0, 0.0, 0.0], parmaxs=[10.0, 20.0, 10.0, 200.0, \n 1000000000.0, 20000.0], parfrozen=[True, False, False, False, False, False]\n )\n", (5128, 5419), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5469, 5498), 'sherpa.astro.ui.load_pha', 'load_pha', (['"""beta_model_evt.pi"""'], {}), "('beta_model_evt.pi')\n", (5477, 5498), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, 
ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5503, 5520), 'sherpa.astro.ui.set_stat', 'set_stat', (['"""cstat"""'], {}), "('cstat')\n", (5511, 5520), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5525, 5545), 'sherpa.astro.ui.set_method', 'set_method', (['"""levmar"""'], {}), "('levmar')\n", (5535, 5545), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5550, 5570), 'sherpa.astro.ui.ignore', 'ignore', (['""":0.6, 8.0:"""'], {}), "(':0.6, 8.0:')\n", (5556, 5570), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5575, 5594), 'sherpa.astro.ui.set_model', 'set_model', (['"""tbapec"""'], {}), "('tbapec')\n", (5584, 5594), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5599, 5604), 'sherpa.astro.ui.fit', 'fit', ([], {}), '()\n', (5602, 5604), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5615, 5632), 'sherpa.astro.ui.get_fit_results', 'get_fit_results', ([], {}), '()\n', (5630, 5632), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((5974, 5990), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (5982, 5990), False, 'import os\n'), ((5995, 6016), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (6008, 6016), False, 'import shutil\n'), ((6059, 6076), 'pyxsim.tests.utils.BetaModelSource', 'BetaModelSource', ([], {}), '()\n', (6074, 6076), False, 'from pyxsim.tests.utils import BetaModelSource, ParticleBetaModelSource\n'), ((6091, 6109), 
'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6107, 6109), False, 'import tempfile\n'), ((6123, 6134), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6132, 6134), False, 'import os\n'), ((6139, 6155), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (6147, 6155), False, 'import os\n'), ((6493, 6600), 'pyxsim.ThermalSourceModel', 'ThermalSourceModel', (['"""apec"""', '(0.1)', '(11.5)', '(20000)'], {'var_elem': 'var_elem', 'Zmet': "('gas', 'metallicity')", 'prng': 'prng'}), "('apec', 0.1, 11.5, 20000, var_elem=var_elem, Zmet=('gas',\n 'metallicity'), prng=prng)\n", (6511, 6600), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((6729, 6802), 'pyxsim.PhotonList.from_data_source', 'PhotonList.from_data_source', (['sphere', 'redshift', 'A', 'exp_time', 'thermal_model'], {}), '(sphere, redshift, A, exp_time, thermal_model)\n', (6756, 6802), False, 'from pyxsim import TableApecModel, TBabsModel, ThermalSourceModel, PhotonList\n'), ((7239, 7274), 'pyxsim.instruments.Lynx_Calorimeter', 'Lynx_Calorimeter', (['events'], {'prng': 'prng'}), '(events, prng=prng)\n', (7255, 7274), False, 'from pyxsim.instruments import Lynx_Calorimeter\n'), ((7280, 7334), 'os.system', 'os.system', (["('cp %s %s .' % (arf.filename, rmf.filename))"], {}), "('cp %s %s .' 
% (arf.filename, rmf.filename))\n", (7289, 7334), False, 'import os\n'), ((7427, 7465), 'sherpa.astro.ui.load_user_model', 'load_user_model', (['mymodel_var', '"""tbapec"""'], {}), "(mymodel_var, 'tbapec')\n", (7442, 7465), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((7470, 7790), 'sherpa.astro.ui.add_user_pars', 'add_user_pars', (['"""tbapec"""', "['nH', 'kT', 'abund', 'redshift', 'norm', 'O', 'Ca']", '[nH_sim, 4.0, Z_sim, redshift, norm_sim * 0.8, 0.3, 0.5]'], {'parmins': '[0.0, 0.1, 0.0, -20.0, 0.0, 0.0, 0.0]', 'parmaxs': '[10.0, 20.0, 10.0, 20.0, 1000000000.0, 10.0, 10.0]', 'parfrozen': '[True, False, True, True, False, False, False]'}), "('tbapec', ['nH', 'kT', 'abund', 'redshift', 'norm', 'O', 'Ca'\n ], [nH_sim, 4.0, Z_sim, redshift, norm_sim * 0.8, 0.3, 0.5], parmins=[\n 0.0, 0.1, 0.0, -20.0, 0.0, 0.0, 0.0], parmaxs=[10.0, 20.0, 10.0, 20.0, \n 1000000000.0, 10.0, 10.0], parfrozen=[True, False, True, True, False, \n False, False])\n", (7483, 7790), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((7839, 7879), 'sherpa.astro.ui.load_pha', 'load_pha', (['"""var_abund_beta_model_evt.pha"""'], {}), "('var_abund_beta_model_evt.pha')\n", (7847, 7879), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((7884, 7901), 'sherpa.astro.ui.set_stat', 'set_stat', (['"""cstat"""'], {}), "('cstat')\n", (7892, 7901), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((7906, 7926), 'sherpa.astro.ui.set_method', 'set_method', (['"""levmar"""'], {}), "('levmar')\n", (7916, 7926), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, 
set_method, get_fit_results\n'), ((7931, 7951), 'sherpa.astro.ui.ignore', 'ignore', (['""":0.6, 8.0:"""'], {}), "(':0.6, 8.0:')\n", (7937, 7951), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((7956, 7975), 'sherpa.astro.ui.set_model', 'set_model', (['"""tbapec"""'], {}), "('tbapec')\n", (7965, 7975), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((7980, 7985), 'sherpa.astro.ui.fit', 'fit', ([], {}), '()\n', (7983, 7985), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((7996, 8013), 'sherpa.astro.ui.get_fit_results', 'get_fit_results', ([], {}), '()\n', (8011, 8013), False, 'from sherpa.astro.ui import load_user_model, add_user_pars, load_pha, ignore, fit, set_model, set_stat, set_method, get_fit_results\n'), ((8242, 8258), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (8250, 8258), False, 'import os\n'), ((8263, 8284), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (8276, 8284), False, 'import shutil\n'), ((4317, 4346), 'numpy.dot', 'np.dot', (['axis', '[0.0, 0.0, 1.0]'], {}), '(axis, [0.0, 0.0, 1.0])\n', (4323, 4346), True, 'import numpy as np\n'), ((5879, 5912), 'numpy.abs', 'np.abs', (['(res.parvals[3] - norm_sim)'], {}), '(res.parvals[3] - norm_sim)\n', (5885, 5912), True, 'import numpy as np\n'), ((5929, 5963), 'numpy.abs', 'np.abs', (['(res.parvals[4] - sigma_sim)'], {}), '(res.parvals[4] - sigma_sim)\n', (5935, 5963), True, 'import numpy as np\n'), ((4283, 4301), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (4289, 4301), True, 'import numpy as np\n'), ((5704, 5735), 'numpy.abs', 'np.abs', (['(res.parvals[0] - kT_sim)'], {}), '(res.parvals[0] - kT_sim)\n', (5710, 5735), True, 'import numpy as np\n'), ((5759, 5789), 
'numpy.abs', 'np.abs', (['(res.parvals[1] - Z_sim)'], {}), '(res.parvals[1] - Z_sim)\n', (5765, 5789), True, 'import numpy as np\n'), ((5812, 5849), 'numpy.abs', 'np.abs', (['(res.parvals[2] - redshift_sim)'], {}), '(res.parvals[2] - redshift_sim)\n', (5818, 5849), True, 'import numpy as np\n'), ((8026, 8057), 'numpy.abs', 'np.abs', (['(res.parvals[0] - kT_sim)'], {}), '(res.parvals[0] - kT_sim)\n', (8032, 8057), True, 'import numpy as np\n'), ((8081, 8114), 'numpy.abs', 'np.abs', (['(res.parvals[1] - norm_sim)'], {}), '(res.parvals[1] - norm_sim)\n', (8087, 8114), True, 'import numpy as np\n'), ((8140, 8170), 'numpy.abs', 'np.abs', (['(res.parvals[2] - O_sim)'], {}), '(res.parvals[2] - O_sim)\n', (8146, 8170), True, 'import numpy as np\n'), ((8193, 8224), 'numpy.abs', 'np.abs', (['(res.parvals[3] - Ca_sim)'], {}), '(res.parvals[3] - Ca_sim)\n', (8199, 8224), True, 'import numpy as np\n')] |
import numpy as np
import torchvision
import time
import math
import os
import copy
import pdb
import argparse
import sys
import cv2
import skimage.io
import skimage.transform
import skimage.color
import skimage
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
from dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer, RGB_MEAN, RGB_STD
from scipy.optimize import linear_sum_assignment
# This script was written against torch 0.4.x; the check below pins the minor
# version.  NOTE(review): `assert` is stripped under `python -O`, so this
# guard silently disappears in optimized runs.
assert torch.__version__.split('.')[1] == '4'

print('CUDA available: {}'.format(torch.cuda.is_available()))

# BGR color palette used for drawing per-identity boxes and motion traces;
# track ids are mapped onto it modulo its length.
color_list = [(0, 0, 255), (255, 0, 0), (0, 255, 0), (255, 0, 255), (0, 255, 255), (255, 255, 0), (128, 0, 255),
              (0, 128, 255), (128, 255, 0), (0, 255, 128), (255, 128, 0), (255, 0, 128), (128, 128, 255), (128, 255, 128), (255, 128, 128), (128, 128, 0), (128, 0, 128)]
class detect_rect:
    """A single detection: the box observed in the current frame, the box
    the network predicts for the next frame, a confidence score, and an
    (initially unset) track identity."""

    def __init__(self):
        self.curr_frame = 0
        self.curr_rect = np.array([0, 0, 1, 1])
        self.next_rect = np.array([0, 0, 1, 1])
        self.conf = 0
        self.id = 0

    @property
    def position(self):
        """Center (x, y) of the current-frame box."""
        x1, y1, x2, y2 = self.curr_rect
        return np.array([(x1 + x2) / 2, (y1 + y2) / 2])

    @property
    def size(self):
        """Width and height (w, h) of the current-frame box."""
        x1, y1, x2, y2 = self.curr_rect
        return np.array([x2 - x1, y2 - y1])
class tracklet:
    """An identity track: the ordered list of detections that were matched
    to a single id, plus bookkeeping for matching and pruning."""

    def __init__(self, det_rect):
        self.id = det_rect.id
        self.rect_list = [det_rect]
        self.rect_num = 1
        self.last_rect = det_rect
        self.last_frame = det_rect.curr_frame
        self.no_match_frame = 0  # consecutive frames without a matched detection

    def add_rect(self, det_rect):
        """Append a newly matched detection and update the bookkeeping."""
        self.rect_list.append(det_rect)
        self.rect_num += 1
        self.last_rect = det_rect
        self.last_frame = det_rect.curr_frame

    def _step_velocity(self, a, b):
        """Mean per-frame displacement between rect_list[a] and rect_list[b]."""
        ra, rb = self.rect_list[a], self.rect_list[b]
        return (ra.position - rb.position) / (ra.curr_frame - rb.curr_frame)

    @property
    def velocity(self):
        """Estimated per-frame motion of the track center."""
        n = self.rect_num
        if n < 2:
            return (0, 0)
        if n < 6:
            # Too short to smooth: one-step finite difference.
            return self._step_velocity(n - 1, n - 2)
        # Average three 3-sample-apart finite differences to damp jitter.
        return (self._step_velocity(n - 1, n - 4)
                + self._step_velocity(n - 2, n - 5)
                + self._step_velocity(n - 3, n - 6)) / 3
def cal_iou(rect1, rect2):
    """Intersection-over-union of two axis-aligned boxes given as
    (x1, y1, x2, y2).  Returns 0 when the boxes do not overlap."""
    ax1, ay1, ax2, ay2 = rect1
    bx1, by1, bx2, by2 = rect2

    inter_w = min(ax2, bx2) - max(ax1, bx1)
    inter_h = min(ay2, by2) - max(ay1, by1)
    if inter_w <= 0 or inter_h <= 0:
        return 0

    inter = inter_w * inter_h
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return float(inter) / union
def cal_simi(det_rect1, det_rect2):
    # Similarity of two detections from consecutive frames: IoU between the
    # next-frame box predicted by det_rect1 and the current box of det_rect2.
    return cal_iou(det_rect1.next_rect, det_rect2.curr_rect)
def cal_simi_track_det(track, det_rect):
    """IoU-based similarity between an existing track and a new detection.

    The detection must come from a frame strictly after the track's last
    matched frame; otherwise 0 is returned (with a diagnostic print)."""
    gap = det_rect.curr_frame - track.last_frame
    if gap <= 0:
        # Detection is not in the track's future -- invalid comparison.
        print("cal_simi_track_det error")
        return 0
    if gap == 1:
        # Adjacent frames: compare against the track's own next-frame prediction.
        return cal_iou(track.last_rect.next_rect, det_rect.curr_rect)
    # Frames were skipped: extrapolate the last box using the track velocity.
    # np.append(v, v) duplicates (vx, vy) so the shift applies to all four
    # box coordinates (x1, y1, x2, y2).
    shift = np.append(track.velocity, track.velocity) * gap
    pred_rect = track.last_rect.curr_rect + shift
    return cal_iou(pred_rect, det_rect.curr_rect)
def track_det_match(tracklet_list, det_rect_list, min_iou = 0.5):
    """Match live tracks to current-frame detections.

    Builds a (track x detection) matrix of negated similarities, solves the
    assignment with the Hungarian algorithm, then demotes any assigned pair
    whose similarity does not reach ``min_iou``.

    Returns (matches, unmatched_track_indices, unmatched_det_indices).
    """
    n_trk = len(tracklet_list)
    n_det = len(det_rect_list)

    cost_mat = np.zeros((n_trk, n_det))
    for ti in range(n_trk):
        for di in range(n_det):
            # Negate: linear_sum_assignment minimizes cost.
            cost_mat[ti, di] = -cal_simi_track_det(tracklet_list[ti], det_rect_list[di])

    # One (track, detection) pair per row.
    pairs = np.transpose(np.asarray(linear_sum_assignment(cost_mat)))

    matches, unmatched1, unmatched2 = [], [], []
    for ti in range(n_trk):
        if ti not in pairs[:, 0]:
            unmatched1.append(ti)
    for di in range(n_det):
        if di not in pairs[:, 1]:
            unmatched2.append(di)
    for ti, di in pairs:
        if cost_mat[ti, di] > -min_iou:
            # Assigned by the solver, but the overlap is too small --
            # treat both sides as unmatched.
            unmatched1.append(ti)
            unmatched2.append(di)
        else:
            matches.append((ti, di))
    return matches, unmatched1, unmatched2
def draw_caption(image, box, caption, color):
    """Render `caption` just above the top-left corner of `box`,
    drawing in place on `image`."""
    x1, y1 = (int(v) for v in box[:2])
    cv2.putText(image, caption, (x1, y1 - 8), cv2.FONT_HERSHEY_PLAIN, 2, color, 2)
def run_each_dataset(model_dir, retinanet, dataset_path, subset, cur_dataset):
    """Track one MOT17 sequence with a CTracker model and save the results.

    Runs the detector frame by frame, links detections into tracks via
    IoU/Hungarian matching, then writes MOT-format results to
    ``model_dir/results/<cur_dataset>.txt`` plus annotated frames and an
    ``.mp4`` visualization.

    Parameters
    ----------
    model_dir : str
        Directory holding the model; results are written under it.
    retinanet : nn.Module
        The trained CTracker network (must be on GPU; called with
        ``last_feat`` chaining between consecutive frames).
    dataset_path : str
        Root of the MOT17 dataset.
    subset : str
        'train' or 'test'.
    cur_dataset : str
        Sequence name, e.g. 'MOT17-02'.
    """
    print(cur_dataset)

    img_list = os.listdir(os.path.join(dataset_path, subset, cur_dataset, 'img1'))
    img_list = [os.path.join(dataset_path, subset, cur_dataset, 'img1', _) for _ in img_list if ('jpg' in _) or ('png' in _)]
    img_list = sorted(img_list)
    img_len = len(img_list)
    last_feat = None

    # Matching / pruning thresholds.
    confidence_threshold = 0.4
    IOU_threshold = 0.5
    retention_threshold = 10  # frames a track survives without a match

    det_list_all = []
    tracklet_all = []
    max_id = 0
    max_draw_len = 100
    draw_interval = 5
    # Defaults; overwritten below from the actual image size.
    img_width = 1920
    img_height = 1080
    fps = 30

    for i in range(img_len):
        det_list_all.append([])

    # The network pairs frame t with frame t+1, so iterate one step past the
    # end (the last frame is duplicated) and detections land at idx - 1.
    for idx in range(img_len + 1):
        i = idx - 1
        print('tracking: ', i)
        with torch.no_grad():
            data_path1 = img_list[min(idx, img_len - 1)]
            img_origin1 = skimage.io.imread(data_path1)
            img_h, img_w, _ = img_origin1.shape
            img_height, img_width = img_h, img_w
            # Pad to a multiple of 32 as required by the FPN backbone.
            resize_h, resize_w = math.ceil(img_h / 32) * 32, math.ceil(img_w / 32) * 32
            img1 = np.zeros((resize_h, resize_w, 3), dtype=img_origin1.dtype)
            img1[:img_h, :img_w, :] = img_origin1
            img1 = (img1.astype(np.float32) / 255.0 - np.array([[RGB_MEAN]])) / np.array([[RGB_STD]])
            img1 = torch.from_numpy(img1).permute(2, 0, 1).view(1, 3, resize_h, resize_w)

            scores, transformed_anchors, last_feat = retinanet(img1.cuda().float(), last_feat=last_feat)
            if idx > 0:
                idxs = np.where(scores > 0.1)

                # Each anchor carries a box pair: current frame (x1..y2) and
                # next frame (x3..y4).
                for j in range(idxs[0].shape[0]):
                    bbox = transformed_anchors[idxs[0][j], :]
                    x1 = int(bbox[0])
                    y1 = int(bbox[1])
                    x2 = int(bbox[2])
                    y2 = int(bbox[3])

                    x3 = int(bbox[4])
                    y3 = int(bbox[5])
                    x4 = int(bbox[6])
                    y4 = int(bbox[7])

                    det_conf = float(scores[idxs[0][j]])

                    det_rect = detect_rect()
                    det_rect.curr_frame = idx
                    det_rect.curr_rect = np.array([x1, y1, x2, y2])
                    det_rect.next_rect = np.array([x3, y3, x4, y4])
                    det_rect.conf = det_conf

                    if det_rect.conf > confidence_threshold:
                        det_list_all[det_rect.curr_frame - 1].append(det_rect)

                # First frame: every detection starts a new track.
                if i == 0:
                    for j in range(len(det_list_all[i])):
                        det_list_all[i][j].id = j + 1
                        max_id = max(max_id, j + 1)
                        track = tracklet(det_list_all[i][j])
                        tracklet_all.append(track)
                    continue

                matches, unmatched1, unmatched2 = track_det_match(tracklet_all, det_list_all[i], IOU_threshold)

                # Matched detections inherit the track id.
                # (Fixed: the id assignment was duplicated in the original.)
                for j in range(len(matches)):
                    det_list_all[i][matches[j][1]].id = tracklet_all[matches[j][0]].id
                    tracklet_all[matches[j][0]].add_rect(det_list_all[i][matches[j][1]])

                # Age unmatched tracks; drop those unseen for too long.
                delete_track_list = []
                for j in range(len(unmatched1)):
                    tracklet_all[unmatched1[j]].no_match_frame = tracklet_all[unmatched1[j]].no_match_frame + 1
                    if(tracklet_all[unmatched1[j]].no_match_frame >= retention_threshold):
                        delete_track_list.append(unmatched1[j])

                origin_index = set([k for k in range(len(tracklet_all))])
                delete_index = set(delete_track_list)
                left_index = list(origin_index - delete_index)
                tracklet_all = [tracklet_all[k] for k in left_index]

                # Unmatched detections start fresh tracks.
                for j in range(len(unmatched2)):
                    det_list_all[i][unmatched2[j]].id = max_id + 1
                    max_id = max_id + 1
                    track = tracklet(det_list_all[i][unmatched2[j]])
                    tracklet_all.append(track)

    #**************visualize tracking result and save evaluate file****************
    fout_tracking = open(os.path.join(model_dir, 'results', cur_dataset + '.txt'), 'w')

    save_img_dir = os.path.join(model_dir, 'results', cur_dataset)
    os.makedirs(save_img_dir, exist_ok=True)

    out_video = os.path.join(model_dir, 'results', cur_dataset + '.mp4')
    videoWriter = cv2.VideoWriter(out_video, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps, (img_width, img_height))

    id_dict = {}

    for i in range(img_len):
        print('saving: ', i)
        img = cv2.imread(img_list[i])
        for j in range(len(det_list_all[i])):
            x1, y1, x2, y2 = det_list_all[i][j].curr_rect.astype(int)
            trace_id = det_list_all[i][j].id

            # Remember the bottom-center point for the motion trace.
            id_dict.setdefault(str(trace_id), []).append((int((x1 + x2) / 2), y2))
            draw_trace_id = str(trace_id)
            draw_caption(img, (x1, y1, x2, y2), draw_trace_id, color=color_list[trace_id % len(color_list)])
            cv2.rectangle(img, (x1, y1), (x2, y2), color=color_list[trace_id % len(color_list)], thickness=2)

            trace_len = len(id_dict[str(trace_id)])
            trace_len_draw = min(max_draw_len, trace_len)

            for k in range(trace_len_draw - draw_interval):
                if(k % draw_interval == 0):
                    draw_point1 = id_dict[str(trace_id)][trace_len - k - 1]
                    draw_point2 = id_dict[str(trace_id)][trace_len - k - 1 - draw_interval]
                    cv2.line(img, draw_point1, draw_point2, color=color_list[trace_id % len(color_list)], thickness=2)

            # MOT challenge format: frame,id,x,y,w,h,-1,-1,-1,-1
            fout_tracking.write(str(i + 1) + ',' + str(trace_id) + ',' + str(x1) + ',' + str(y1) + ',' + str(x2 - x1) + ',' + str(y2 - y1) + ',-1,-1,-1,-1\n')

        cv2.imwrite(os.path.join(save_img_dir, str(i + 1).zfill(6) + '.jpg'), img)
        videoWriter.write(img)
        cv2.waitKey(0)

    fout_tracking.close()
    videoWriter.release()
def run_from_train(model_dir, root_path):
    """Load the final trained model from `model_dir` and run tracking over
    all MOT17 train and test sequences.

    Parameters
    ----------
    model_dir : str
        Directory containing ``model_final.pt``; results go to
        ``model_dir/results``.
    root_path : str
        Root of the MOT17 dataset.
    """
    # exist_ok=True replaces the racy `if not exists: makedirs` pattern.
    os.makedirs(os.path.join(model_dir, 'results'), exist_ok=True)

    retinanet = torch.load(os.path.join(model_dir, 'model_final.pt'))

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet.eval()

    # MOT17 train / test sequence numbers.
    for seq_num in [2, 4, 5, 9, 10, 11, 13]:
        run_each_dataset(model_dir, retinanet, root_path, 'train', 'MOT17-{:02d}'.format(seq_num))
    for seq_num in [1, 3, 6, 7, 8, 12, 14]:
        run_each_dataset(model_dir, retinanet, root_path, 'test', 'MOT17-{:02d}'.format(seq_num))
def main(args=None):
    """CLI entry point: parse arguments, load the trained model, and run
    tracking over all MOT17 train and test sequences.

    Parameters
    ----------
    args : list of str, optional
        Argument list; defaults to ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser(description='Simple script for testing a CTracker network.')
    parser.add_argument('--dataset_path', default='/dockerdata/home/jeromepeng/data/MOT/MOT17/', type=str, help='Dataset path, location of the images sequence.')
    parser.add_argument('--model_dir', default='./trained_model/', help='Path to model (.pt) file.')
    # Fixed: the original rebound `parser` to the parsed Namespace, shadowing
    # the ArgumentParser with an unrelated object.
    opts = parser.parse_args(args)

    # exist_ok=True replaces the racy `if not exists: makedirs` pattern.
    os.makedirs(os.path.join(opts.model_dir, 'results'), exist_ok=True)

    retinanet = torch.load(os.path.join(opts.model_dir, 'model_final.pt'))

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet.eval()

    for seq_num in [2, 4, 5, 9, 10, 11, 13]:
        run_each_dataset(opts.model_dir, retinanet, opts.dataset_path, 'train', 'MOT17-{:02d}'.format(seq_num))
    for seq_num in [1, 3, 6, 7, 8, 12, 14]:
        run_each_dataset(opts.model_dir, retinanet, opts.dataset_path, 'test', 'MOT17-{:02d}'.format(seq_num))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"torch.no_grad",
"os.path.join",
"torch.__version__.split",
"numpy.transpose",
"os.path.exists",
"numpy.append",
"skimage.io.imread",
"math.ceil",
"cv2.waitKey",
"numpy.asarray",
"torch.cuda.is_available",
"scipy.optimize.linear_sum_assi... | [((3731, 3753), 'numpy.zeros', 'np.zeros', (['(num1, num2)'], {}), '((num1, num2))\n', (3739, 3753), True, 'import numpy as np\n'), ((3899, 3930), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['cost_mat'], {}), '(cost_mat)\n', (3920, 3930), False, 'from scipy.optimize import linear_sum_assignment\n'), ((3948, 3972), 'numpy.asarray', 'np.asarray', (['match_result'], {}), '(match_result)\n', (3958, 3972), True, 'import numpy as np\n'), ((3990, 4016), 'numpy.transpose', 'np.transpose', (['match_result'], {}), '(match_result)\n', (4002, 4016), True, 'import numpy as np\n'), ((4505, 4591), 'cv2.putText', 'cv2.putText', (['image', 'caption', '(b[0], b[1] - 8)', 'cv2.FONT_HERSHEY_PLAIN', '(2)', 'color', '(2)'], {}), '(image, caption, (b[0], b[1] - 8), cv2.FONT_HERSHEY_PLAIN, 2,\n color, 2)\n', (4516, 4591), False, 'import cv2\n'), ((8194, 8241), 'os.path.join', 'os.path.join', (['model_dir', '"""results"""', 'cur_dataset'], {}), "(model_dir, 'results', cur_dataset)\n", (8206, 8241), False, 'import os\n'), ((8326, 8382), 'os.path.join', 'os.path.join', (['model_dir', '"""results"""', "(cur_dataset + '.mp4')"], {}), "(model_dir, 'results', cur_dataset + '.mp4')\n", (8338, 8382), False, 'import os\n'), ((10434, 10523), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple script for testing a CTracker network."""'}), "(description=\n 'Simple script for testing a CTracker network.')\n", (10457, 10523), False, 'import argparse\n'), ((540, 568), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (563, 568), False, 'import torch\n'), ((616, 641), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (639, 641), False, 'import torch\n'), ((1005, 1027), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (1013, 1027), True, 'import numpy as np\n'), ((1048, 1070), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 
1, 1])\n', (1056, 1070), True, 'import numpy as np\n'), ((1247, 1263), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1255, 1263), True, 'import numpy as np\n'), ((1396, 1412), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (1404, 1412), True, 'import numpy as np\n'), ((4720, 4775), 'os.path.join', 'os.path.join', (['dataset_path', 'subset', 'cur_dataset', '"""img1"""'], {}), "(dataset_path, subset, cur_dataset, 'img1')\n", (4732, 4775), False, 'import os\n'), ((4791, 4849), 'os.path.join', 'os.path.join', (['dataset_path', 'subset', 'cur_dataset', '"""img1"""', '_'], {}), "(dataset_path, subset, cur_dataset, 'img1', _)\n", (4803, 4849), False, 'import os\n'), ((8112, 8168), 'os.path.join', 'os.path.join', (['model_dir', '"""results"""', "(cur_dataset + '.txt')"], {}), "(model_dir, 'results', cur_dataset + '.txt')\n", (8124, 8168), False, 'import os\n'), ((8251, 8279), 'os.path.exists', 'os.path.exists', (['save_img_dir'], {}), '(save_img_dir)\n', (8265, 8279), False, 'import os\n'), ((8284, 8309), 'os.makedirs', 'os.makedirs', (['save_img_dir'], {}), '(save_img_dir)\n', (8295, 8309), False, 'import os\n'), ((8426, 8468), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""m"""', '"""p"""', '"""4"""', '"""v"""'], {}), "('m', 'p', '4', 'v')\n", (8448, 8468), False, 'import cv2\n'), ((8579, 8602), 'cv2.imread', 'cv2.imread', (['img_list[i]'], {}), '(img_list[i])\n', (8589, 8602), False, 'import cv2\n'), ((9747, 9761), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (9758, 9761), False, 'import cv2\n'), ((9994, 10035), 'os.path.join', 'os.path.join', (['model_dir', '"""model_final.pt"""'], {}), "(model_dir, 'model_final.pt')\n", (10006, 10035), False, 'import os\n'), ((10970, 11018), 'os.path.join', 'os.path.join', (['parser.model_dir', '"""model_final.pt"""'], {}), "(parser.model_dir, 'model_final.pt')\n", (10982, 11018), False, 'import os\n'), ((4477, 4490), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (4485, 4490), True, 
'import numpy as np\n'), ((5344, 5359), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5357, 5359), False, 'import torch\n'), ((5428, 5457), 'skimage.io.imread', 'skimage.io.imread', (['data_path1'], {}), '(data_path1)\n', (5445, 5457), False, 'import skimage\n'), ((5630, 5688), 'numpy.zeros', 'np.zeros', (['(resize_h, resize_w, 3)'], {'dtype': 'img_origin1.dtype'}), '((resize_h, resize_w, 3), dtype=img_origin1.dtype)\n', (5638, 5688), True, 'import numpy as np\n'), ((9881, 9915), 'os.path.join', 'os.path.join', (['model_dir', '"""results"""'], {}), "(model_dir, 'results')\n", (9893, 9915), False, 'import os\n'), ((9933, 9967), 'os.path.join', 'os.path.join', (['model_dir', '"""results"""'], {}), "(model_dir, 'results')\n", (9945, 9967), False, 'import os\n'), ((10841, 10882), 'os.path.join', 'os.path.join', (['parser.model_dir', '"""results"""'], {}), "(parser.model_dir, 'results')\n", (10853, 10882), False, 'import os\n'), ((10900, 10941), 'os.path.join', 'os.path.join', (['parser.model_dir', '"""results"""'], {}), "(parser.model_dir, 'results')\n", (10912, 10941), False, 'import os\n'), ((5803, 5824), 'numpy.array', 'np.array', (['[[RGB_STD]]'], {}), '([[RGB_STD]])\n', (5811, 5824), True, 'import numpy as np\n'), ((6034, 6056), 'numpy.where', 'np.where', (['(scores > 0.1)'], {}), '(scores > 0.1)\n', (6042, 6056), True, 'import numpy as np\n'), ((3459, 3500), 'numpy.append', 'np.append', (['track.velocity', 'track.velocity'], {}), '(track.velocity, track.velocity)\n', (3468, 3500), True, 'import numpy as np\n'), ((5564, 5585), 'math.ceil', 'math.ceil', (['(img_h / 32)'], {}), '(img_h / 32)\n', (5573, 5585), False, 'import math\n'), ((5592, 5613), 'math.ceil', 'math.ceil', (['(img_w / 32)'], {}), '(img_w / 32)\n', (5601, 5613), False, 'import math\n'), ((5777, 5799), 'numpy.array', 'np.array', (['[[RGB_MEAN]]'], {}), '([[RGB_MEAN]])\n', (5785, 5799), True, 'import numpy as np\n'), ((6475, 6501), 'numpy.array', 'np.array', (['[x1, y1, x2, y2]'], {}), '([x1, 
y1, x2, y2])\n', (6483, 6501), True, 'import numpy as np\n'), ((6529, 6555), 'numpy.array', 'np.array', (['[x3, y3, x4, y4]'], {}), '([x3, y3, x4, y4])\n', (6537, 6555), True, 'import numpy as np\n'), ((5836, 5858), 'torch.from_numpy', 'torch.from_numpy', (['img1'], {}), '(img1)\n', (5852, 5858), False, 'import torch\n')] |
import numpy as np
from neuron import h
from .psd import PSD
class Exp2PSD(PSD):
    """
    Simple double-exponential PSD from Neuron (fast).
    """
    def __init__(self, section, terminal, weight=0.01, loc=0.5, tau1=0.1, tau2=0.3, erev=0):
        """
        Parameters
        ----------
        section : Section
            The postsynaptic section in which to insert the receptor mechanism.
        terminal : Terminal
            The presynaptic Terminal instance
        weight : float, default=0.01
            Synaptic weight passed on to ``terminal.connect``.
        loc : float, default=0.5
            Position on the postsynaptic section to insert the mechanism, from [0..1].
        tau1 : float, default=0.1
            Rise time constant of the Exp2Syn mechanism.
        tau2 : float, default=0.3
            Decay time constant of the Exp2Syn mechanism.
        erev : float, default=0
            Reversal potential of the synapse.
        """
        PSD.__init__(self, section, terminal)
        self.syn = h.Exp2Syn(loc, sec=section)
        self.syn.tau1 = tau1
        self.syn.tau2 = tau2
        self.syn.e = erev
        terminal.connect(self.syn, weight=weight)

    @property
    def n_psd(self):
        """The number of postsynaptic densities represented by this object.
        """
        return 1

    def record(self, *args):
        r"""Create a new set of vectors to record parameters for each release
        site.

        Parameters
        ----------
        \*args :
            Allowed parameters are 'i' (current), 'g' (conductance), and 'Open' (open probability).
        """
        # NOTE(review): this reads `self.ampa_psd` / `self.nmda_psd`, which are
        # not assigned anywhere in this class -- presumably supplied by PSD or a
        # subclass; confirm before relying on record() for Exp2PSD instances.
        self.vectors = {'ampa': [], 'nmda': []}
        for receptor in self.vectors:
            for mech in getattr(self, receptor+'_psd'):
                vec = {}
                for var in args:
                    vec[var] = h.Vector()
                    vec[var].record(getattr(mech, '_ref_'+var))
                self.vectors[receptor].append(vec)

    def get_vector(self, var, receptor='ampa', i=0):
        """Return an array from a previously recorded vector.

        Parameters
        ----------
        var : str
            Allowed parameters are 'i' (current), 'g' (conductance), and 'Open' (open probability).
        receptor : str, default='ampa'
            May be 'ampa' or 'nmda'
        i : int, default=0
            The integer index of the psd (if this is a multi-site synapse)
        """
        # Bug fix: `receptor` and `i` were referenced here without being
        # parameters (the docstring already documented them), so every call
        # raised NameError. They are now backward-compatible keyword args.
        v = self.vectors[receptor][i][var]
        return np.array(v)
| [
"neuron.h.Vector",
"neuron.h.Exp2Syn",
"numpy.array"
] | [((702, 729), 'neuron.h.Exp2Syn', 'h.Exp2Syn', (['loc'], {'sec': 'section'}), '(loc, sec=section)\n', (711, 729), False, 'from neuron import h\n'), ((2182, 2193), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2190, 2193), True, 'import numpy as np\n'), ((1546, 1556), 'neuron.h.Vector', 'h.Vector', ([], {}), '()\n', (1554, 1556), False, 'from neuron import h\n')] |
import pickle
import numpy as np
from datetime import datetime
class Prediction:
    """Predict solar irradiance for a date/time with a pickled decision tree."""

    def __init__(self, date, time):
        """Store the query date ('%Y-%m-%d') and time ('%H:%M:%S') strings."""
        self.date = date
        self.time = time

    def getIrradiance(self):
        """Load the saved model and return its irradiance prediction.

        The model file 'final_decision_Tree_model' is expected in the working
        directory; features are [hour, day_of_month, day_of_week, month, year].
        """
        parsed_time = datetime.strptime(self.time, '%H:%M:%S')
        hour = parsed_time.hour
        print(hour)
        parsed_date = datetime.strptime(self.date, '%Y-%m-%d')
        day_of_month = parsed_date.day
        print(day_of_month)
        day_of_week = parsed_date.weekday()
        print(day_of_week)
        month = parsed_date.month
        print(month)
        year = parsed_date.year
        print(year)
        with open('final_decision_Tree_model', 'rb') as f:
            model = pickle.load(f)
        features = np.array([hour, day_of_month, day_of_week, month, year])
        irradaiance = model.predict(features.reshape(1, 5))
        print(irradaiance[0])
        return irradaiance[0]
# x = Prediction("2005-01-01", "00:00:00")
# x.getIrradiance()
# with open('final_decision_Tree_model', 'rb') as f:
# model = pickle.load(f)
#
# test_data = np.array([16, 2, 5, 1, 2021])
# answer =model.predict(test_data.reshape(1, 5))
# print(answer)
# list = answer.tolist()
# print(list[0])
| [
"datetime.datetime.strptime",
"pickle.load",
"numpy.array"
] | [((224, 264), 'datetime.datetime.strptime', 'datetime.strptime', (['self.time', '"""%H:%M:%S"""'], {}), "(self.time, '%H:%M:%S')\n", (241, 264), False, 'from datetime import datetime\n'), ((338, 378), 'datetime.datetime.strptime', 'datetime.strptime', (['self.date', '"""%Y-%m-%d"""'], {}), "(self.date, '%Y-%m-%d')\n", (355, 378), False, 'from datetime import datetime\n'), ((730, 786), 'numpy.array', 'np.array', (['[hour, day_of_month, day_of_week, month, year]'], {}), '([hour, day_of_month, day_of_week, month, year])\n', (738, 786), True, 'import numpy as np\n'), ((695, 709), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (706, 709), False, 'import pickle\n')] |
from SPARQLWrapper import SPARQLWrapper, JSON
from collections import defaultdict
from rltk.similarity import levenshtein_similarity
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import rankdata
def generate_visualization_data(class_name, property_name):
    '''
    Fetch chart-ready data for one (class, property) pair from the local Fuseki endpoint.

    :param class_name: Name of class in the KG ('Game', 'Seller' or 'Enterprise')
    :param property_name: Name of the property in the KG
    :return(store_result): list of (label, count) tuples for categorical/discrete
        properties; for 'ratingValue' a flat list of the raw rating values instead
    '''
    '''
    works for
    1. Game ---> hasGenre
    2. Game ---> hasTheme
    3. Game ---> hasGameMode
    4. Game ---> soldBy
    5. Game ---> developedBy
    6. Game ---> publisherBy
    7. Game ---> memory_MB
    8. Game ---> diskSpace_MB
    9. Game ---> ratingValue
    10. Enterprise ---> ratingValue
    11. Seller ---> ratingValue
    12. Game ---> datePublished
    '''
    store_result = list()
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    # NOTE(review): `results` is only assigned inside the branches below, so an
    # unsupported (class_name, property_name) combination raises NameError at
    # the final loop. Exactly one branch is expected to match per call.
    # Branch 1: categorical object properties -- count games per label,
    # top 20 most frequent labels.
    if (class_name == 'Game') and (
            property_name == 'hasGenre' or property_name == 'hasTheme' or property_name == 'hasGameMode'):
        sparql.setQuery('''
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX mgns: <http://inf558.org/games#>
            PREFIX schema: <http://schema.org/>
            SELECT ?label (count(?label) as ?countLabel)
            WHERE{
            ?game a mgns:''' + class_name + ''' .
            ?game mgns:''' + property_name + ''' ?genre .
            ?genre rdfs:label ?label
            }
            group by ?label
            order by desc(?countLabel)
            LIMIT 20
            ''')
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
    # Branch 2: company-valued properties -- labels come from schema:name.
    if (class_name == 'Game') and (property_name == 'soldBy' or property_name == 'developedBy' or property_name == 'publishedBy'):
        sparql.setQuery('''
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX mgns: <http://inf558.org/games#>
            PREFIX schema: <http://schema.org/>
            SELECT ?label (count(?label) as ?countLabel)
            WHERE{
            ?game a mgns:Game .
            ?game mgns:''' + property_name + ''' ?s .
            ?s schema:name ?label .
            }
            group by ?label
            order by desc(?countLabel)
            LIMIT 20
            ''')
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
    # Branch 3: hardware requirement numbers, reached through the hasMSD node.
    if (class_name == 'Game') and (property_name == 'memory_MB' or property_name == 'diskSpace_MB'):
        sparql.setQuery('''
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX mgns: <http://inf558.org/games#>
            PREFIX schema: <http://schema.org/>
            SELECT ?label (count(?label) as ?countLabel)
            WHERE{
            ?game a mgns:Game .
            ?game mgns:hasMSD ?s .
            ?s mgns:''' + property_name + ''' ?label .
            }
            group by ?label
            order by desc(?countLabel)
            LIMIT 20
            ''')
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
    # Branch 4: continuous ratings -- returns the raw values (no counts) and
    # exits early; -1 is the sentinel for "no rating" and is filtered out.
    if (class_name == 'Game' or class_name == 'Seller' or class_name == 'Enterprise') and (property_name == 'ratingValue'):
        cont_val = []
        sparql.setQuery('''
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX mgns: <http://inf558.org/games#>
            PREFIX schema: <http://schema.org/>
            SELECT ?label
            WHERE{
            ?game a mgns:'''+class_name+''' .
            ?game mgns:ratingValue ?label .
            FILTER(?label != -1)
            }
            ''')
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
        for result in results['results']['bindings']:
            store_result.append(result['label']['value'])
        return store_result
    # Branch 5: publication year counts.
    if (class_name == 'Game') and (property_name == 'datePublished'):
        sparql.setQuery('''
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX mgns: <http://inf558.org/games#>
            PREFIX schema: <http://schema.org/>
            SELECT ?label (count(?label) as ?countLabel)
            WHERE{
            ?game a mgns:Game .
            ?game schema:datePublished ?label .
            }
            group by ?label
            order by desc(?countLabel)
            LIMIT 20
            ''')
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
    #type_of_key = results['results']['bindings'][0]['label']
    '''if ('xml:lang' in type_of_key) or ('datatype' in type_of_key and 'integer' in type_of_key['datatype']):
        return store_result, "discrete"
    if ('datatype' in type_of_key and 'decimal' in type_of_key['datatype']):
        return store_result, "continuous"'''
    # print(results)
    # Shared tail for the count-based branches: (label, count) pairs.
    for result in results['results']['bindings']:
        store_result.append((result['label']['value'], result['countLabel']['value']))
    return store_result
def getGameInformation(game_id):
    """Fetch every displayable property of one game from the local endpoint.

    :param game_id: local name of the game entity (appended to the mgns: prefix)
    :return: dict mapping each display field to a comma-joined string of its
        values, or the string 'Not Available' when the field is absent.
    """
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    game_info_dict = defaultdict(set)
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        #SELECT ?game_summary ?name ?released_year ?platform_name ?developer_name ?publisher_name ?game_mode_label ?genre_label ?theme_label ?#rating ?seller_name ?price ?discount ?url
        SELECT ?game_summary ?name ?released_year ?platform_name ?developer_name ?publisher_name ?game_mode_label ?genre_label ?theme_label ?rating ?seller_name ?price ?discount ?url
        WHERE{
        mgns:'''+game_id+''' a mgns:Game ;
            schema:name ?name ;
            schema:description ?game_summary ;
        OPTIONAL {mgns:'''+game_id+''' schema:datePublished ?released_year}.
        OPTIONAL{mgns:'''+game_id+''' mgns:supportedPlatform ?platform .
        ?platform mgns:platformName ?platform_name } .
        OPTIONAL{mgns:'''+game_id+''' mgns:developedBy ?developer .
        ?developer schema:name ?developer_name } .
        OPTIONAL{mgns:'''+game_id+''' mgns:publishedBy ?publisher .
        ?publisher schema:name ?publisher_name} .
        OPTIONAL{mgns:'''+game_id+''' mgns:hasGameMode ?game_mode .
        ?game_mode rdfs:label ?game_mode_label }.
        OPTIONAL{mgns:'''+game_id+''' mgns:hasGenre ?genre .
        ?genre rdfs:label ?genre_label }.
        OPTIONAL{mgns:'''+game_id+''' mgns:hasTheme ?theme .
        ?theme rdfs:label ?theme_label}.
        OPTIONAL{mgns:'''+game_id+''' mgns:ratingValue ?rating} .
        OPTIONAL{mgns:'''+game_id+''' mgns:soldBy ?seller .
        ?seller schema:name ?seller_name} .
        OPTIONAL{mgns:'''+game_id+''' mgns:price_USD ?price} .
        OPTIONAL{mgns:'''+game_id+''' mgns:discount_percent ?discount} .
        OPTIONAL{mgns:'''+game_id+''' mgns:sellerURL ?url} .
        }
        ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    # Collect the distinct values of every bound variable across all rows.
    for result in results['results']['bindings']:
        for key in result.keys():
            game_info_dict[key].add(result[key]['value'])
    # Flatten each value set into a single comma-joined string; fields the
    # query did not bind are reported as 'Not Available'.  (This replaces the
    # fifteen copy-pasted if/else blocks of the original.)
    display_fields = ['game_summary', 'name', 'released_year', 'platform_name',
                      'developer_name', 'publisher_name', 'game_mode_label',
                      'genre_label', 'theme_label', 'rating', 'seller_name',
                      'price', 'discount', 'url']
    for field in display_fields:
        values = game_info_dict.get(field)
        game_info_dict[field] = ', '.join(values) if values else 'Not Available'
    return game_info_dict
def getGameRequirementsInformation(game_id):
    """Return human-readable minimum system requirement strings for one game.

    Each alternative requirement set except the last is suffixed with " (or) ".
    """
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        PREFIX sc: <http://purl.org/science/owl/sciencecommons/>
        SELECT ?game_id ?memory_val ?disk_val ?p_name ?g_name
        WHERE{
        ?game_id a mgns:Game .
        FILTER(?game_id=mgns:''' + str(game_id) + ''')
        ?game_id mgns:hasMSD ?msd_id .
        ?msd_id mgns:memory_MB ?memory_val .
        ?msd_id mgns:diskSpace_MB ?disk_val .
        ?msd_id mgns:processor ?proc_id .
        ?proc_id schema:name ?p_name .
        ?msd_id mgns:graphics ?gr_id .
        ?gr_id schema:name ?g_name .
        }
        ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    requirement_strings = []
    for row in results['results']['bindings']:
        memory_mb = row['memory_val']['value']
        disk_mb = row['disk_val']['value']
        cpu_name = row['p_name']['value']
        gpu_name = row['g_name']['value']
        requirement_strings.append(
            str(memory_mb) + " MB RAM, " + str(disk_mb) + " MB HDD, "
            + "Processor = " + cpu_name + ", Graphics card = " + gpu_name)
    # Join alternatives visually: every entry but the last gets an " (or) ".
    for pos in range(len(requirement_strings) - 1):
        requirement_strings[pos] = requirement_strings[pos] + " (or) "
    return requirement_strings
def getRecommendedGameInformation(game_id, device_config, embeddings_model):
    """Recommend up to five games similar to *game_id* that the device can run.

    :param game_id: game URI (or local name); its part after '#' keys the embeddings
    :param device_config: dict with 'processor_score', 'graphics_card_score',
        'ram_MB' and 'hdd_space_MB' thresholds
    :param embeddings_model: mapping from short game id to an embedding vector
    :return: dict {rank: {'game_id': short_id, 'game_name': name}} (empty when
        no candidate game passes the hardware/rating filters)
    """
    rating_threshold = 80
    p_score_device = device_config["processor_score"]
    g_score_device = device_config["graphics_card_score"]
    ram_size = device_config["ram_MB"]
    hdd_size = device_config["hdd_space_MB"]
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT ?game_id ?game_name
        WHERE{
        ?game_id a mgns:Game .
        ?game_id schema:name ?game_name .
        ?game_id mgns:ratingValue ?rating_value .
        FILTER(?rating_value >= ''' + str(rating_threshold) + ''')
        ?game_id mgns:hasMSD ?msd_id .
        ?msd_id mgns:memory_MB ?memory_val .
        FILTER(?memory_val <= ''' + str(ram_size) + ''')
        ?msd_id mgns:diskSpace_MB ?disk_val .
        FILTER(?disk_val <= ''' + str(hdd_size) + ''')
        ?msd_id mgns:processor ?proc_id .
        ?proc_id mgns:hasCPUMark ?p_score .
        FILTER(?p_score <= ''' + str(p_score_device) + ''')
        ?msd_id mgns:graphics ?gr_id .
        ?gr_id mgns:g3dMark ?g_score .
        FILTER(?g_score <= ''' + str(g_score_device) + ''')
        }
        ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    game_dict = {}
    for result in results['results']['bindings']:
        gid = result['game_id']['value'].split("#")[-1]
        game_dict[gid] = result['game_name']['value']
    short_game_id = game_id.split("#")[-1]
    cur_game_embed = embeddings_model[short_game_id].reshape(1, -1)
    game_urls = []
    game_embedding_matrix = []
    for gid in game_dict.keys():
        # Bug fix: game_dict keys are short ids (split on '#'), so the original
        # comparison against the full `game_id` never excluded the query game
        # from its own recommendations.
        if gid == short_game_id:
            continue
        game_urls.append(gid)
        game_embedding_matrix.append(embeddings_model[gid])
    if not game_urls:
        # No candidate survived the filters; the original crashed on the empty
        # matrix below.
        return {}
    game_embedding_matrix = np.array(game_embedding_matrix)
    cosine_sim_vals = cosine_similarity(cur_game_embed, game_embedding_matrix)
    # Ordinal ranks over descending similarity: rank 1 = most similar.
    ranks = rankdata(-cosine_sim_vals, method="ordinal")
    top_5_idx = list(np.where(ranks <= 5)[0])
    recommended_games_info_dict = {}
    for idx, rank in zip(top_5_idx, ranks[top_5_idx]):
        recommended_games_info_dict[rank] = {
            "game_id": game_urls[idx],
            "game_name": game_dict[game_urls[idx]],
        }
    return recommended_games_info_dict
def getGenres():
    """Return the distinct genre labels attached to any game in the KG."""
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT distinct ?genre_label
        WHERE{
        ?game a mgns:Game .
        ?game mgns:hasGenre ?genre .
        ?genre rdfs:label ?genre_label
        }
        ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return [row['genre_label']['value'] for row in results['results']['bindings']]
def getThemes():
    """Return the distinct theme labels attached to any game in the KG."""
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    sparql.setQuery(
        '''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT distinct ?theme_label
        WHERE{
        ?game a mgns:Game .
        ?game mgns:hasTheme ?theme .
        ?theme rdfs:label ?theme_label .
        }
        '''
    )
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return [row['theme_label']['value'] for row in results['results']['bindings']]
def getGameModes():
    """Return the distinct game-mode labels attached to any game in the KG."""
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    sparql.setQuery(
        '''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT distinct ?game_mode_label
        WHERE{
        ?game a mgns:Game .
        ?game mgns:hasGameMode ?game_mode .
        ?game_mode rdfs:label ?game_mode_label .
        }
        '''
    )
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return [row['game_mode_label']['value'] for row in results['results']['bindings']]
def getClassProperties():
    """Map each KG class to the properties the visualization page supports."""
    game_properties = ['hasGenre', 'hasTheme', 'hasGameMode', 'soldBy',
                       'developedBy', 'publishedBy', 'memory_MB',
                       'diskSpace_MB', 'ratingValue', 'datePublished']
    return {
        'Game': game_properties,
        'Enterprise': ['ratingValue'],
        'Seller': ['ratingValue'],
    }
def getPrefixQuery():
    """Return the PREFIX header shared by every SPARQL query in this module."""
    return '''
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX mgns: <http://inf558.org/games#>
    PREFIX schema: <http://schema.org/>
    '''
def getGameNameQuery():
    """Return the base pattern binding every game and its name, plus the
    projected variable for the name."""
    base_pattern = '''
    {
    ?game_id a mgns:Game .
    ?game_id schema:name ?game_name .
    }
    '''
    return base_pattern, '?game_name '
def gameNameField(game_name):
    """Build a pattern filtering games whose (lower-cased) name contains
    *game_name*."""
    head = '''
    {
    ?game_id schema:name ?game_name .
    FILTER contains(lcase(str(?game_name)),\"'''
    tail = '''\") .
    }
    '''
    return head + game_name + tail
def getReleasedYearQuery(released_year):
    """Build a pattern restricting games to one publication year.

    Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id schema:datePublished ?released_year .
    FILTER(?released_year = '''
    tail = ''')
    }
    '''
    return head + str(released_year) + tail, '?released_year '
def getMinRatingQuery(min_rating):
    """Build a pattern keeping games rated strictly above *min_rating*.

    Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:ratingValue ?rating_value .
    FILTER(?rating_value > '''
    tail = ''')
    }
    '''
    return head + str(min_rating) + tail, '?rating_value '
def getSupportedPlatform(platform_name):
    """Build a pattern keeping games whose platform name contains
    *platform_name* (case-insensitive). Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:supportedPlatform ?platform_id .
    ?platform_id mgns:platformName ?platform_name .
    FILTER contains(lcase(str(?platform_name)),\"'''
    tail = '''\") .
    }
    '''
    return head + str(platform_name) + tail, '?platform_name '
def getDeveloper(developer_name):
    """Build a pattern keeping games whose developer name contains
    *developer_name* (case-insensitive). Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:developedBy ?developer_id .
    ?developer_id schema:name ?developer_name .
    FILTER contains(lcase(str(?developer_name)),\"'''
    tail = '''\") .
    }
    '''
    return head + developer_name + tail, '?developer_name '
def getPublisher(publisher_name):
    """Build a pattern keeping games whose publisher name contains
    *publisher_name* (case-insensitive). Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:publishedBy ?publisher_id .
    ?publisher_id schema:name ?publisher_name .
    FILTER contains(lcase(str(?publisher_name)),\"'''
    tail = '''\").
    }
    '''
    return head + publisher_name + tail, '?publisher_name '
def getSeller(seller_name):
    """Build a pattern keeping games whose seller name contains *seller_name*
    (case-insensitive). Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:soldBy ?seller_id .
    ?seller_id schema:name ?seller_name .
    FILTER contains(lcase(str(?seller_name)),\"'''
    tail = '''\").
    }
    '''
    return head + seller_name + tail, '?seller_name '
def getGenreQuery(genre):
    """Build a pattern keeping games whose genre label contains *genre*
    (case-insensitive). Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:hasGenre ?genre_id .
    ?genre_id rdfs:label ?genre .
    FILTER CONTAINS(lcase(str(?genre)),\"'''
    tail = '''\") .
    }
    '''
    return head + str(genre) + tail, '?genre '
def getThemeQuery(theme):
    """Build a pattern keeping games whose theme label contains *theme*
    (case-insensitive). Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:hasTheme ?theme_id .
    ?theme_id rdfs:label ?theme .
    FILTER CONTAINS(lcase(str(?theme)),\"'''
    tail = '''\") .
    }
    '''
    return head + str(theme) + tail, '?theme '
def getGameModeQuery(game_mode):
    """Build a pattern keeping games whose game-mode label contains
    *game_mode* (case-insensitive). Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:hasGameMode ?game_mode_id .
    ?game_mode_id rdfs:label ?game_mode .
    FILTER CONTAINS(lcase(str(?game_mode)),\"'''
    tail = '''\") .
    }
    '''
    return head + str(game_mode) + tail, '?game_mode '
def getMinPriceQuery(min_price):
    """Build a pattern keeping games priced strictly above *min_price* USD.

    Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:price_USD ?price .
    FILTER(?price > '''
    tail = ''') .
    }
    '''
    return head + str(min_price) + tail, '?price '
def getMaxPriceQuery(max_price):
    """Build a pattern keeping games priced strictly below *max_price* USD.

    Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:price_USD ?price .
    FILTER(?price < '''
    tail = ''') .
    }
    '''
    return head + str(max_price) + tail, '?price '
def getMinDiscountQuery(min_discount):
    """Build a pattern keeping games discounted strictly more than
    *min_discount* percent. Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:discount_percent ?discount_in_percent .
    FILTER(?discount_in_percent > '''
    tail = ''') .
    }
    '''
    return head + str(min_discount) + tail, '?discount_in_percent '
def getMaxDiscountQuery(max_discount):
    """Build a pattern keeping games discounted strictly less than
    *max_discount* percent. Returns (pattern, select_variable)."""
    head = '''
    {
    ?game_id mgns:discount_percent ?discount_in_percent .
    FILTER(?discount_in_percent < '''
    tail = ''') .
    }
    '''
    return head + str(max_discount) + tail, '?discount_in_percent '
def create_query_general(game_name='', released_year='', min_rating='', input_platform='', input_developer='', input_publisher='',
                         input_seller='', genre='', theme='', game_mode='', min_price='', max_price='', min_discount='',
                         max_discount=''):
    """Assemble the WHERE patterns and SELECT clause for a game search.

    Every non-empty argument contributes one filter pattern (and usually one
    projected variable) to the query. Returns (where_patterns, select_clause).
    """
    select_query = 'select distinct'
    query = ''
    query_pattern, select_var = getGameNameQuery()
    query += query_pattern
    # NOTE(review): no space after 'distinct' here, preserved from the
    # original -- confirm the endpoint tolerates 'select distinct?game_id'.
    select_query += '?game_id '
    select_query += select_var
    if game_name != '':
        query += gameNameField(game_name)
    # (value, pattern-builder) pairs, applied in the original fixed order.
    # This replaces thirteen copy-pasted if-blocks.
    optional_filters = [
        (released_year, getReleasedYearQuery),
        (min_rating, getMinRatingQuery),
        (input_platform, getSupportedPlatform),
        (input_developer, getDeveloper),
        (input_publisher, getPublisher),
        (input_seller, getSeller),
        (genre, getGenreQuery),
        (theme, getThemeQuery),
        (game_mode, getGameModeQuery),
        (min_price, getMinPriceQuery),
        (max_price, getMaxPriceQuery),
        (min_discount, getMinDiscountQuery),
        (max_discount, getMaxDiscountQuery),
    ]
    for value, builder in optional_filters:
        if value == '':
            continue
        query_pattern, select_var = builder(value)
        query += query_pattern
        # ?price / ?discount_in_percent can be produced by both a min and a
        # max filter; project each variable at most once. (For all other
        # filters the variable is unique, so the guard never fires.)
        if select_var not in select_query:
            select_query += select_var
    return query, select_query
def create_query(game_name='', released_year='', min_rating='', input_platform='', input_developer='', input_publisher='',
                 input_seller='', genre='', theme='', game_mode='', min_price='', max_price='', min_discount='',
                 max_discount='', support='all', device_config=None):
    """Build the full search query, optionally filtered by device capability.

    :param support: 'all' (no hardware filter), 'only_supported' or
        'only_not_supported'; any other value falls back to 'all' behavior.
    :param device_config: dict with 'processor_score', 'graphics_card_score',
        'ram_MB' and 'hdd_space_MB'; only required when support != 'all'.
    :return: (where_patterns, select_clause)
    """
    # The general part is identical for every support mode -- build it once
    # instead of once per branch.
    query, select_query = create_query_general(game_name, released_year, min_rating, input_platform, input_developer,
                                               input_publisher, input_seller, genre, theme, game_mode, min_price,
                                               max_price, min_discount, max_discount)
    if support == 'all':
        return query, select_query
    # Hardware thresholds are only needed below; reading them lazily lets
    # support='all' work with the default device_config=None (the original
    # crashed on the subscript in that case).
    p_score_device = device_config["processor_score"]
    g_score_device = device_config["graphics_card_score"]
    ram_size = device_config["ram_MB"]
    hdd_size = device_config["hdd_space_MB"]
    if support == 'only_supported':
        # Keep only games whose every requirement fits within the device.
        hardware_query = '''{
        ?game_id mgns:hasMSD ?msd_id .
        ?msd_id mgns:memory_MB ?memory_val .
        FILTER(?memory_val <= ''' + str(ram_size) + ''')
        ?msd_id mgns:diskSpace_MB ?disk_val .
        FILTER(?disk_val <= ''' + str(hdd_size) + ''')
        ?msd_id mgns:processor ?proc_id .
        ?proc_id mgns:hasCPUMark ?p_score .
        FILTER(?p_score <= ''' + str(p_score_device) + ''')
        ?msd_id mgns:graphics ?gr_id .
        ?gr_id mgns:g3dMark ?g_score .
        FILTER(?g_score <= ''' + str(g_score_device) + ''')
        }'''
        query += hardware_query
        select_query += '?memory_val ?disk_val '
        return query, select_query
    if support == 'only_not_supported':
        # Keep games that exceed the device in at least one dimension.
        hardware_query = '''{
        ?game_id mgns:hasMSD ?msd_id .
        ?msd_id mgns:memory_MB ?memory_val .
        ?msd_id mgns:diskSpace_MB ?disk_val .
        ?msd_id mgns:processor ?proc_id .
        ?proc_id mgns:hasCPUMark ?p_score .
        ?msd_id mgns:graphics ?gr_id .
        ?gr_id mgns:g3dMark ?g_score .
        FILTER(?memory_val > '''+str(ram_size)+''' || ?disk_val > ''' +str(hdd_size)+''' || ?p_score > ''' +str(p_score_device)+ ''' || ?g_score > ''' +str(g_score_device)+''')
        }'''
        query += hardware_query
        return query, select_query
    # Unknown support value: behave like 'all' instead of implicitly
    # returning None (which made callers crash on tuple unpacking).
    return query, select_query
def final_query(param_dict, device_config):
    """Run the user-built game search and shape the results for the UI.

    Returns {'cols': [column names], 'data': [row dicts of column -> value]}.
    """
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    prefix_query = getPrefixQuery()
    query_generated, select_query = create_query(game_name=param_dict["game_name"].lower(),
                                                 released_year=param_dict["released_year"].lower(),
                                                 min_rating=param_dict["min_rating"],
                                                 input_platform=param_dict["platform"].lower(),
                                                 input_developer=param_dict['developer'].lower(),
                                                 input_publisher=param_dict['publisher'].lower(),
                                                 input_seller=param_dict['seller'].lower(),
                                                 genre=param_dict["genre"].lower(),
                                                 theme=param_dict['theme'].lower(),
                                                 game_mode=param_dict['game_mode'].lower(),
                                                 min_price=param_dict['min_price'],
                                                 max_price=param_dict['max_price'],
                                                 min_discount=param_dict['min_discount'],
                                                 max_discount=param_dict['max_discount'],
                                                 support=param_dict['support'],
                                                 device_config=device_config)
    sparql.setQuery(prefix_query + select_query + '\n where {' + query_generated + '}')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    cols = []
    data = []
    for binding in results['results']['bindings']:
        # Column order comes from the first result row.
        if not cols:
            cols = list(binding.keys())
        data.append({key: binding[key]['value'] for key in binding.keys()})
    return {"cols": cols, "data": data}
def convertSizeToMB(cur_size):
    """Convert a human-readable size string (e.g. '512MB', '2 GB') to megabytes.

    The leading digits are the magnitude; the unit is detected anywhere in the
    (lower-cased) string. Unrecognized units are treated as megabytes.
    """
    text = cur_size.lower()
    digits = []
    for ch in text:
        if not ch.isdigit():
            break
        digits.append(ch)
    value = int("".join(digits))
    if "kb" in text:
        return value / 1024
    if "gb" in text:
        return value * 1024
    if "tb" in text:
        return value * 1024 * 1024
    return value
def getCPUs():
    """Return {cpu_uri: (display_name, benchmark_score)} for every processor."""
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT distinct ?cpu_id ?cpu_name ?cpu_score
        WHERE{
        ?cpu_id a mgns:Processor .
        ?cpu_id schema:name ?cpu_name .
        ?cpu_id mgns:hasCPUMark ?cpu_score .
        }
        ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return {
        row['cpu_id']['value']: (row['cpu_name']['value'], row['cpu_score']['value'])
        for row in results['results']['bindings']
    }
def getGPUs():
    """Return {gpu_uri: (display_name, benchmark_score)} for every graphics card."""
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT distinct ?gpu_id ?gpu_name ?gpu_score
        WHERE{
        ?gpu_id a mgns:Graphics .
        ?gpu_id schema:name ?gpu_name .
        ?gpu_id mgns:g3dMark ?gpu_score .
        }
        ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return {
        row['gpu_id']['value']: (row['gpu_name']['value'], row['gpu_score']['value'])
        for row in results['results']['bindings']
    }
def _best_kb_match(query_name, component_dict):
    """Return (uri, display_name) of the component whose lower-cased name is
    most similar to *query_name* under Levenshtein similarity.

    First maximum wins, matching the original linear scans this replaces.
    """
    best_id = None
    best_name = None
    best_sim = -1
    for comp_id, comp_val in component_dict.items():
        sim = levenshtein_similarity(query_name, comp_val[0].lower())
        if sim > best_sim:
            best_sim = sim
            best_id = comp_id
            best_name = comp_val[0]
    return best_id, best_name


def getLinkedDeviceData(input_device_param_dict):
    """Normalize free-text device specs and link CPU/GPU names to KG entities.

    :param input_device_param_dict: dict with free-text 'hdd_space', 'ram',
        'processor' and 'graphics_card' strings (may be empty)
    :return: (device_config, valid_flag) where valid_flag is 0 if any field
        was left blank by the user. Missing sizes/scores are recorded as -1.
    """
    device_config = {}
    valid_flag = 1

    hdd_space = input_device_param_dict["hdd_space"]
    if len(hdd_space) != 0:
        device_config["hdd_space_MB"] = convertSizeToMB(hdd_space)
    else:
        device_config["hdd_space_MB"] = -1
        valid_flag = 0

    ram = input_device_param_dict["ram"]
    if len(ram) != 0:
        device_config["ram_MB"] = convertSizeToMB(ram)
    else:
        device_config["ram_MB"] = -1
        valid_flag = 0

    # Map the free-text CPU name onto the closest processor in the KG.
    processor = input_device_param_dict["processor"].lower()
    cpu_dict = getCPUs()
    if len(processor) != 0:
        cpu_id, cpu_name = _best_kb_match(processor, cpu_dict)
        device_config["processor_id"] = cpu_id
        device_config["processor_val"] = cpu_name
        device_config["processor_score"] = cpu_dict[cpu_id][1]
    else:
        device_config["processor_id"] = None
        device_config["processor_val"] = None
        device_config["processor_score"] = -1
        valid_flag = 0

    # Map the free-text GPU name onto the closest graphics card in the KG.
    graphics_card = input_device_param_dict["graphics_card"].lower()
    gpu_dict = getGPUs()
    if len(graphics_card) != 0:
        gpu_id, gpu_name = _best_kb_match(graphics_card, gpu_dict)
        device_config["graphics_card_id"] = gpu_id
        device_config["graphics_card_val"] = gpu_name
        device_config["graphics_card_score"] = gpu_dict[gpu_id][1]
    else:
        device_config["graphics_card_id"] = None
        device_config["graphics_card_val"] = None
        device_config["graphics_card_score"] = -1
        valid_flag = 0

    return device_config, valid_flag
| [
"sklearn.metrics.pairwise.cosine_similarity",
"scipy.stats.rankdata",
"SPARQLWrapper.SPARQLWrapper",
"numpy.where",
"numpy.array"
] | [((935, 985), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (948, 985), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((5704, 5754), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (5717, 5754), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((10884, 10934), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (10897, 10934), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((12824, 12874), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (12837, 12874), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((14800, 14831), 'numpy.array', 'np.array', (['game_embedding_matrix'], {}), '(game_embedding_matrix)\n', (14808, 14831), True, 'import numpy as np\n'), ((14854, 14910), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['cur_game_embed', 'game_embedding_matrix'], {}), '(cur_game_embed, game_embedding_matrix)\n', (14871, 14910), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((14923, 14967), 'scipy.stats.rankdata', 'rankdata', (['(-cosine_sim_vals)'], {'method': '"""ordinal"""'}), "(-cosine_sim_vals, method='ordinal')\n", (14931, 14967), False, 'from scipy.stats import rankdata\n'), ((15376, 15426), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (15389, 15426), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((16166, 16216), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (16179, 16216), False, 'from SPARQLWrapper 
import SPARQLWrapper, JSON\n'), ((16971, 17021), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (16984, 17021), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((28180, 28230), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (28193, 28230), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((30111, 30161), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (30124, 30161), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((31056, 31106), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""http://localhost:3030/games/query"""'], {}), "('http://localhost:3030/games/query')\n", (31069, 31106), False, 'from SPARQLWrapper import SPARQLWrapper, JSON\n'), ((14989, 15009), 'numpy.where', 'np.where', (['(ranks <= 5)'], {}), '(ranks <= 5)\n', (14997, 15009), True, 'import numpy as np\n')] |
"""Viewing Box Module
This file contains a class required for creating a viewing box enabling the user to view the data.
Usage:
To use this module, import it and instantiate is as you wish:
from Paint4Brains.GUI.ModViewBox import ModViewBox
view = ModViewBox()
"""
import numpy as np
from pyqtgraph.Qt import QtCore
from pyqtgraph.Point import Point
from pyqtgraph import ViewBox
from pyqtgraph import functions as fn
class ModViewBox(ViewBox):
    """ModViewBox class for Paint4Brains.

    A pyqtgraph ViewBox subclass that adds a drawing mode. While
    ``self.drawing`` is True, mouse drags are interpreted for editing
    (left-drag pans are later overridden by the drawing kernel); otherwise
    the stock ViewBox pan/zoom behaviour is reproduced.

    Args:
        parent: Optional parent item, forwarded to ViewBox.
    """

    def __init__(self, parent=None):
        super(ModViewBox, self).__init__(parent=parent)

        # By default not in drawing mode
        self.drawing = False
        self.state['mouseMode'] = 3
        self.setAspectLocked(True)

    def mouseDragEvent(self, ev, axis=None):
        """Handle a mouse drag, dispatching on drawing mode and button.

        Args:
            ev: pyqtgraph MouseDragEvent emitted while a button is held.
            axis: Optional axis index (0 or 1); when given, motion is
                restricted to that axis.
        """
        # Overwritting mouseDragEvent to take drawmode into account.
        ev.accept()
        pos = ev.pos()
        lastPos = ev.lastPos()
        dif = pos - lastPos
        dif = dif * -1

        # Ignore axes if mouse is disabled
        # BUGFIX: the np.float alias was removed in NumPy 1.24; the builtin
        # float is the documented replacement and behaves identically here.
        mouseEnabled = np.array(self.state['mouseEnabled'], dtype=float)
        mask = mouseEnabled.copy()
        if axis is not None:
            mask[1 - axis] = 0.0

        # If in drawing mode (editted part):
        if self.drawing:
            self.state['mouseMode'] = self.RectMode

            # If right button is selected draw zoom in boxes:
            if ev.button() & QtCore.Qt.RightButton:
                if ev.isFinish():
                    self.rbScaleBox.hide()
                    ax = QtCore.QRectF(
                        Point(ev.buttonDownPos(ev.button())), Point(pos))
                    ax = self.childGroup.mapRectFromParent(ax)
                    self.showAxRect(ax)
                    self.axHistoryPointer += 1
                    self.axHistory = self.axHistory[:self.axHistoryPointer] + [ax]
                else:
                    self.updateScaleBox(ev.buttonDownPos(), ev.pos())

            # If Left Button is selected drag image (This will be overwritten in the image by the drawing kernel)
            elif ev.button() & QtCore.Qt.LeftButton:
                tr = dif * mask
                tr = self.mapToView(tr) - self.mapToView(Point(0, 0))
                x = tr.x() if mask[0] == 1 else None
                y = tr.y() if mask[1] == 1 else None
                self._resetTarget()
                if x is not None or y is not None:
                    self.translateBy(x=x, y=y)
                self.sigRangeChangedManually.emit(self.state['mouseEnabled'])

            # If Middle Button (wheel) zoom in or out.
            elif ev.button() & QtCore.Qt.MidButton:
                if self.state['aspectLocked'] is not False:
                    mask[0] = 0
                dif = ev.screenPos() - ev.lastScreenPos()
                dif = np.array([dif.x(), dif.y()])
                dif[0] *= -1
                # Exponential zoom factor per axis, scaled by drag distance.
                s = ((mask * 0.02) + 1) ** dif

                tr = self.childGroup.transform()
                tr = fn.invertQTransform(tr)
                x = s[0] if mouseEnabled[0] == 1 else None
                y = s[1] if mouseEnabled[1] == 1 else None

                center = Point(tr.map(ev.buttonDownPos(QtCore.Qt.LeftButton)))
                self._resetTarget()
                self.scaleBy(x=x, y=y, center=center)
                self.sigRangeChangedManually.emit(self.state['mouseEnabled'])

        # If not in drawing mode: (original functionality)
        else:
            # Scale or translate based on mouse button
            if ev.button() & (QtCore.Qt.LeftButton | QtCore.Qt.MidButton):
                if self.state['mouseMode'] == ViewBox.RectMode:
                    if ev.isFinish():  # This is the final move in the drag; change the view scale now
                        # print "finish"
                        self.rbScaleBox.hide()
                        ax = QtCore.QRectF(
                            Point(ev.buttonDownPos(ev.button())), Point(pos))
                        ax = self.childGroup.mapRectFromParent(ax)
                        self.showAxRect(ax)
                        self.axHistoryPointer += 1
                        self.axHistory = self.axHistory[:self.axHistoryPointer] + [
                            ax]
                    else:
                        # update shape of scale box
                        self.updateScaleBox(ev.buttonDownPos(), ev.pos())
                else:
                    tr = dif * mask
                    tr = self.mapToView(tr) - self.mapToView(Point(0, 0))
                    x = tr.x() if mask[0] == 1 else None
                    y = tr.y() if mask[1] == 1 else None
                    self._resetTarget()
                    if x is not None or y is not None:
                        self.translateBy(x=x, y=y)
                    self.sigRangeChangedManually.emit(
                        self.state['mouseEnabled'])
            elif ev.button() & QtCore.Qt.RightButton:
                # print "vb.rightDrag"
                if self.state['aspectLocked'] is not False:
                    mask[0] = 0
                dif = ev.screenPos() - ev.lastScreenPos()
                dif = np.array([dif.x(), dif.y()])
                dif[0] *= -1
                s = ((mask * 0.02) + 1) ** dif

                tr = self.childGroup.transform()
                tr = fn.invertQTransform(tr)
                x = s[0] if mouseEnabled[0] == 1 else None
                y = s[1] if mouseEnabled[1] == 1 else None

                center = Point(tr.map(ev.buttonDownPos(QtCore.Qt.RightButton)))
                self._resetTarget()
                self.scaleBy(x=x, y=y, center=center)
                self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
| [
"pyqtgraph.functions.invertQTransform",
"pyqtgraph.Point.Point",
"numpy.array"
] | [((1446, 1498), 'numpy.array', 'np.array', (["self.state['mouseEnabled']"], {'dtype': 'np.float'}), "(self.state['mouseEnabled'], dtype=np.float)\n", (1454, 1498), True, 'import numpy as np\n'), ((5736, 5759), 'pyqtgraph.functions.invertQTransform', 'fn.invertQTransform', (['tr'], {}), '(tr)\n', (5755, 5759), True, 'from pyqtgraph import functions as fn\n'), ((2012, 2022), 'pyqtgraph.Point.Point', 'Point', (['pos'], {}), '(pos)\n', (2017, 2022), False, 'from pyqtgraph.Point import Point\n'), ((3393, 3416), 'pyqtgraph.functions.invertQTransform', 'fn.invertQTransform', (['tr'], {}), '(tr)\n', (3412, 3416), True, 'from pyqtgraph import functions as fn\n'), ((2605, 2616), 'pyqtgraph.Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (2610, 2616), False, 'from pyqtgraph.Point import Point\n'), ((4352, 4362), 'pyqtgraph.Point.Point', 'Point', (['pos'], {}), '(pos)\n', (4357, 4362), False, 'from pyqtgraph.Point import Point\n'), ((4913, 4924), 'pyqtgraph.Point.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (4918, 4924), False, 'from pyqtgraph.Point import Point\n')] |
import panel as pn
import dask_cudf
import numpy as np
from .core_aggregate import BaseAggregateChart
from ....assets.numba_kernels import calc_groupby, calc_value_counts
from ....layouts import chart_view
class BaseLine(BaseAggregateChart):
    """Line/histogram chart built on the aggregate-chart base class.

    Plots either a value-count histogram of column ``x`` (when ``y`` is
    ``None`` or equal to ``x``) or a groupby mean of ``y`` over ``x``.
    """

    # Identifies this chart type to the framework.
    chart_type: str = "line"
    reset_event = None
    # Backing field for the datatile_loaded_state property.
    _datatile_loaded_state: bool = False
    # Range slider created by add_range_slider_filter().
    filter_widget = None
    use_data_tiles = True

    @property
    def datatile_loaded_state(self):
        """bool: whether the datatiles backing this chart are loaded."""
        return self._datatile_loaded_state

    @datatile_loaded_state.setter
    def datatile_loaded_state(self, state: bool):
        # Mirror the load state in the filter widget color: blue when
        # loaded, gray while (re)computing.
        self._datatile_loaded_state = state
        if self.add_interaction:
            if state:
                self.filter_widget.bar_color = "#8ab4f7"
            else:
                self.filter_widget.bar_color = "#d3d9e2"

    def __init__(
        self,
        x,
        y=None,
        data_points=None,
        add_interaction=True,
        aggregate_fn="count",
        width=400,
        height=400,
        step_size=None,
        step_size_type=int,
        title="",
        autoscaling=True,
        **library_specific_params,
    ):
        """
        Parameters
        ----------
        x: column plotted on the x axis
        y: optional column aggregated on the y axis; when None or equal
            to ``x`` the chart is a histogram of ``x``
        data_points: number of bins/points; derived from the data when None
        add_interaction: attach the range-slider filter when True
        aggregate_fn: aggregation name (forced to "mean" in groupby mode)
        width, height: chart dimensions in pixels
        step_size: bin width; computed from data_points when None
        step_size_type: int or float, type used for bin boundaries
        title: chart title (defaults to the ``x`` column name)
        autoscaling: whether the y axis rescales on updates
        **library_specific_params: passed through to the plotting backend
        """
        self.x = x
        self.y = y
        self.data_points = data_points
        self.add_interaction = add_interaction
        self.aggregate_fn = aggregate_fn
        self.height = height
        self.width = width
        self.stride = step_size
        self.stride_type = step_size_type
        if len(title) == 0:
            self.title = self.x
        else:
            self.title = title
        self.autoscaling = autoscaling
        self.library_specific_params = library_specific_params

    def initiate_chart(self, dashboard_cls):
        """Compute value bounds and stride from the data, then build the
        chart, add interactions, and register dashboard events.

        Parameters
        ----------
        dashboard_cls: dashboard owning the (dask_)cudf DataFrame
        """
        if dashboard_cls._data[self.x].dtype == "bool":
            # Booleans get a fixed two-bin layout with readable labels.
            self.min_value = 0
            self.max_value = 1
            self.stride = 1
            # set axis labels:
            dict_map = {0: "False", 1: "True"}
            # NOTE(review): x_label_map / y_label_map are read here but not
            # set in __init__ — presumably initialized by BaseAggregateChart;
            # verify.
            if len(self.x_label_map) == 0:
                self.x_label_map = dict_map
            if (
                self.y != self.x
                and self.y is not None
                and len(self.y_label_map) == 0
            ):
                self.y_label_map = dict_map
        else:
            # dask_cudf reductions are lazy and need an explicit compute().
            if type(dashboard_cls._data) == dask_cudf.core.DataFrame:
                self.min_value = dashboard_cls._data[self.x].min().compute()
                self.max_value = dashboard_cls._data[self.x].max().compute()
            else:
                self.min_value = dashboard_cls._data[self.x].min()
                self.max_value = dashboard_cls._data[self.x].max()

        # Sub-unit ranges cannot be binned with integer strides.
        if self.max_value < 1 and self.stride_type == int:
            self.stride_type = float
        if self.stride is None and self.data_points is not None:
            if self.stride_type == int:
                self.stride = int(
                    round(
                        (self.max_value - self.min_value)
                        / self.data_points
                    )
                )
            else:
                self.stride = float(
                    (self.max_value - self.min_value) / self.data_points
                )

        self.calculate_source(dashboard_cls._data)
        self.generate_chart()
        self.apply_mappers()

        if self.add_interaction:
            self.add_range_slider_filter(dashboard_cls)
        self.add_events(dashboard_cls)

    def view(self):
        # Chart plus its slider laid out in a single panel view.
        return chart_view(self.chart, self.filter_widget, width=self.width)

    def calculate_source(self, data, patch_update=False):
        """Aggregate ``data`` into the chart's source dict.

        Parameters
        ----------
        data: (dask_)cudf DataFrame to aggregate
        patch_update: when True, patch the existing source in place
        """
        if self.y == self.x or self.y is None:
            # it's a histogram
            df, self.data_points, self.custom_binning = calc_value_counts(
                data[self.x], self.stride, self.min_value, self.data_points
            )
            if self.data_points > 50_000:
                print(
                    "number of x-values for a line chart ",
                    "exceeds 50,000 points.",
                    "Performance may be laggy, its recommended ",
                    "to use custom data_points parameter to ",
                    "enforce custom binning for smooth crossfiltering.",
                    "Also, checkout datashader.line for ",
                    "rendering millions of points.",
                )
        else:
            self.aggregate_fn = "mean"
            df = calc_groupby(self, data)
            if self.data_points is None:
                self.data_points = len(df[0])
            if self.stride is None:
                self.stride = self.stride_type(
                    round((self.max_value - self.min_value) / self.data_points)
                )

        # NOTE(review): self.custom_binning is only assigned in the
        # histogram branch above — confirm the base class initializes it
        # before the groupby path reaches this point.
        if self.custom_binning:
            if len(self.x_label_map) == 0:
                # Map bin indices back to their (rounded) data values so
                # axis ticks show real x values instead of bin numbers.
                temp_mapper_index = np.array(df[0])
                temp_mapper_value = np.round(
                    (temp_mapper_index * self.stride) + self.min_value, 4,
                ).astype("str")
                temp_mapper_index = temp_mapper_index.astype("str")
                self.x_label_map = dict(
                    zip(temp_mapper_index, temp_mapper_value)
                )
        dict_temp = {
            "X": list(df[0].astype(df[0].dtype)),
            "Y": list(df[1].astype(df[1].dtype)),
        }

        self.format_source_data(dict_temp, patch_update)

    def add_range_slider_filter(self, dashboard_cls):
        """Description: add range slider to the bottom of the chart,
        for the filter function to facilitate interaction
        behavior, that updates the rest of the charts on the page,
        using datatiles

        Parameters
        ----------
        dashboard_cls: dashboard owning this chart; notified on changes
        """
        if self.stride is None:
            self.stride = self.stride_type(
                round((self.max_value - self.min_value) / self.data_points)
            )
        self.filter_widget = pn.widgets.RangeSlider(
            start=self.min_value,
            end=self.max_value,
            value=(self.min_value, self.max_value),
            step=self.stride,
            **{"width": self.width},
            sizing_mode="scale_width",
        )

        def filter_widget_callback(event):
            # Becoming the active view triggers a datatile recompute before
            # the range query is applied to the other charts.
            if dashboard_cls._active_view != self.name:
                dashboard_cls._reset_current_view(new_active_view=self)
                dashboard_cls._calc_data_tiles()
            dashboard_cls._query_datatiles_by_range(event.new)

        # add callback to filter_Widget on value change
        self.filter_widget.param.watch(
            filter_widget_callback, ["value"], onlychanged=False
        )

    def compute_query_dict(self, query_str_dict):
        """Write (or clear) this chart's range filter expression.

        Parameters
        ----------
        query_str_dict: reference to dashboard.__cls__.query_dict; the
            entry for this chart is set to "lo<=x<=hi" when the slider is
            narrowed, and removed when it spans the full range
        """
        if self.filter_widget.value != (
            self.filter_widget.start,
            self.filter_widget.end,
        ):
            min_temp, max_temp = self.filter_widget.value
            query_str_dict[self.name] = (
                str(self.stride_type(round(min_temp, 4)))
                + "<="
                + str(self.x)
                + "<="
                + str(self.stride_type(round(max_temp, 4)))
            )
        else:
            query_str_dict.pop(self.name, None)

    def add_events(self, dashboard_cls):
        """Register optional dashboard events (currently only reset).

        Parameters
        ----------
        dashboard_cls: dashboard owning this chart
        """
        if self.reset_event is not None:
            self.add_reset_event(dashboard_cls)

    def add_reset_event(self, dashboard_cls):
        """Hook the chart's reset event to restore the full slider range.

        Parameters
        ----------
        dashboard_cls: dashboard owning this chart (unused directly; the
            slider callback propagates the change)
        """

        def reset_callback(event):
            self.filter_widget.value = (
                self.filter_widget.start,
                self.filter_widget.end,
            )

        # add callback to reset chart button
        self.add_event(self.reset_event, reset_callback)
| [
"panel.widgets.RangeSlider",
"numpy.round",
"numpy.array"
] | [((6883, 7067), 'panel.widgets.RangeSlider', 'pn.widgets.RangeSlider', ([], {'start': 'self.min_value', 'end': 'self.max_value', 'value': '(self.min_value, self.max_value)', 'step': 'self.stride', 'sizing_mode': '"""scale_width"""'}), "(start=self.min_value, end=self.max_value, value=(\n self.min_value, self.max_value), step=self.stride, **{'width': self.\n width}, sizing_mode='scale_width')\n", (6905, 7067), True, 'import panel as pn\n'), ((5672, 5687), 'numpy.array', 'np.array', (['df[0]'], {}), '(df[0])\n', (5680, 5687), True, 'import numpy as np\n'), ((5724, 5785), 'numpy.round', 'np.round', (['(temp_mapper_index * self.stride + self.min_value)', '(4)'], {}), '(temp_mapper_index * self.stride + self.min_value, 4)\n', (5732, 5785), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import json
from json import encoder
import numpy as np
from sklearn_porter.estimator.classifier.Classifier import Classifier
class BernoulliNB(Classifier):
    """
    See also
    --------
    sklearn.naive_bayes.BernoulliNB

    http://scikit-learn.org/stable/modules/generated/
    sklearn.naive_bayes.BernoulliNB.html
    """

    # Only transpiling of the predict method is implemented.
    SUPPORTED_METHODS = ['predict']

    # Per-language snippets used to render typed array literals.
    # @formatter:off
    TEMPLATES = {
        'java': {
            'type':     '{0}',
            'arr':      '{{{0}}}',
            'arr[]':    '{type}[] {name} = {{{values}}};',
            'arr[][]':  '{type}[][] {name} = {{{values}}};',
            'indent':   '    ',
        },
        'js': {
            'type':     '{0}',
            'arr':      '[{0}]',
            'arr[]':    'var {name} = [{values}];',
            'arr[][]':  'var {name} = [{values}];',
            'indent':   '    ',
        }
    }
    # @formatter:on

    def __init__(self, estimator, target_language='java',
                 target_method='predict', **kwargs):
        """
        Port a trained estimator to the syntax of a chosen programming
        language.

        Parameters
        ----------
        :param estimator : BernoulliNB
            An instance of a trained BernoulliNB estimator.
        :param target_language : string
            The target programming language.
        :param target_method : string
            The target method of the estimator.
        """
        super(BernoulliNB, self).__init__(
            estimator, target_language=target_language,
            target_method=target_method, **kwargs)
        self.estimator = estimator

    def export(self, class_name, method_name, export_data=False,
               export_dir='.', export_filename='data.json',
               export_append_checksum=False, **kwargs):
        """
        Port a trained estimator to the syntax of a chosen programming
        language.

        Parameters
        ----------
        :param class_name : string
            The name of the class in the returned result.
        :param method_name : string
            The name of the method in the returned result.
        :param export_data : bool, default: False
            Whether the model data should be saved or not.
        :param export_dir : string, default: '.' (current directory)
            The directory where the model data should be saved.
        :param export_filename : string, default: 'data.json'
            The filename of the exported model data.
        :param export_append_checksum : bool, default: False
            Whether to append the checksum to the filename or not.

        Returns
        -------
        :return : string
            The transpiled algorithm with the defined placeholders.
        """
        # Arguments:
        self.class_name = class_name
        self.method_name = method_name

        # Estimator:
        est = self.estimator

        self.n_classes = len(est.classes_)
        self.n_features = len(est.feature_log_prob_[0])

        temp_type = self.temp('type')
        temp_arr = self.temp('arr')
        temp_arr_ = self.temp('arr[]')
        temp_arr__ = self.temp('arr[][]')

        # Create class prior probabilities:
        priors = [self.temp('type').format(self.repr(p)) for p in
                  est.class_log_prior_]
        priors = ', '.join(priors)
        self.priors = temp_arr_.format(type='double', name='priors',
                                       values=priors)

        # Create negative probabilities:
        # neg_prob = log(1 - P(feature|class)), derived from the stored
        # log P(feature|class) matrix.
        neg_prob = np.log(1 - np.exp(est.feature_log_prob_))
        probs = []
        for prob in neg_prob:
            tmp = [temp_type.format(self.repr(p)) for p in prob]
            tmp = temp_arr.format(', '.join(tmp))
            probs.append(tmp)
        probs = ', '.join(probs)
        self.neg_probs = temp_arr__.format(type='double', name='negProbs',
                                           values=probs)

        # delta = log P - log(1 - P), transposed so the generated code can
        # index it feature-major.
        delta_probs = (est.feature_log_prob_ - neg_prob).T
        probs = []
        for prob in delta_probs:
            tmp = [temp_type.format(self.repr(p)) for p in prob]
            tmp = temp_arr.format(', '.join(tmp))
            probs.append(tmp)
        probs = ', '.join(probs)
        self.del_probs = temp_arr__.format(type='double', name='delProbs',
                                           values=probs)

        if self.target_method == 'predict':
            # Exported:
            if export_data and os.path.isdir(export_dir):
                self.export_data(export_dir, export_filename,
                                 export_append_checksum)
                return self.predict('exported')
            # Separated:
            return self.predict('separated')

    def predict(self, temp_type):
        """
        Transpile the predict method.

        Parameters
        ----------
        :param temp_type : string
            The kind of export type (embedded, separated, exported).

        Returns
        -------
        :return : string
            The transpiled predict method as string.
        """
        # Exported:
        if temp_type == 'exported':
            temp = self.temp('exported.class')
            return temp.format(class_name=self.class_name,
                               method_name=self.method_name)

        # Separated
        method = self.create_method()
        return self.create_class(method)

    def export_data(self, directory, filename, with_md5_hash=False):
        """
        Save model data in a JSON file.

        Parameters
        ----------
        :param directory : string
            The directory.
        :param filename : string
            The filename.
        :param with_md5_hash : bool, default: False
            Whether to append the checksum to the filename or not.
        """
        neg_prob = np.log(1 - np.exp(self.estimator.feature_log_prob_))
        delta_probs = (self.estimator.feature_log_prob_ - neg_prob).T

        model_data = {
            'priors': self.estimator.class_log_prior_.tolist(),
            'negProbs': neg_prob.tolist(),
            'delProbs': delta_probs.tolist()
        }
        # NOTE(review): monkey-patching json.encoder.FLOAT_REPR has no
        # effect on Python 3's C-accelerated encoder — confirm the intended
        # float formatting still applies, or drop this line.
        encoder.FLOAT_REPR = lambda o: self.repr(o)
        json_data = json.dumps(model_data, sort_keys=True)
        if with_md5_hash:
            import hashlib
            json_hash = hashlib.md5(json_data).hexdigest()
            filename = filename.split('.json')[0] + '_' + json_hash + '.json'
        path = os.path.join(directory, filename)
        with open(path, 'w') as fp:
            fp.write(json_data)

    def create_method(self):
        """
        Build the estimator method or function.

        Returns
        -------
        :return : string
            The built method as string.
        """
        n_indents = 1 if self.target_language in ['java', 'js'] else 0
        temp_method = self.temp('separated.method.predict',
                                n_indents=n_indents, skipping=True)
        return temp_method.format(**self.__dict__)

    def create_class(self, method):
        """
        Build the estimator class.

        Returns
        -------
        :return : string
            The built class as string.
        """
        self.__dict__.update(dict(method=method))
        temp_class = self.temp('separated.class')
        return temp_class.format(**self.__dict__)
| [
"hashlib.md5",
"os.path.isdir",
"json.dumps",
"numpy.exp",
"os.path.join"
] | [((6242, 6280), 'json.dumps', 'json.dumps', (['model_data'], {'sort_keys': '(True)'}), '(model_data, sort_keys=True)\n', (6252, 6280), False, 'import json\n'), ((6486, 6519), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (6498, 6519), False, 'import os\n'), ((3582, 3611), 'numpy.exp', 'np.exp', (['est.feature_log_prob_'], {}), '(est.feature_log_prob_)\n', (3588, 3611), True, 'import numpy as np\n'), ((4494, 4519), 'os.path.isdir', 'os.path.isdir', (['export_dir'], {}), '(export_dir)\n', (4507, 4519), False, 'import os\n'), ((5873, 5913), 'numpy.exp', 'np.exp', (['self.estimator.feature_log_prob_'], {}), '(self.estimator.feature_log_prob_)\n', (5879, 5913), True, 'import numpy as np\n'), ((6358, 6380), 'hashlib.md5', 'hashlib.md5', (['json_data'], {}), '(json_data)\n', (6369, 6380), False, 'import hashlib\n')] |
"""Loading words vectors."""
import gzip
import logging
import pickle
from os.path import join, exists
from typing import Iterable, Optional
import numpy as np
from tqdm import tqdm
from debias import config
from debias.utils import py_utils
# Download locations for the pretrained vectors that the functions below
# can fetch automatically.
FASTTEXT_URL = "https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip"
GLOVE_6B_VECS = ["glove.6B.100d", "glove.6B.200d",
                 "glove.6B.300d", "glove.6B.50d"]
GLOVE_6B_URL = "http://nlp.stanford.edu/data/glove.6B.zip"
def download_word_vectors(vec_name):
  """Fetch the named pretrained vectors, if we know how to download them."""
  if vec_name == "crawl-300d-2M":
    download_fasttext()
    return
  if vec_name in GLOVE_6B_VECS:
    download_glove_6b()
    return
  raise NotImplementedError(
    vec_name + " does not exist, and cannot be automatically downloaded, please download manually")
def download_fasttext():
  """Download the fastText crawl-300d-2M vectors unless already present."""
  target = join(config.WORD_VEC_SOURCE, "crawl-300d-2M.vec")
  if not exists(target):
    py_utils.download_zip("crawl-300d-2M.vec", FASTTEXT_URL, config.WORD_VEC_SOURCE)
def download_glove_6b():
  """Download the GloVe 6B vectors unless every variant is already on disk."""
  missing = [x for x in GLOVE_6B_VECS
             if not exists(join(config.WORD_VEC_SOURCE, x + ".txt"))]
  if missing:
    py_utils.download_zip("Glove 6B", GLOVE_6B_URL, config.WORD_VEC_SOURCE)
def _find_vec_path(vec_name):
  """Return the on-disk path for `vec_name`, trying the known suffixes
  in order of preference, or None if no variant exists."""
  base = join(config.WORD_VEC_SOURCE, vec_name)
  for suffix in (".txt", ".txt.gz", ".pkl", ".vec"):
    candidate = base + suffix
    if exists(candidate):
      return candidate
  return None
def load_word_vectors(vec_name: str, vocab: Optional[Iterable[str]]=None, n_words_to_scan=None):
  """Locate (downloading if necessary) and load the named word vectors."""
  path = _find_vec_path(vec_name)
  if path is not None:
    return load_word_vector_file(path, vocab, n_words_to_scan)
  download_word_vectors(vec_name)
  path = _find_vec_path(vec_name)
  if path is None:
    raise RuntimeError("Download bug?")
  return load_word_vector_file(path, vocab, n_words_to_scan)
def load_word_vector_file(vec_path: str, vocab: Optional[Iterable[str]]=None,
                          n_words_to_scan=None):
  """Load word vectors from a .pkl, .txt, .txt.gz or .vec file.

  :param vec_path: path of the vector file
  :param vocab: if given, only keep vectors for these words
  :param n_words_to_scan: if given, stop after scanning this many lines
  :return: the unpickled object for .pkl files, otherwise a
    (words, vecs) pair of parallel lists
  """
  if vocab is not None:
    vocab = set(vocab)
  if vec_path.endswith(".pkl"):
    with open(vec_path, "rb") as f:
      return pickle.load(f)

  # some of the large vec files produce utf-8 errors for some words, just skip them
  elif vec_path.endswith(".txt.gz"):
    # BUGFIX: gzip.open's 'r' mode is binary and rejects the `encoding`
    # argument (ValueError); 'rt' opens the stream in text mode.
    handle = lambda x: gzip.open(x, 'rt', encoding='utf-8', errors='ignore')
  else:
    handle = lambda x: open(x, 'r', encoding='utf-8', errors='ignore')

  if n_words_to_scan is None:
    if vocab is None:
      logging.info("Loading word vectors from %s..." % vec_path)
    else:
      logging.info("Loading word vectors from %s for voc size %d..." % (vec_path, len(vocab)))
  else:
    if vocab is None:
      logging.info("Loading up to %d word vectors from %s..." % (n_words_to_scan, vec_path))
    else:
      logging.info("Loading up to %d word vectors from %s for voc size %d..." % (n_words_to_scan, vec_path, len(vocab)))
  words = []
  vecs = []
  pbar = tqdm(desc="word-vec")
  with handle(vec_path) as fh:
    for i, line in enumerate(fh):
      pbar.update(1)
      if n_words_to_scan is not None and i >= n_words_to_scan:
        break
      word_ix = line.find(" ")
      if i == 0 and " " not in line[word_ix+1:]:
        # assume a header row, such as found in the fasttext word vectors
        continue
      word = line[:word_ix]
      if (vocab is None) or (word in vocab):
        words.append(word)
        # np.fromstring is deprecated for text parsing; split + np.array
        # is the documented, equivalent replacement.
        vecs.append(np.array(line[word_ix+1:].split(), dtype=np.float32))
  pbar.close()
  return words, vecs
| [
"tqdm.tqdm",
"gzip.open",
"os.path.exists",
"logging.info",
"pickle.load",
"numpy.fromstring",
"os.path.join",
"debias.utils.py_utils.download_zip"
] | [((901, 986), 'debias.utils.py_utils.download_zip', 'py_utils.download_zip', (['"""crawl-300d-2M.vec"""', 'FASTTEXT_URL', 'config.WORD_VEC_SOURCE'], {}), "('crawl-300d-2M.vec', FASTTEXT_URL, config.WORD_VEC_SOURCE\n )\n", (922, 986), False, 'from debias.utils import py_utils\n'), ((1105, 1176), 'debias.utils.py_utils.download_zip', 'py_utils.download_zip', (['"""Glove 6B"""', 'GLOVE_6B_URL', 'config.WORD_VEC_SOURCE'], {}), "('Glove 6B', GLOVE_6B_URL, config.WORD_VEC_SOURCE)\n", (1126, 1176), False, 'from debias.utils import py_utils\n'), ((1222, 1260), 'os.path.join', 'join', (['config.WORD_VEC_SOURCE', 'vec_name'], {}), '(config.WORD_VEC_SOURCE, vec_name)\n', (1226, 1260), False, 'from os.path import join, exists\n'), ((1266, 1291), 'os.path.exists', 'exists', (["(vec_path + '.txt')"], {}), "(vec_path + '.txt')\n", (1272, 1291), False, 'from os.path import join, exists\n'), ((2966, 2987), 'tqdm.tqdm', 'tqdm', ([], {'desc': '"""word-vec"""'}), "(desc='word-vec')\n", (2970, 2987), False, 'from tqdm import tqdm\n'), ((836, 885), 'os.path.join', 'join', (['config.WORD_VEC_SOURCE', '"""crawl-300d-2M.vec"""'], {}), "(config.WORD_VEC_SOURCE, 'crawl-300d-2M.vec')\n", (840, 885), False, 'from os.path import join, exists\n'), ((1329, 1357), 'os.path.exists', 'exists', (["(vec_path + '.txt.gz')"], {}), "(vec_path + '.txt.gz')\n", (1335, 1357), False, 'from os.path import join, exists\n'), ((1398, 1423), 'os.path.exists', 'exists', (["(vec_path + '.pkl')"], {}), "(vec_path + '.pkl')\n", (1404, 1423), False, 'from os.path import join, exists\n'), ((2162, 2176), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2173, 2176), False, 'import pickle\n'), ((2513, 2571), 'logging.info', 'logging.info', (["('Loading word vectors from %s...' % vec_path)"], {}), "('Loading word vectors from %s...' % vec_path)\n", (2525, 2571), False, 'import logging\n'), ((2713, 2803), 'logging.info', 'logging.info', (["('Loading up to %d word vectors from %s...' 
% (n_words_to_scan, vec_path))"], {}), "('Loading up to %d word vectors from %s...' % (n_words_to_scan,\n vec_path))\n", (2725, 2803), False, 'import logging\n'), ((1025, 1065), 'os.path.join', 'join', (['config.WORD_VEC_SOURCE', "(x + '.txt')"], {}), "(config.WORD_VEC_SOURCE, x + '.txt')\n", (1029, 1065), False, 'from os.path import join, exists\n'), ((1461, 1486), 'os.path.exists', 'exists', (["(vec_path + '.vec')"], {}), "(vec_path + '.vec')\n", (1467, 1486), False, 'from os.path import join, exists\n'), ((2322, 2374), 'gzip.open', 'gzip.open', (['x', '"""r"""'], {'encoding': '"""utf-8"""', 'errors': '"""ignore"""'}), "(x, 'r', encoding='utf-8', errors='ignore')\n", (2331, 2374), False, 'import gzip\n'), ((3442, 3502), 'numpy.fromstring', 'np.fromstring', (['line[word_ix + 1:]'], {'sep': '""" """', 'dtype': 'np.float32'}), "(line[word_ix + 1:], sep=' ', dtype=np.float32)\n", (3455, 3502), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import argparse
import logging
import numpy as np
import os
import re
import torch
import json
import csv
import pytorch_lightning as pl
from pprint import pprint as pp
from model import ErrorCheckerInferenceModule
from nltk import sent_tokenize
from utils.utils import Label
from utils import tokenizer
from utils.sentence_scorer import SentenceScorer
from preprocess import load_games
from generate import Generator
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO, datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)

# NOTE(review): this rebinds the imported `tokenizer` module name to a
# Tokenizer instance, shadowing the module for the rest of the file —
# confirm nothing below needs the module itself.
tokenizer = tokenizer.Tokenizer()
class Decoder:
    """Runs the error checker over every game in a games.csv file and
    writes the resulting error annotations in the submission CSV format."""

    def __init__(self, args):
        self.args = args
        # The trained error-checker checkpoint lives under the experiment dir.
        ec_model_path = os.path.join(args.exp_dir, args.experiment, args.checkpoint)
        self.ec = ErrorCheckerInferenceModule(args, model_path=ec_model_path)
        self.ss = SentenceScorer()

    def process_input_file(self, row, test_games, f_out):
        """
        Produces annotations for a single game.

        Args:
            row: one games.csv row — [file, _, _, test_idx, text].
            test_games: list of game data dicts indexed by test_idx.
            f_out: open output file the annotation rows are appended to.
        """
        file = row[0]
        test_idx = int(row[3])
        text = row[4]
        game_data = test_games[test_idx]
        logger.info(f"Processing {file}")

        sentences = sent_tokenize(text)
        doc_token_idx = 0

        for sent_idx, sentence in enumerate(sentences):
            # Retrieve the most relevant context sentences for this hypothesis.
            text = self.ss.retrieve_ctx(sentence=sentence, game_data=game_data, cnt=self.args.ctx)
            hyp = sentence
            out = self.ec.predict(text=text, hyp=hyp, beam_size=self.args.beam_size, is_hyp_tokenized=True)

            for token_idx, (token, tag) in enumerate(out):
                doc_token_idx += 1
                # NOTE(review): the counter advances for every token, so
                # ANNOTATION_IDs are not consecutive across written rows —
                # confirm this is acceptable for the submission format.
                self.error_id += 1
                # skip if OK
                if tag == Label.O.name:
                    continue
                logger.info(f"{tag} {token}")

                # write an error annotation (need to be careful about indices starting from 1)
                out_row = [
                    file + ".txt",
                    sent_idx+1,
                    self.error_id,
                    token,
                    token_idx+1,
                    token_idx+1,
                    doc_token_idx,
                    doc_token_idx,
                    tag,
                    "",
                    ""
                ]
                out_row = [f'"{column}"' for column in out_row if column is not None]
                f_out.write(",".join(out_row) + "\n")

    def decode(self):
        """
        Produces error annotations for the games.csv file
        """
        self.error_id = 0
        templates_path = f"./context/{args.templates}"
        test_games = load_games(templates_path, rotowire_dir=self.args.rotowire_dir, split="test")
        output_path = os.path.join(args.exp_dir, args.experiment, args.out_fname)

        with open(args.input_file) as f_in, open(output_path, "w") as f_out:
            reader = csv.reader(f_in, delimiter=',', quotechar='"')
            # skip header of games.csv
            next(reader)
            # write a submission header
            f_out.write(
                '"TEXT_ID","SENTENCE_ID","ANNOTATION_ID","TOKENS","SENT_TOKEN_START",'
                '"SENT_TOKEN_END","DOC_TOKEN_START","DOC_TOKEN_END","TYPE","CORRECTION","COMMENT"\n'
            )
            for row in reader:
                # each game is in a separate file
                self.process_input_file(row, test_games, f_out)
if __name__ == "__main__":
    # Command-line interface: experiment location, I/O paths, and decoding
    # hyperparameters for the error checker.
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_dir", default="experiments", type=str,
        help="Base directory of the experiment.")
    parser.add_argument("--input_file", default="games.csv", type=str,
        help="Input directory.")
    parser.add_argument("--out_fname", default="out.csv", type=str,
        help="Output file.")
    parser.add_argument("--ctx", type=int, default=None, required=True,
        help="Number of sentences retrieved for the context.")
    parser.add_argument("--experiment", type=str, required=True,
        help="Experiment name.")
    parser.add_argument("--seed", default=42, type=int,
        help="Random seed.")
    parser.add_argument("--max_threads", default=4, type=int,
        help="Maximum number of threads.")
    parser.add_argument("--beam_size", default=1, type=int,
        help="Beam size.")
    parser.add_argument("--gpus", default=1, type=int,
        help="Number of GPUs.")
    parser.add_argument("--max_length", type=int, default=512,
        help="Maximum number of tokens per example")
    parser.add_argument("--checkpoint", type=str, default="model.ckpt",
        help="Override the default checkpoint name 'model.ckpt'.")
    parser.add_argument("--templates", type=str, default=None,
        help="Type of templates (simple / compact).")
    parser.add_argument("--rotowire_dir", type=str, default="rotowire",
        help="Path to the original Rotowire dataset.")
    args = parser.parse_args()
    logger.info(args)

    # Seed both torch and numpy for reproducible decoding.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.set_num_threads(args.max_threads)

    d = Decoder(args)
    d.decode()
"model.ErrorCheckerInferenceModule",
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.basicConfig",
"utils.tokenizer.Tokenizer",
"utils.sentence_scorer.SentenceScorer",
"torch.manual_seed",
"nltk.sent_tokenize",
"preprocess.load_games",
"csv.reader",
"torch.set_num_threads",
"os.path.j... | [((444, 559), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO', 'datefmt': '"""%H:%M:%S"""'}), "(format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.INFO, datefmt='%H:%M:%S')\n", (463, 559), False, 'import logging\n'), ((565, 592), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (582, 592), False, 'import logging\n'), ((606, 627), 'utils.tokenizer.Tokenizer', 'tokenizer.Tokenizer', ([], {}), '()\n', (625, 627), False, 'from utils import tokenizer\n'), ((3501, 3526), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3524, 3526), False, 'import argparse\n'), ((5003, 5031), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5020, 5031), False, 'import torch\n'), ((5036, 5061), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5050, 5061), True, 'import numpy as np\n'), ((5066, 5105), 'torch.set_num_threads', 'torch.set_num_threads', (['args.max_threads'], {}), '(args.max_threads)\n', (5087, 5105), False, 'import torch\n'), ((724, 784), 'os.path.join', 'os.path.join', (['args.exp_dir', 'args.experiment', 'args.checkpoint'], {}), '(args.exp_dir, args.experiment, args.checkpoint)\n', (736, 784), False, 'import os\n'), ((804, 863), 'model.ErrorCheckerInferenceModule', 'ErrorCheckerInferenceModule', (['args'], {'model_path': 'ec_model_path'}), '(args, model_path=ec_model_path)\n', (831, 863), False, 'from model import ErrorCheckerInferenceModule\n'), ((882, 898), 'utils.sentence_scorer.SentenceScorer', 'SentenceScorer', ([], {}), '()\n', (896, 898), False, 'from utils.sentence_scorer import SentenceScorer\n'), ((1211, 1230), 'nltk.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (1224, 1230), False, 'from nltk import sent_tokenize\n'), ((2668, 2745), 'preprocess.load_games', 'load_games', (['templates_path'], {'rotowire_dir': 
'self.args.rotowire_dir', 'split': '"""test"""'}), "(templates_path, rotowire_dir=self.args.rotowire_dir, split='test')\n", (2678, 2745), False, 'from preprocess import load_games\n'), ((2768, 2827), 'os.path.join', 'os.path.join', (['args.exp_dir', 'args.experiment', 'args.out_fname'], {}), '(args.exp_dir, args.experiment, args.out_fname)\n', (2780, 2827), False, 'import os\n'), ((2935, 2981), 'csv.reader', 'csv.reader', (['f_in'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(f_in, delimiter=\',\', quotechar=\'"\')\n', (2945, 2981), False, 'import csv\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the ellipse module.
"""
from astropy.coordinates import Angle, SkyCoord
import astropy.units as u
import numpy as np
import pytest
from .test_aperture_common import BaseTestAperture
from ..ellipse import (EllipticalAperture, EllipticalAnnulus,
SkyEllipticalAperture, SkyEllipticalAnnulus)
# Aperture center positions shared by every test below (pixel coordinates).
POSITIONS = [(10, 20), (30, 40), (50, 60), (70, 80)]
RA, DEC = np.transpose(POSITIONS)
# Sky-coordinate counterpart of POSITIONS, reinterpreting (x, y) as (ra, dec) in degrees.
SKYCOORD = SkyCoord(ra=RA, dec=DEC, unit='deg')
UNIT = u.arcsec
# Invalid semi-axis values that every aperture constructor must reject.
RADII = (0.0, -1.0, -np.inf)
class TestEllipticalAperture(BaseTestAperture):
    """Common aperture checks plus parameter validation for EllipticalAperture."""

    aperture = EllipticalAperture(POSITIONS, a=10., b=5., theta=np.pi/2.)

    @staticmethod
    @pytest.mark.parametrize('radius', RADII)
    def test_invalid_params(radius):
        # Zero, negative, or -inf semi-axes must be rejected for both `a` and `b`.
        with pytest.raises(ValueError):
            EllipticalAperture(POSITIONS, a=radius, b=5., theta=np.pi/2.)
        with pytest.raises(ValueError):
            EllipticalAperture(POSITIONS, a=10., b=radius, theta=np.pi/2.)

    def test_copy_eq(self):
        # A copy starts out equal but is independent of the original.
        duplicate = self.aperture.copy()
        assert duplicate == self.aperture
        duplicate.a = 20.
        assert duplicate != self.aperture
class TestEllipticalAnnulus(BaseTestAperture):
    """Common aperture checks plus parameter validation for EllipticalAnnulus."""

    aperture = EllipticalAnnulus(POSITIONS, a_in=10., a_out=20., b_out=17,
                                  theta=np.pi/3)

    @staticmethod
    @pytest.mark.parametrize('radius', RADII)
    def test_invalid_params(radius):
        # Each semi-axis parameter (a_in, a_out, b_out, b_in) must reject
        # zero, negative, and -inf values.
        with pytest.raises(ValueError):
            EllipticalAnnulus(POSITIONS, a_in=radius, a_out=20., b_out=17,
                              theta=np.pi/3)
        with pytest.raises(ValueError):
            EllipticalAnnulus(POSITIONS, a_in=10., a_out=radius, b_out=17,
                              theta=np.pi/3)
        with pytest.raises(ValueError):
            EllipticalAnnulus(POSITIONS, a_in=10., a_out=20., b_out=radius,
                              theta=np.pi/3)
        with pytest.raises(ValueError):
            EllipticalAnnulus(POSITIONS, a_in=10., a_out=20., b_out=17,
                              b_in=radius, theta=np.pi/3)

    def test_copy_eq(self):
        # A copy starts out equal; mutating it must break equality.
        aper = self.aperture.copy()
        assert aper == self.aperture
        aper.a_in = 2.
        assert aper != self.aperture
class TestSkyEllipticalAperture(BaseTestAperture):
    """Common aperture checks plus parameter validation for SkyEllipticalAperture."""

    aperture = SkyEllipticalAperture(SKYCOORD, a=10.*UNIT, b=5.*UNIT,
                                      theta=30*u.deg)

    @staticmethod
    @pytest.mark.parametrize('radius', RADII)
    def test_invalid_params(radius):
        # Invalid angular semi-axes must be rejected for both `a` and `b`.
        with pytest.raises(ValueError):
            SkyEllipticalAperture(SKYCOORD, a=radius*UNIT, b=5.*UNIT,
                                  theta=30*u.deg)
        with pytest.raises(ValueError):
            SkyEllipticalAperture(SKYCOORD, a=10.*UNIT, b=radius*UNIT,
                                  theta=30*u.deg)

    def test_copy_eq(self):
        # A copy starts out equal but is independent of the original.
        duplicate = self.aperture.copy()
        assert duplicate == self.aperture
        duplicate.a = 2. * UNIT
        assert duplicate != self.aperture
class TestSkyEllipticalAnnulus(BaseTestAperture):
    """Common aperture checks plus parameter validation for SkyEllipticalAnnulus."""

    aperture = SkyEllipticalAnnulus(SKYCOORD, a_in=10.*UNIT, a_out=20.*UNIT,
                                     b_out=17.*UNIT, theta=60*u.deg)

    @staticmethod
    @pytest.mark.parametrize('radius', RADII)
    def test_invalid_params(radius):
        # Each angular semi-axis parameter (a_in, a_out, b_out, b_in) must
        # reject zero, negative, and -inf values.
        with pytest.raises(ValueError):
            SkyEllipticalAnnulus(SKYCOORD, a_in=radius*UNIT, a_out=20.*UNIT,
                                 b_out=17.*UNIT, theta=60*u.deg)
        with pytest.raises(ValueError):
            SkyEllipticalAnnulus(SKYCOORD, a_in=10.*UNIT, a_out=radius*UNIT,
                                 b_out=17.*UNIT, theta=60*u.deg)
        with pytest.raises(ValueError):
            SkyEllipticalAnnulus(SKYCOORD, a_in=10.*UNIT, a_out=20.*UNIT,
                                 b_out=radius*UNIT, theta=60*u.deg)
        with pytest.raises(ValueError):
            SkyEllipticalAnnulus(SKYCOORD, a_in=10.*UNIT, a_out=20.*UNIT,
                                 b_out=17.*UNIT, b_in=radius*UNIT,
                                 theta=60*u.deg)

    def test_copy_eq(self):
        # A copy starts out equal; mutating it must break equality.
        aper = self.aperture.copy()
        assert aper == self.aperture
        aper.a_in = 2. * UNIT
        assert aper != self.aperture
def test_ellipse_theta_quantity():
    """theta given as plain radians, Quantity, or Angle must be equivalent."""
    reference = EllipticalAperture(POSITIONS, a=10., b=5., theta=np.pi/2.)
    for theta in (u.Quantity(90 * u.deg), Angle(90 * u.deg)):
        other = EllipticalAperture(POSITIONS, a=10., b=5., theta=theta)
        assert reference._theta_radians == other._theta_radians
def test_ellipse_annulus_theta_quantity():
    """theta given as plain radians, Quantity, or Angle must be equivalent."""
    reference = EllipticalAnnulus(POSITIONS, a_in=10., a_out=20., b_out=17,
                                  theta=np.pi/3)
    for theta in (u.Quantity(60 * u.deg), Angle(60 * u.deg)):
        other = EllipticalAnnulus(POSITIONS, a_in=10., a_out=20., b_out=17,
                                  theta=theta)
        assert reference._theta_radians == other._theta_radians
| [
"astropy.units.Quantity",
"numpy.transpose",
"pytest.raises",
"pytest.mark.parametrize",
"astropy.coordinates.Angle",
"astropy.coordinates.SkyCoord"
] | [((457, 480), 'numpy.transpose', 'np.transpose', (['POSITIONS'], {}), '(POSITIONS)\n', (469, 480), True, 'import numpy as np\n'), ((492, 528), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'RA', 'dec': 'DEC', 'unit': '"""deg"""'}), "(ra=RA, dec=DEC, unit='deg')\n", (500, 528), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((722, 762), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""radius"""', 'RADII'], {}), "('radius', RADII)\n", (745, 762), False, 'import pytest\n'), ((1385, 1425), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""radius"""', 'RADII'], {}), "('radius', RADII)\n", (1408, 1425), False, 'import pytest\n'), ((2476, 2516), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""radius"""', 'RADII'], {}), "('radius', RADII)\n", (2499, 2516), False, 'import pytest\n'), ((3262, 3302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""radius"""', 'RADII'], {}), "('radius', RADII)\n", (3285, 3302), False, 'import pytest\n'), ((4405, 4427), 'astropy.units.Quantity', 'u.Quantity', (['(90 * u.deg)'], {}), '(90 * u.deg)\n', (4415, 4427), True, 'import astropy.units as u\n'), ((4508, 4525), 'astropy.coordinates.Angle', 'Angle', (['(90 * u.deg)'], {}), '(90 * u.deg)\n', (4513, 4525), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((4881, 4903), 'astropy.units.Quantity', 'u.Quantity', (['(60 * u.deg)'], {}), '(60 * u.deg)\n', (4891, 4903), True, 'import astropy.units as u\n'), ((5031, 5048), 'astropy.coordinates.Angle', 'Angle', (['(60 * u.deg)'], {}), '(60 * u.deg)\n', (5036, 5048), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((813, 838), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (826, 838), False, 'import pytest\n'), ((927, 952), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (940, 952), False, 'import pytest\n'), ((1476, 1501), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1489, 
1501), False, 'import pytest\n'), ((1636, 1661), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1649, 1661), False, 'import pytest\n'), ((1796, 1821), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1809, 1821), False, 'import pytest\n'), ((1957, 1982), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1970, 1982), False, 'import pytest\n'), ((2567, 2592), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2580, 2592), False, 'import pytest\n'), ((2727, 2752), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2740, 2752), False, 'import pytest\n'), ((3353, 3378), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3366, 3378), False, 'import pytest\n'), ((3535, 3560), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3548, 3560), False, 'import pytest\n'), ((3717, 3742), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3730, 3742), False, 'import pytest\n'), ((3899, 3924), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3912, 3924), False, 'import pytest\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 14:38:58 2017
@author: bonny
"""
from sklearn.metrics import explained_variance_score
import pandas as pd
from sklearn.model_selection import KFold
from sklearn import linear_model, ensemble
import numpy as np
from CleanHousingData import CleanHousingData,NormalizeHousingData
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Read the train file
# Load the raw training data.
tr_data = pd.read_csv('train.csv')
# Print head and description (disabled)
#print(tr_data.head())
#print(tr_data.describe())
# Clean the data (project-specific preprocessing).
tr_data = CleanHousingData(tr_data)
# Choose predictors: every column except the first (id) and the target.
predictors = list(tr_data.columns[1:].values)
predictors.remove('SalePrice')
# Split the data with the folds.
# NOTE(review): the loop body only reassigns trainsplit/testsplit, so only the
# LAST fold's split is actually used by the code below.
kf = KFold(n_splits=3, random_state=1, shuffle=True)
for train_index, test_index in kf.split(tr_data):
    trainsplit = tr_data.iloc[train_index,:]
    testsplit = tr_data.iloc[test_index,:]
#Finding out which algorithm adjusts better to the data
#Create the algorithm dictionary
# Candidate regressors to compare on the held-out split.
# (The original also instantiated linear_model.SGDRegressor() but never used
# it, so it has been removed.)
ARD = linear_model.ARDRegression()
LinRe = linear_model.LinearRegression()
BR = linear_model.BayesianRidge()
Lars = linear_model.Lars()
Lasso = linear_model.Lasso()
PA = linear_model.PassiveAggressiveRegressor()
RANSAC = linear_model.RANSACRegressor()
Gboost = ensemble.GradientBoostingRegressor()
algorithms = {'Linear Regression':LinRe,
              'Bayesian ARD regression':ARD,
              'BayesianRidge': BR,'Lars': Lars,
              'Lasso':Lasso,
              'PassiveAggressiveRegressor':PA ,
              'RANSACRegressor':RANSAC,
              'GradientBoostingRegressor':Gboost
              }
# Plot color per algorithm (used by the figures below).
color = {'Linear Regression':'blue','Bayesian ARD regression':'red',
         'BayesianRidge':'yellow', 'Lars': 'green','Lasso': 'orange'
         ,'PassiveAggressiveRegressor':'purple','RANSACRegressor':'brown',
         'GradientBoostingRegressor':'cyan'}
# Track the two best models, ranked by variance (R^2) score, with lower mean
# squared error breaking ties.
bestvarscore = 0
bestmeanerror = float('inf')
secondbestvarscore = 0
secondbestmeanerror = float('inf')
bestalg = 0
secondbestalg = 0
SecondBestOutput = 0
BestOutput = 0
allpredictions = []
for alg in algorithms:
    # Fit on the training fold, predict on the held-out fold.
    algorithms[alg].fit(trainsplit[predictors], trainsplit['SalePrice'])
    Output = algorithms[alg].predict(testsplit[predictors])
    allpredictions.append(Output)
    # The mean squared error
    meanerror = np.mean((Output - testsplit['SalePrice']) ** 2)
    print(alg + ' ' + " Mean squared error: %.3f" % meanerror)
    # Explained variance score: 1 is perfect prediction
    varscore = algorithms[alg].score(testsplit[predictors], testsplit['SalePrice'])
    print(alg + ' ' + ' Variance score: %.3f' % varscore)
    if varscore > bestvarscore or (varscore == bestvarscore and
                                   meanerror < bestmeanerror):
        # New best model: the previous best is demoted to second best.
        SecondBestOutput = BestOutput
        secondbestalg = bestalg
        secondbestvarscore = bestvarscore
        secondbestmeanerror = bestmeanerror
        BestOutput = Output
        bestalg = alg
        bestvarscore = varscore
        bestmeanerror = meanerror
    elif varscore > secondbestvarscore or (varscore == secondbestvarscore and
                                           meanerror < secondbestmeanerror):
        # New second-best model.
        SecondBestOutput = Output
        secondbestalg = alg
        secondbestvarscore = varscore
        # BUG FIX: previously assigned `bestmeanerror` here, so the recorded
        # second-best error never reflected the second-best model.
        secondbestmeanerror = meanerror
# Report and plot the best model's predictions against the true prices.
print("Best fitted algorithm is: " + bestalg + " with %.3f " % bestvarscore + 'of variance score')
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(BestOutput, testsplit['SalePrice'], color='black')
ax.plot(BestOutput, testsplit['SalePrice'], color[bestalg])
plt.title(bestalg)
plt.xticks(())
plt.yticks(())
ax.set_zticks(())
plt.show()
# Report and plot the second-best model.
print("Second Best fitted algorithm is: " + secondbestalg + " with %.3f " % secondbestvarscore + 'of variance score')
fig = plt.figure()
ax = fig.gca(projection='3d')
# BUG FIX: the original rebound `ax = fig.gca()` (a 2-D axes without
# set_zticks) and plotted BestOutput instead of SecondBestOutput.
ax.scatter(SecondBestOutput, testsplit['SalePrice'], color='black')
ax.plot(SecondBestOutput, testsplit['SalePrice'], color[secondbestalg])
plt.title(secondbestalg)
plt.xticks(())
plt.yticks(())
ax.set_zticks(())
plt.show()
# Simple ensemble baseline: average every model's predictions and score it.
averageprediction = sum(allpredictions)/len(allpredictions)
AverageScore = explained_variance_score(testsplit['SalePrice'],averageprediction)
print('Average: %.3f' % AverageScore) | [
"matplotlib.pyplot.title",
"pandas.read_csv",
"sklearn.ensemble.GradientBoostingRegressor",
"matplotlib.pyplot.figure",
"numpy.mean",
"sklearn.linear_model.PassiveAggressiveRegressor",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"CleanHousingData.CleanHousingData",
"sklearn.linear_mode... | [((454, 478), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (465, 478), True, 'import pandas as pd\n'), ((585, 610), 'CleanHousingData.CleanHousingData', 'CleanHousingData', (['tr_data'], {}), '(tr_data)\n', (601, 610), False, 'from CleanHousingData import CleanHousingData, NormalizeHousingData\n'), ((746, 793), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(3)', 'random_state': '(1)', 'shuffle': '(True)'}), '(n_splits=3, random_state=1, shuffle=True)\n', (751, 793), False, 'from sklearn.model_selection import KFold\n'), ((1030, 1058), 'sklearn.linear_model.ARDRegression', 'linear_model.ARDRegression', ([], {}), '()\n', (1056, 1058), False, 'from sklearn import linear_model, ensemble\n'), ((1067, 1098), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1096, 1098), False, 'from sklearn import linear_model, ensemble\n'), ((1105, 1132), 'sklearn.linear_model.SGDRegressor', 'linear_model.SGDRegressor', ([], {}), '()\n', (1130, 1132), False, 'from sklearn import linear_model, ensemble\n'), ((1137, 1165), 'sklearn.linear_model.BayesianRidge', 'linear_model.BayesianRidge', ([], {}), '()\n', (1163, 1165), False, 'from sklearn import linear_model, ensemble\n'), ((1173, 1192), 'sklearn.linear_model.Lars', 'linear_model.Lars', ([], {}), '()\n', (1190, 1192), False, 'from sklearn import linear_model, ensemble\n'), ((1201, 1221), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {}), '()\n', (1219, 1221), False, 'from sklearn import linear_model, ensemble\n'), ((1227, 1268), 'sklearn.linear_model.PassiveAggressiveRegressor', 'linear_model.PassiveAggressiveRegressor', ([], {}), '()\n', (1266, 1268), False, 'from sklearn import linear_model, ensemble\n'), ((1278, 1308), 'sklearn.linear_model.RANSACRegressor', 'linear_model.RANSACRegressor', ([], {}), '()\n', (1306, 1308), False, 'from sklearn import linear_model, ensemble\n'), ((1318, 1354), 
'sklearn.ensemble.GradientBoostingRegressor', 'ensemble.GradientBoostingRegressor', ([], {}), '()\n', (1352, 1354), False, 'from sklearn import linear_model, ensemble\n'), ((3868, 3880), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3878, 3880), True, 'import matplotlib.pyplot as plt\n'), ((4034, 4052), 'matplotlib.pyplot.title', 'plt.title', (['bestalg'], {}), '(bestalg)\n', (4043, 4052), True, 'import matplotlib.pyplot as plt\n'), ((4053, 4067), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (4063, 4067), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4082), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (4078, 4082), True, 'import matplotlib.pyplot as plt\n'), ((4101, 4111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4109, 4111), True, 'import matplotlib.pyplot as plt\n'), ((4254, 4266), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4264, 4266), True, 'import matplotlib.pyplot as plt\n'), ((4441, 4465), 'matplotlib.pyplot.title', 'plt.title', (['secondbestalg'], {}), '(secondbestalg)\n', (4450, 4465), True, 'import matplotlib.pyplot as plt\n'), ((4466, 4480), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (4476, 4480), True, 'import matplotlib.pyplot as plt\n'), ((4481, 4495), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (4491, 4495), True, 'import matplotlib.pyplot as plt\n'), ((4514, 4524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4522, 4524), True, 'import matplotlib.pyplot as plt\n'), ((4603, 4670), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (["testsplit['SalePrice']", 'averageprediction'], {}), "(testsplit['SalePrice'], averageprediction)\n", (4627, 4670), False, 'from sklearn.metrics import explained_variance_score\n'), ((2359, 2406), 'numpy.mean', 'np.mean', (["((Output - testsplit['SalePrice']) ** 2)"], {}), "((Output - testsplit['SalePrice']) ** 2)\n", (2366, 2406), True, 'import 
numpy as np\n')] |
import os
import sys
import gzip
import zlib
import json
import bz2
import tempfile
import requests
import subprocess
from aenum import Enum
import capnp
import numpy as np
import platform
from tools.lib.exceptions import DataUnreadableError
try:
from xx.chffr.lib.filereader import FileReader
except ImportError:
from tools.lib.filereader import FileReader
from tools.lib.log_util import convert_old_pkt_to_new
from cereal import log as capnp_log
# Root of the openpilot checkout, derived from the installed cereal package.
OP_PATH = os.path.dirname(os.path.dirname(capnp_log.__file__))


def index_log(fn):
  """Run the `index_log` helper binary over a raw capnp log file.

  Returns a np.uint64 array of byte offsets, one per event in `fn`.
  Raises DataUnreadableError if the binary exits non-zero (typically a
  corrupted or truncated file).
  """
  index_log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "index_log")
  # Renamed from `index_log`: the original local shadowed this function's name.
  index_log_bin = os.path.join(index_log_dir, "index_log")
  # NOTE: the binary used to be (re)built on demand here:
  #   subprocess.check_call(["make", "PHONELIBS=" + os.path.join(OP_PATH, 'phonelibs')],
  #                         cwd=index_log_dir, stdout=subprocess.DEVNULL)
  try:
    dat = subprocess.check_output([index_log_bin, fn, "-"])
  except subprocess.CalledProcessError:
    raise DataUnreadableError("%s capnp is corrupted/truncated" % fn)
  return np.frombuffer(dat, dtype=np.uint64)
def event_read_multiple_bytes(dat):
  """Split a raw capnp byte string into a list of individual Event messages."""
  # The indexer works on files, so spill the bytes to a temporary file first.
  with tempfile.NamedTemporaryFile() as tmp:
    tmp.write(dat)
    tmp.flush()
    offsets = index_log(tmp.name)

  # Append the total length so each event has both a start and an end offset.
  offsets = np.append(offsets, np.uint64(len(dat)))
  return [capnp_log.Event.from_bytes(dat[offsets[i]:offsets[i + 1]])
          for i in range(len(offsets) - 1)]
# this is an iterator itself, and uses private variables from LogReader
class MultiLogIterator(object):
  """Iterates over events across a sequence of log segments.

  `log_paths` is an ordered list of file paths where missing segments are
  represented by None; those entries are skipped. LogReaders are created
  lazily, one per segment. With wraparound=True, iteration restarts from the
  first available segment after the last one is exhausted; otherwise
  StopIteration is raised.
  """

  def __init__(self, log_paths, wraparound=True):
    self._log_paths = log_paths
    self._wraparound = wraparound
    # Index of the first non-missing segment; raises StopIteration via next()
    # if every entry is None.
    self._first_log_idx = next(i for i in range(len(log_paths)) if log_paths[i] is not None)
    self._current_log = self._first_log_idx
    self._idx = 0
    # Lazily-populated cache of LogReader objects, one slot per segment.
    self._log_readers = [None]*len(log_paths)
    # Monotonic timestamp (ns) of the very first event, used by tell().
    self.start_time = self._log_reader(self._first_log_idx)._ts[0]

  def _log_reader(self, i):
    # Construct (and cache) the LogReader for segment i on first use.
    if self._log_readers[i] is None and self._log_paths[i] is not None:
      log_path = self._log_paths[i]
      print("LogReader:%s" % log_path)
      self._log_readers[i] = LogReader(log_path)

    return self._log_readers[i]

  def __iter__(self):
    return self

  def _inc(self):
    # Advance to the next event; when the current segment is exhausted, move
    # to the next available segment (or wrap around to the first).
    lr = self._log_reader(self._current_log)
    if self._idx < len(lr._ents)-1:
      self._idx += 1
    else:
      self._idx = 0
      # Next index with an existing path, or len(...) if none remain.
      self._current_log = next(i for i in range(self._current_log + 1, len(self._log_readers) + 1)
                               if i == len(self._log_readers) or self._log_paths[i] is not None)

      # wraparound
      if self._current_log == len(self._log_readers):
        if self._wraparound:
          self._current_log = self._first_log_idx
        else:
          raise StopIteration

  def __next__(self):
    while 1:
      lr = self._log_reader(self._current_log)
      ret = lr._ents[self._idx]
      if lr._do_conversion:
        # Canonicalize old-format packets before handing them out.
        ret = convert_old_pkt_to_new(ret, lr.data_version)
      self._inc()
      return ret

  def tell(self):
    # returns seconds from start of log
    return (self._log_reader(self._current_log)._ts[self._idx] - self.start_time) * 1e-9

  def seek(self, ts):
    # seek to nearest minute
    # NOTE(review): assumes one segment per minute of log — confirm.
    minute = int(ts/60)
    if minute >= len(self._log_paths) or self._log_paths[minute] is None:
      return False

    self._current_log = minute

    # HACK: O(n) seek afterward
    self._idx = 0
    while self.tell() < ts:
      self._inc()
    return True
class LogReader(object):
  """Reads a single log file into a list of events.

  Supports several historical on-disk formats, dispatched on the file
  extension and filename: raw capnp (no extension / .bz2), gzipped capnp
  ("log2*.gz"), and newline-delimited JSON (old "log_*.gz" and .7z logs).
  JSON-format logs are optionally canonicalized to the current capnp schema
  via convert_old_pkt_to_new.
  """

  def __init__(self, fn, canonicalize=True, only_union_types=False):
    _, ext = os.path.splitext(fn)
    data_version = None
    with FileReader(fn) as f:
      dat = f.read()

    # decompress file
    if ext == ".gz" and ("log_" in fn or "log2" in fn):
      # MAX_WBITS | 32 lets zlib auto-detect the gzip header.
      dat = zlib.decompress(dat, zlib.MAX_WBITS | 32)
    elif ext == ".bz2":
      dat = bz2.decompress(dat)
    elif ext == ".7z":
      if platform.system() == "Darwin":
        os.environ["LA_LIBRARY_FILEPATH"] = "/usr/local/opt/libarchive/lib/libarchive.dylib"
      import libarchive.public

      with libarchive.public.memory_reader(dat) as aa:
        mdat = []
        for it in aa:
          for bb in it.get_blocks():
            mdat.append(bb)
        dat = ''.join(mdat)

    # TODO: extension shouln't be a proxy for DeviceType
    if ext == "":
      if dat[0] == "[":
        # NOTE(review): on python3 `dat` is bytes, so dat[0] is an int and
        # this comparison is always False — looks like a python2-era JSON
        # log path; confirm whether it is still reachable.
        needs_conversion = True
        ents = [json.loads(x) for x in dat.strip().split("\n")[:-1]]
        if "_" in fn:
          data_version = fn.split("_")[1]
      else:
        # old rlogs weren't bz2 compressed
        needs_conversion = False
        ents = event_read_multiple_bytes(dat)
    elif ext == ".gz":
      if "log_" in fn:
        # Zero data file.
        ents = [json.loads(x) for x in dat.strip().split("\n")[:-1]]
        needs_conversion = True
      elif "log2" in fn:
        needs_conversion = False
        ents = event_read_multiple_bytes(dat)
      else:
        raise Exception("unknown extension")
    elif ext == ".bz2":
      needs_conversion = False
      ents = event_read_multiple_bytes(dat)
    elif ext == ".7z":
      needs_conversion = True
      ents = [json.loads(x) for x in dat.strip().split("\n")]
    else:
      raise Exception("unknown extension")

    if needs_conversion:
      # TODO: should we call convert_old_pkt_to_new to generate this?
      # JSON entries store a (sec, nsec)-style timestamp in x[0]; scale to ns.
      self._ts = [x[0][0]*1e9 for x in ents]
    else:
      self._ts = [x.logMonoTime for x in ents]
    self.data_version = data_version
    self._do_conversion = needs_conversion and canonicalize
    self._only_union_types = only_union_types
    self._ents = ents

  def __iter__(self):
    for ent in self._ents:
      if self._do_conversion:
        yield convert_old_pkt_to_new(ent, self.data_version)
      elif self._only_union_types:
        try:
          # which() raises for non-union events; those are skipped.
          ent.which()
          yield ent
        except capnp.lib.capnp.KjException:
          pass
      else:
        yield ent
def load_many_logs_canonical(log_paths):
  """Yield every canonicalized message from each log path in order."""
  for log_path in log_paths:
    yield from LogReader(log_path)
if __name__ == "__main__":
  # Debug entry point: dump every message from the log given on the command line.
  log_path = sys.argv[1]
  lr = LogReader(log_path)
  for msg in lr:
    print(msg)
| [
"tempfile.NamedTemporaryFile",
"cereal.log.Event.from_bytes",
"tools.lib.log_util.convert_old_pkt_to_new",
"json.loads",
"tools.lib.filereader.FileReader",
"numpy.frombuffer",
"os.path.dirname",
"subprocess.check_output",
"os.path.realpath",
"tools.lib.exceptions.DataUnreadableError",
"numpy.app... | [((485, 520), 'os.path.dirname', 'os.path.dirname', (['capnp_log.__file__'], {}), '(capnp_log.__file__)\n', (500, 520), False, 'import os\n'), ((650, 690), 'os.path.join', 'os.path.join', (['index_log_dir', '"""index_log"""'], {}), "(index_log_dir, 'index_log')\n", (662, 690), False, 'import os\n'), ((711, 745), 'os.path.join', 'os.path.join', (['OP_PATH', '"""phonelibs"""'], {}), "(OP_PATH, 'phonelibs')\n", (723, 745), False, 'import os\n'), ((1057, 1092), 'numpy.frombuffer', 'np.frombuffer', (['dat'], {'dtype': 'np.uint64'}), '(dat, dtype=np.uint64)\n', (1070, 1092), True, 'import numpy as np\n'), ((1308, 1331), 'numpy.append', 'np.append', (['idx', 'end_idx'], {}), '(idx, end_idx)\n', (1317, 1331), True, 'import numpy as np\n'), ((884, 929), 'subprocess.check_output', 'subprocess.check_output', (["[index_log, fn, '-']"], {}), "([index_log, fn, '-'])\n", (907, 929), False, 'import subprocess\n'), ((1140, 1169), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1167, 1169), False, 'import tempfile\n'), ((1345, 1395), 'cereal.log.Event.from_bytes', 'capnp_log.Event.from_bytes', (['dat[idx[i]:idx[i + 1]]'], {}), '(dat[idx[i]:idx[i + 1]])\n', (1371, 1395), True, 'from cereal import log as capnp_log\n'), ((3829, 3849), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (3845, 3849), False, 'import os\n'), ((592, 618), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (608, 618), False, 'import os\n'), ((986, 1045), 'tools.lib.exceptions.DataUnreadableError', 'DataUnreadableError', (["('%s capnp is corrupted/truncated' % fn)"], {}), "('%s capnp is corrupted/truncated' % fn)\n", (1005, 1045), False, 'from tools.lib.exceptions import DataUnreadableError\n'), ((3892, 3906), 'tools.lib.filereader.FileReader', 'FileReader', (['fn'], {}), '(fn)\n', (3902, 3906), False, 'from tools.lib.filereader import FileReader\n'), ((4045, 4086), 'zlib.decompress', 'zlib.decompress', (['dat', 
'(zlib.MAX_WBITS | 32)'], {}), '(dat, zlib.MAX_WBITS | 32)\n', (4060, 4086), False, 'import zlib\n'), ((3104, 3148), 'tools.lib.log_util.convert_old_pkt_to_new', 'convert_old_pkt_to_new', (['ret', 'lr.data_version'], {}), '(ret, lr.data_version)\n', (3126, 3148), False, 'from tools.lib.log_util import convert_old_pkt_to_new\n'), ((4133, 4152), 'bz2.decompress', 'bz2.decompress', (['dat'], {}), '(dat)\n', (4147, 4152), False, 'import bz2\n'), ((4778, 4791), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (4788, 4791), False, 'import json\n'), ((6310, 6356), 'tools.lib.log_util.convert_old_pkt_to_new', 'convert_old_pkt_to_new', (['ent', 'self.data_version'], {}), '(ent, self.data_version)\n', (6332, 6356), False, 'from tools.lib.log_util import convert_old_pkt_to_new\n'), ((4195, 4212), 'platform.system', 'platform.system', ([], {}), '()\n', (4210, 4212), False, 'import platform\n'), ((5191, 5204), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (5201, 5204), False, 'import json\n'), ((5679, 5692), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (5689, 5692), False, 'import json\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 TsinghuaAI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Many thanks for following projects.
# https://github.com/TsinghuaAI/CPM-Generate
# https://github.com/jm12138/CPM-Generate-Paddle
import sys
import argparse
import numpy as np
import paddle
from paddlenlp.transformers import GPTModel, GPTForGreedyGeneration
from paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer
from paddlenlp.utils.log import logger
# Maps model-type key -> (model class, tokenizer class).
MODEL_CLASSES = {
    "gpt-cn": (GPTForGreedyGeneration, GPTChineseTokenizer),
    "gpt": (GPTForGreedyGeneration, GPTTokenizer),
}
class Demo:
    """Greedy-generation demo wrapping a pretrained GPT model and tokenizer."""

    def __init__(self,
                 model_type="gpt-cn",
                 model_name_or_path="gpt-cpm-large-cn",
                 max_predict_len=32):
        model_class, tokenizer_class = MODEL_CLASSES[model_type]
        self.tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
        logger.info('Loading the model parameters, please wait...')
        self.model = model_class.from_pretrained(
            model_name_or_path,
            max_predict_len=max_predict_len,
            eol_token_id=self.tokenizer.eol_token_id)
        self.model.eval()
        logger.info('Model loaded.')

    def predict(self, text):
        """Greedily generate a continuation of `text` and log the result."""
        token_ids = self.tokenizer(text)["input_ids"]
        # Shape the ids as a single-example batch of int64 for the model.
        batch = np.array(token_ids).reshape(1, -1).astype('int64')
        generated = self.model(paddle.to_tensor(batch))
        id_list = [int(token) for token in generated.numpy().reshape([-1])]
        logger.info(self.tokenizer.convert_ids_to_string(id_list))

    # One-shot question answering (Chinese prompt).
    def ask_question_cn(self, question):
        self.predict("问题:中国的首都是哪里?答案:北京。\n问题:%s 答案:" % question)

    # One-shot question answering (English prompt).
    def ask_question_en(self, question):
        self.predict(
            "Question: Where is the capital of China? Answer: Beijing. \n Question:%s Answer:"
            % question)

    # Poetry continuation from a one-shot example.
    def dictation_poetry_cn(self, front):
        self.predict('''默写古诗: 大漠孤烟直,长河落日圆。\n%s''' % front)
if __name__ == "__main__":
    # `python <script> gpt-cn` runs the Chinese model; anything else runs English.
    if len(sys.argv) > 1 and sys.argv[1] == "gpt-cn":
        demo = Demo("gpt-cn", "gpt-cpm-large-cn")
        demo.ask_question_cn("苹果的CEO是谁?")
        demo.dictation_poetry_cn("举杯邀明月,")
    else:
        demo = Demo("gpt", "gpt2-medium-en")
        demo.ask_question_en("Who is the CEO of Apple?")
| [
"paddlenlp.utils.log.logger.info",
"numpy.array"
] | [((1474, 1533), 'paddlenlp.utils.log.logger.info', 'logger.info', (['"""Loading the model parameters, please wait..."""'], {}), "('Loading the model parameters, please wait...')\n", (1485, 1533), False, 'from paddlenlp.utils.log import logger\n'), ((1749, 1777), 'paddlenlp.utils.log.logger.info', 'logger.info', (['"""Model loaded."""'], {}), "('Model loaded.')\n", (1760, 1777), False, 'from paddlenlp.utils.log import logger\n'), ((1932, 1945), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (1940, 1945), True, 'import numpy as np\n')] |
'''Evaluation: evaluates a batch of experiments.
* loads multi experiment files.
* verify parameters -- if they're compatible proceed
* for each experiment
loads all Q-tables, from that experiment.
filter Q-tables from S to S steps.
for each table runs R rollouts (defaults 1).
'''
__author__ = '<NAME>'
__date__ = '2020-04-07'
from os import environ
from pathlib import Path
import json
import re
import random
from copy import deepcopy
import configargparse
import dill
import numpy as np
from flow.core.params import SumoParams, EnvParams
from ilurl.core.params import QLParams
from ilurl.core.experiment import Experiment
import ilurl.core.ql.dpq as ql
from ilurl.envs.base import TrafficLightEnv
from ilurl.networks.base import Network
#TODO: move this into network
from ilurl.loaders.nets import get_tls_custom
ILURL_HOME = environ['ILURL_HOME']
# Matches rollout Q-table filenames such as "Q.1-123.pickle" and captures
# the trailing number.
Q_FINDER_PROG = re.compile(r'Q.1-(\d+)')


def search_Q(x):
    """Return the rollout number embedded in a Q-table path string."""
    match = Q_FINDER_PROG.search(x)
    if match is None:
        raise ValueError('Q-table rollout number not found')
    return int(match.group(1))
def get_arguments(config_file_path):
    """Parse command-line (and optional config-file) arguments for a rollout.

    config_file_path: optional list of config file paths consumed by
    configargparse; None is treated as no config files.
    """
    if config_file_path is None:
        config_file_path = []

    parser = configargparse.ArgumentParser(
        default_config_files=config_file_path,
        description="""
            This script performs a single rollout from a Q table
        """
    )
    parser.add_argument('--rollout-path', '-q', dest='rollout_path',
                        type=str, nargs='?',
                        help='''The path Q.1-xxx.pickle files''')

    parser.add_argument('--cycles', '-c', dest='cycles', type=int,
                        default=100, nargs='?',
                        help='Number of cycles for a single rollout of a Q-table.')

    parser.add_argument('--emission', '-e', dest='emission', type=str2bool,
                        default=False, nargs='?',
                        help='Enabled will perform saves')

    # NOTE(review): `.add` is presumably configargparse's alias for
    # `.add_argument` — confirm; the rest of this file uses add_argument.
    parser.add('--rollout-seed', '-d', dest='seed', type=int,
               default=None, nargs='?',
               help='''Sets seed value for both rl agent and Sumo.
               `None` for rl agent defaults to RandomState()
               `None` for Sumo defaults to a fixed but arbitrary seed''')

    parser.add_argument('--num-rollouts', '-r', dest='num_rollouts',
                        type=int, default=1, nargs='?',
                        help='''Number of repetitions for each table''')

    parser.add_argument('--switch', '-w', dest='switch', type=str2bool,
                        default=False, nargs='?',
                        help=
                        '''Rollout demand distribution can be either
                        `lane` or `switch` defaults to lane''')

    return parser.parse_args()
def str2bool(v):
    """Coerce a CLI value into a bool.

    Accepts actual booleans unchanged, and the usual textual truthy/falsy
    spellings case-insensitively. Anything else raises
    configargparse.ArgumentTypeError.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise configargparse.ArgumentTypeError('Boolean value expected.')
def evaluate(env_params, sim_params, programs,
             agent, network, horizon, ex_id, roll_id, qtb):
    """Evaluate

    Runs one evaluation-mode (no training) experiment of `horizon` steps,
    optionally loading a fixed Q-table into the environment.

    Params:
    -------
    * env_params: ilurl.core.params.EnvParams
        objects parameters

    * sim_params: ilurl.core.params.SumoParams
        objects parameters

    * programs: dict<string, dict<int, list<int>>
        keys: junction_id, action_id
        values: list of durations

    * agent: ilurl.dpq.MAIQ
        tabular Q-learning agent

    * network: ilurl.networks.Network
        object representing the network

    * horizon: int
        number of steps

    * ex_id: experiment identifier stored on the result

    * roll_id: rollout number stored on the result

    * qtb: dict<string, dict>
        keys: q-table id, values: number of steps; None leaves the
        environment's own Q-table untouched

    Returns:
    --------
    * info: dict
        evaluation metrics for experiment
    """
    if sim_params.emission_path:
        dir_path = sim_params.emission_path
        # old emission pattern: network.name-emission.xml
        # new emission pattern: network.name.ex_id-emission.xml
        # Deep-copy so renaming doesn't mutate the caller's network object.
        network = deepcopy(network)
        network.name = f'{network.name}.{roll_id}'
    else:
        dir_path = None

    env1 = TrafficLightEnv(
        env_params,
        sim_params,
        agent,
        network,
        TLS_programs=programs
    )

    if qtb is not None:
        # Pre-load the trained Q-table and freeze learning.
        env1.Q = qtb

    env1.stop = True
    exp = Experiment(
        env1,
        dir_path=dir_path,
        train=False,
    )

    result = exp.run(horizon)
    result['id'] = ex_id
    result['discount'] = agent.ql_params.gamma
    if sim_params.seed:
        result['seed'] = [sim_params.seed]
    result['rollouts'] = [roll_id]
    return result
def roll(config_file_path=None):
    """Perform a single rollout from a stored Q-table and return metrics."""
    args = get_arguments(config_file_path)
    rollout_path = Path(args.rollout_path)
    cycles = args.cycles

    # Load the Q-table produced during training.
    with rollout_path.open('rb') as fid:
        qtb = dill.load(fid)
    if qtb is None:
        raise ValueError('Q is None')
    rollout_number = search_Q(str(rollout_path))

    # Pick up the experiment-parameters file next to the Q-table.
    params = None
    for params_path in rollout_path.parent.glob('*.params.json'):
        with params_path.open('r') as fid:
            params = json.load(fid)
        break  # There should be only one match
    if params is None:
        raise ValueError('params is None')

    # TODO: test ground truth
    params['sumo_args']['render'] = False
    if args.emission:
        # Write emission XML next to the rollout file.
        rollout_path = Path(rollout_path).parents[0]
        params['sumo_args']['emission_path'] = rollout_path
    elif 'emission_path' in params['sumo_args']:
        del params['sumo_args']['emission_path']

    if args.seed:
        random.seed(args.seed)
        np.random.seed(args.seed)
        params['sumo_args']['seed'] = args.seed

    # Load cycle time and TLS programs, then build agent and environment.
    ex_id = rollout_path.parts[-2]
    network = Network(**params['network_args'])
    cycle_time, programs = get_tls_custom(network.network_id)
    ql_params = QLParams(**params['ql_args'])
    cls_agent = getattr(ql, ql_params.agent_id)
    agent = cls_agent(ql_params)
    env_params = EnvParams(**params['env_args'])
    sim_params = SumoParams(**params['sumo_args'])
    horizon = int((cycle_time * cycles) / sim_params.sim_step)

    info = evaluate(env_params, sim_params, programs,
                    agent, network, horizon, ex_id, rollout_number, qtb)
    info['horizon'] = horizon
    return info


if __name__ == '__main__':
    roll()
| [
"copy.deepcopy",
"json.load",
"ilurl.networks.base.Network",
"flow.core.params.EnvParams",
"numpy.random.seed",
"ilurl.envs.base.TrafficLightEnv",
"ilurl.loaders.nets.get_tls_custom",
"configargparse.ArgumentTypeError",
"flow.core.params.SumoParams",
"dill.load",
"pathlib.Path",
"ilurl.core.ex... | [((911, 935), 're.compile', 're.compile', (['"""Q.1-(\\\\d+)"""'], {}), "('Q.1-(\\\\d+)')\n", (921, 935), False, 'import re\n'), ((1234, 1410), 'configargparse.ArgumentParser', 'configargparse.ArgumentParser', ([], {'default_config_files': 'config_file_path', 'description': '"""\n This script performs a single rollout from a Q table\n """'}), '(default_config_files=config_file_path,\n description=\n """\n This script performs a single rollout from a Q table\n """\n )\n', (1263, 1410), False, 'import configargparse\n'), ((4335, 4413), 'ilurl.envs.base.TrafficLightEnv', 'TrafficLightEnv', (['env_params', 'sim_params', 'agent', 'network'], {'TLS_programs': 'programs'}), '(env_params, sim_params, agent, network, TLS_programs=programs)\n', (4350, 4413), False, 'from ilurl.envs.base import TrafficLightEnv\n'), ((4537, 4585), 'ilurl.core.experiment.Experiment', 'Experiment', (['env1'], {'dir_path': 'dir_path', 'train': '(False)'}), '(env1, dir_path=dir_path, train=False)\n', (4547, 4585), False, 'from ilurl.core.experiment import Experiment\n'), ((4936, 4959), 'pathlib.Path', 'Path', (['args.rollout_path'], {}), '(args.rollout_path)\n', (4940, 4959), False, 'from pathlib import Path\n'), ((6118, 6151), 'ilurl.networks.base.Network', 'Network', ([], {}), "(**params['network_args'])\n", (6125, 6151), False, 'from ilurl.networks.base import Network\n'), ((6179, 6213), 'ilurl.loaders.nets.get_tls_custom', 'get_tls_custom', (['network.network_id'], {}), '(network.network_id)\n', (6193, 6213), False, 'from ilurl.loaders.nets import get_tls_custom\n'), ((6230, 6259), 'ilurl.core.params.QLParams', 'QLParams', ([], {}), "(**params['ql_args'])\n", (6238, 6259), False, 'from ilurl.core.params import QLParams\n'), ((6359, 6390), 'flow.core.params.EnvParams', 'EnvParams', ([], {}), "(**params['env_args'])\n", (6368, 6390), False, 'from flow.core.params import SumoParams, EnvParams\n'), ((6408, 6441), 'flow.core.params.SumoParams', 'SumoParams', ([], {}), 
"(**params['sumo_args'])\n", (6418, 6441), False, 'from flow.core.params import SumoParams, EnvParams\n'), ((4219, 4236), 'copy.deepcopy', 'deepcopy', (['network'], {}), '(network)\n', (4227, 4236), False, 'from copy import deepcopy\n'), ((5120, 5132), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (5129, 5132), False, 'import dill\n'), ((5655, 5673), 'pathlib.Path', 'Path', (['rollout_path'], {}), '(rollout_path)\n', (5659, 5673), False, 'from pathlib import Path\n'), ((5922, 5944), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (5933, 5944), False, 'import random\n'), ((5953, 5978), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5967, 5978), True, 'import numpy as np\n'), ((3095, 3154), 'configargparse.ArgumentTypeError', 'configargparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (3127, 3154), False, 'import configargparse\n'), ((5409, 5421), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5418, 5421), False, 'import json\n')] |
#! /user/bin/env python
# -*- coding: utf-8 -*-
"""
@author: gingkg
@contact: <EMAIL>
@software: PyCharm
@project: man-machine_counteraction
@file: replay_buffer.py
@date: 2021-07-04 15:39
@desc:
"""
import numpy as np
import threading
class ReplayBuffer:
    """Ring buffer of whole episodes for multi-agent RL training.

    Stores up to ``args.buffer_size`` episodes, each padded to
    ``args.episode_limit`` timesteps, and supports uniform sampling.
    """

    def __init__(self, args):
        """Allocate the per-key storage arrays from *args* dimensions."""
        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents
        self.state_shape = args.state_shape
        self.obs_shape = args.obs_shape
        self.size = args.buffer_size
        self.episode_limit = args.episode_limit
        # memory management (ring-buffer bookkeeping)
        self.current_idx = 0
        self.current_size = 0
        # Trailing dimensions per key; every array is prefixed with
        # [size, episode_limit].
        trailing = {
            'o': [self.n_agents, self.obs_shape],
            'u': [self.n_agents, 1],
            's': [self.state_shape],
            'r': [1],
            'o_next': [self.n_agents, self.obs_shape],
            's_next': [self.state_shape],
            'avail_u': [self.n_agents, self.n_actions],
            'avail_u_next': [self.n_agents, self.n_actions],
            'u_onehot': [self.n_agents, self.n_actions],
            'padded': [1],
            'terminated': [1],
        }
        self.buffers = {
            key: np.empty([self.size, self.episode_limit] + dims)
            for key, dims in trailing.items()
        }
        if self.args.alg == 'maven':
            # MAVEN additionally stores a per-episode noise vector.
            self.buffers['z'] = np.empty([self.size, self.args.noise_dim])
        # thread lock guarding concurrent writes
        self.lock = threading.Lock()

    def store_episode(self, episode_batch):
        """Copy a batch of episodes into the buffer (thread-safe)."""
        n_episodes = episode_batch['o'].shape[0]  # episode_number
        with self.lock:
            idxs = self._get_storage_idx(inc=n_episodes)
            # Copy every stored key (includes 'z' only for maven).
            for key in self.buffers:
                self.buffers[key][idxs] = episode_batch[key]

    def sample(self, batch_size):
        """Return a dict of ``batch_size`` episodes sampled uniformly."""
        idx = np.random.randint(0, self.current_size, batch_size)
        return {key: buf[idx] for key, buf in self.buffers.items()}

    def _get_storage_idx(self, inc=None):
        """Reserve ``inc`` consecutive slots, wrapping around when full.

        Returns an index array (or a scalar when ``inc == 1``).
        """
        inc = inc or 1
        if self.current_idx + inc <= self.size:
            # Fits without wrapping.
            idx = np.arange(self.current_idx, self.current_idx + inc)
            self.current_idx += inc
        elif self.current_idx < self.size:
            # Wraps: use the tail of the buffer plus the overwritten head.
            overflow = inc - (self.size - self.current_idx)
            idx = np.concatenate([
                np.arange(self.current_idx, self.size),
                np.arange(0, overflow),
            ])
            self.current_idx = overflow
        else:
            idx = np.arange(0, inc)
            self.current_idx = inc
        self.current_size = min(self.size, self.current_size + inc)
        return idx[0] if inc == 1 else idx
| [
"numpy.empty",
"threading.Lock",
"numpy.random.randint",
"numpy.arange",
"numpy.concatenate"
] | [((1959, 1975), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1973, 1975), False, 'import threading\n'), ((3133, 3184), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.current_size', 'batch_size'], {}), '(0, self.current_size, batch_size)\n', (3150, 3184), True, 'import numpy as np\n'), ((751, 823), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.n_agents, self.obs_shape]'], {}), '([self.size, self.episode_limit, self.n_agents, self.obs_shape])\n', (759, 823), True, 'import numpy as np\n'), ((854, 913), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.n_agents, 1]'], {}), '([self.size, self.episode_limit, self.n_agents, 1])\n', (862, 913), True, 'import numpy as np\n'), ((944, 1003), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.state_shape]'], {}), '([self.size, self.episode_limit, self.state_shape])\n', (952, 1003), True, 'import numpy as np\n'), ((1034, 1078), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, 1]'], {}), '([self.size, self.episode_limit, 1])\n', (1042, 1078), True, 'import numpy as np\n'), ((1114, 1186), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.n_agents, self.obs_shape]'], {}), '([self.size, self.episode_limit, self.n_agents, self.obs_shape])\n', (1122, 1186), True, 'import numpy as np\n'), ((1222, 1281), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.state_shape]'], {}), '([self.size, self.episode_limit, self.state_shape])\n', (1230, 1281), True, 'import numpy as np\n'), ((1318, 1390), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.n_agents, self.n_actions]'], {}), '([self.size, self.episode_limit, self.n_agents, self.n_actions])\n', (1326, 1390), True, 'import numpy as np\n'), ((1432, 1504), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.n_agents, self.n_actions]'], {}), '([self.size, self.episode_limit, self.n_agents, self.n_actions])\n', (1440, 1504), True, 'import 
numpy as np\n'), ((1542, 1614), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, self.n_agents, self.n_actions]'], {}), '([self.size, self.episode_limit, self.n_agents, self.n_actions])\n', (1550, 1614), True, 'import numpy as np\n'), ((1650, 1694), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, 1]'], {}), '([self.size, self.episode_limit, 1])\n', (1658, 1694), True, 'import numpy as np\n'), ((1734, 1778), 'numpy.empty', 'np.empty', (['[self.size, self.episode_limit, 1]'], {}), '([self.size, self.episode_limit, 1])\n', (1742, 1778), True, 'import numpy as np\n'), ((1874, 1916), 'numpy.empty', 'np.empty', (['[self.size, self.args.noise_dim]'], {}), '([self.size, self.args.noise_dim])\n', (1882, 1916), True, 'import numpy as np\n'), ((3438, 3489), 'numpy.arange', 'np.arange', (['self.current_idx', '(self.current_idx + inc)'], {}), '(self.current_idx, self.current_idx + inc)\n', (3447, 3489), True, 'import numpy as np\n'), ((3649, 3687), 'numpy.arange', 'np.arange', (['self.current_idx', 'self.size'], {}), '(self.current_idx, self.size)\n', (3658, 3687), True, 'import numpy as np\n'), ((3708, 3730), 'numpy.arange', 'np.arange', (['(0)', 'overflow'], {}), '(0, overflow)\n', (3717, 3730), True, 'import numpy as np\n'), ((3749, 3779), 'numpy.concatenate', 'np.concatenate', (['[idx_a, idx_b]'], {}), '([idx_a, idx_b])\n', (3763, 3779), True, 'import numpy as np\n'), ((3852, 3869), 'numpy.arange', 'np.arange', (['(0)', 'inc'], {}), '(0, inc)\n', (3861, 3869), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 13:43:44 2013
Author: <NAME>
"""
from __future__ import print_function
import numpy as np
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
    # Compare local-constant ('lc') and local-linear ('ll') kernel
    # regression fits against a noisy sinusoidal data-generating process.
    np.random.seed(500)
    nobs = [250, 1000][0]
    sig_fac = 1

    # DGP: sin(5x)/x + 2x with heteroscedastic noise ~ sqrt(|3 + x|).
    x = np.random.uniform(-2, 2, size=nobs)
    x.sort()
    y_true = np.sin(x*5)/x + 2*x
    y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)

    # 'lc' fit using the efficient (subsampled) bandwidth search.
    model = nparam.KernelReg(endog=[y],
                             exog=[x], reg_type='lc',
                             var_type='c', bw='cv_ls',
                             defaults=nparam.EstimatorSettings(efficient=True))
    sm_bw = model.bw
    sm_mean, sm_mfx = model.fit()

    # Same 'lc' model with exact cross-validated bandwidth, for comparison.
    model1 = nparam.KernelReg(endog=[y],
                              exog=[x], reg_type='lc',
                              var_type='c', bw='cv_ls')
    mean1, mfx1 = model1.fit()

    # Local-linear variant.
    model2 = nparam.KernelReg(endog=[y],
                              exog=[x], reg_type='ll',
                              var_type='c', bw='cv_ls')
    mean2, mfx2 = model2.fit()

    print(model.bw)
    print(model1.bw)
    print(model2.bw)

    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(x, y, 'o', alpha=0.5)
    ax.plot(x, y_true, lw=2, label='DGP mean')
    # BUG FIX: both kernel fits were previously labelled 'kernel mean',
    # which produced an ambiguous legend; distinguish lc vs ll.
    ax.plot(x, sm_mean, lw=2, label='kernel mean (lc)')
    ax.plot(x, mean2, lw=2, label='kernel mean (ll)')
    ax.legend()
    plt.show()
| [
"numpy.random.uniform",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.random.normal",
"statsmodels.nonparametric.api.EstimatorSettings",
"statsmodels.nonparametric.api.KernelReg"
] | [((223, 242), 'numpy.random.seed', 'np.random.seed', (['(500)'], {}), '(500)\n', (237, 242), True, 'import numpy as np\n'), ((293, 328), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(2)'], {'size': 'nobs'}), '(-2, 2, size=nobs)\n', (310, 328), True, 'import numpy as np\n'), ((756, 834), 'statsmodels.nonparametric.api.KernelReg', 'nparam.KernelReg', ([], {'endog': '[y]', 'exog': '[x]', 'reg_type': '"""lc"""', 'var_type': '"""c"""', 'bw': '"""cv_ls"""'}), "(endog=[y], exog=[x], reg_type='lc', var_type='c', bw='cv_ls')\n", (772, 834), True, 'import statsmodels.nonparametric.api as nparam\n'), ((938, 1016), 'statsmodels.nonparametric.api.KernelReg', 'nparam.KernelReg', ([], {'endog': '[y]', 'exog': '[x]', 'reg_type': '"""ll"""', 'var_type': '"""c"""', 'bw': '"""cv_ls"""'}), "(endog=[y], exog=[x], reg_type='ll', var_type='c', bw='cv_ls')\n", (954, 1016), True, 'import statsmodels.nonparametric.api as nparam\n'), ((1217, 1229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1227, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1474), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1472, 1474), True, 'import matplotlib.pyplot as plt\n'), ((355, 368), 'numpy.sin', 'np.sin', (['(x * 5)'], {}), '(x * 5)\n', (361, 368), True, 'import numpy as np\n'), ((427, 454), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'nobs'}), '(size=nobs)\n', (443, 454), True, 'import numpy as np\n'), ((643, 683), 'statsmodels.nonparametric.api.EstimatorSettings', 'nparam.EstimatorSettings', ([], {'efficient': '(True)'}), '(efficient=True)\n', (667, 683), True, 'import statsmodels.nonparametric.api as nparam\n'), ((411, 424), 'numpy.abs', 'np.abs', (['(3 + x)'], {}), '(3 + x)\n', (417, 424), True, 'import numpy as np\n')] |
'''
===============================================================================
-- Author: <NAME>, <NAME>
-- Create date: 01/11/2020
-- Description: This code is for T-distributed Stochastic Neighbor Embedding (t-SNE)
which is a method for redcuing dimension for image comparison.
This can be used for visualisation of correlation or connection
between images belong to a unit class.
-- Status: In progress
===============================================================================
'''
import numpy as np
import cv2
import sys
import os
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def image_to_feature_vector(image, size=(16, 16)):
    """Resize *image* to *size* and return it flattened as a 1-D vector."""
    resized = cv2.resize(image, size)
    return resized.flatten()
# Gather one flattened grayscale feature vector per image, labelled by the
# name of the image's parent directory.
data = []
labels = []
for root, dirs, files in os.walk(sys.argv[1]):
    for filename in files:
        ext = filename[filename.rfind("."):].lower()
        image_path = os.path.join(root, filename)
        label = image_path.split(os.path.sep)[-2]
        print(label)
        gray = cv2.imread(image_path, 0)
        feature = image_to_feature_vector(gray, size=(160, 160))
        # Normalise pixel intensities into [0, 1].
        feature = np.array(feature, dtype="float") / 255.0
        labels.append(label)
        data.append(feature)
data = np.array(data)

# Project the high-dimensional feature vectors onto a 2-D embedding.
tsne = TSNE(n_components=2, random_state=0)
X = data
X_2d = tsne.fit_transform(X)

# Scatter every embedded point, coloured by its class label.
plt.figure(figsize=(6, 5))
colors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'w', 'orange', 'purple'
y = []
la = list(set(labels))
print(la)
for i in range(0, len(X)):
    j = la.index(labels[i])
    y.append(j)
    plt.scatter(X_2d[i, 0], X_2d[i, 1], c=colors[j], label=la[j])
plt.show()
| [
"matplotlib.pyplot.show",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.scatter",
"os.walk",
"cv2.imread",
"matplotlib.pyplot.figure",
"numpy.array",
"os.path.join",
"cv2.resize"
] | [((889, 909), 'os.walk', 'os.walk', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (896, 909), False, 'import os\n'), ((1332, 1346), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1340, 1346), True, 'import numpy as np\n'), ((1355, 1391), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(0)'}), '(n_components=2, random_state=0)\n', (1359, 1391), False, 'from sklearn.manifold import TSNE\n'), ((1435, 1461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (1445, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1797, 1799), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1042), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (1026, 1042), False, 'import os\n'), ((1132, 1156), 'cv2.imread', 'cv2.imread', (['imagePath', '(0)'], {}), '(imagePath, 0)\n', (1142, 1156), False, 'import cv2\n'), ((797, 820), 'cv2.resize', 'cv2.resize', (['image', 'size'], {}), '(image, size)\n', (807, 820), False, 'import cv2\n'), ((1231, 1257), 'numpy.array', 'np.array', (['H'], {'dtype': '"""float"""'}), "(H, dtype='float')\n", (1239, 1257), True, 'import numpy as np\n'), ((1705, 1766), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_2d[i, 0]', 'X_2d[i, 1]'], {'c': 'colors[j]', 'label': 'la[j]'}), '(X_2d[i, 0], X_2d[i, 1], c=colors[j], label=la[j])\n', (1716, 1766), True, 'import matplotlib.pyplot as plt\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.