# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Kraus representation of a Quantum Channel.
"""
import copy
from numbers import Number
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.circuit.instruction import Instruction
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.predicates import is_identity_matrix
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit.quantum_info.operators.op_shape import OpShape
from qiskit.quantum_info.operators.channel.choi import Choi
from qiskit.quantum_info.operators.channel.superop import SuperOp
from qiskit.quantum_info.operators.channel.transformations import _to_kraus
from qiskit.quantum_info.operators.mixins import generate_apidocs
class Kraus(QuantumChannel):
r"""Kraus representation of a quantum channel.
For a quantum channel :math:`\mathcal{E}`, the Kraus representation is
given by a set of matrices :math:`[A_0,...,A_{K-1}]` such that the
evolution of a :class:`~qiskit.quantum_info.DensityMatrix`
:math:`\rho` is given by
.. math::
\mathcal{E}(\rho) = \sum_{i=0}^{K-1} A_i \rho A_i^\dagger
A general operator map :math:`\mathcal{G}` can also be written using the
generalized Kraus representation which is given by two sets of matrices
    :math:`[A_0,...,A_{K-1}]`, :math:`[B_0,...,B_{K-1}]` such that
.. math::
\mathcal{G}(\rho) = \sum_{i=0}^{K-1} A_i \rho B_i^\dagger
See reference [1] for further details.
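
    An illustrative usage sketch (assuming this class is re-exported as
    ``qiskit.quantum_info.Kraus``): an amplitude-damping channel built from
    its two Kraus operators.

    .. code-block:: python

        import numpy as np
        from qiskit.quantum_info import Kraus

        gamma = 0.1  # damping probability
        a_0 = np.array([[1, 0], [0, np.sqrt(1 - gamma)]])
        a_1 = np.array([[0, np.sqrt(gamma)], [0, 0]])
        chan = Kraus([a_0, a_1])
        assert chan.is_cptp()  # A_0^dagger A_0 + A_1^dagger A_1 == identity
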
References:
        1. C. J. Wood, J. D. Biamonte, D. G. Cory, *Tensor networks and graphical calculus
for open quantum systems*, Quant. Inf. Comp. 15, 0579-0811 (2015).
`arXiv:1111.6950 [quant-ph] <https://arxiv.org/abs/1111.6950>`_
"""
def __init__(self, data, input_dims=None, output_dims=None):
"""Initialize a quantum channel Kraus operator.
Args:
data (QuantumCircuit or
Instruction or
BaseOperator or
matrix): data to initialize superoperator.
input_dims (tuple): the input subsystem dimensions.
[Default: None]
output_dims (tuple): the output subsystem dimensions.
[Default: None]
Raises:
QiskitError: if input data cannot be initialized as a
                list of Kraus matrices.
Additional Information:
If the input or output dimensions are None, they will be
automatically determined from the input data. If the input data is
            a list of Numpy arrays of shape (2**N, 2**N), qubit systems will be
used. If the input does not correspond to an N-qubit channel, it
will assign a single subsystem with dimension specified by the
shape of the input.
"""
# If the input is a list or tuple we assume it is a list of Kraus
# matrices, if it is a numpy array we assume that it is a single Kraus
# operator
if isinstance(data, (list, tuple, np.ndarray)):
# Check if it is a single unitary matrix A for channel:
# E(rho) = A * rho * A^\dagger
if isinstance(data, np.ndarray) or np.array(data).ndim == 2:
# Convert single Kraus op to general Kraus pair
kraus = ([np.asarray(data, dtype=complex)], None)
shape = kraus[0][0].shape
# Check if single Kraus set [A_i] for channel:
# E(rho) = sum_i A_i * rho * A_i^dagger
elif isinstance(data, list) and len(data) > 0:
# Get dimensions from first Kraus op
kraus = [np.asarray(data[0], dtype=complex)]
shape = kraus[0].shape
# Iterate over remaining ops and check they are same shape
for i in data[1:]:
op = np.asarray(i, dtype=complex)
if op.shape != shape:
raise QiskitError(
"Kraus operators are different dimensions.")
kraus.append(op)
# Convert single Kraus set to general Kraus pair
kraus = (kraus, None)
# Check if generalized Kraus set ([A_i], [B_i]) for channel:
# E(rho) = sum_i A_i * rho * B_i^dagger
            elif isinstance(data, tuple) and len(data) == 2 and len(data[0]) > 0:
kraus_left = [np.asarray(data[0][0], dtype=complex)]
shape = kraus_left[0].shape
for i in data[0][1:]:
op = np.asarray(i, dtype=complex)
if op.shape != shape:
raise QiskitError(
"Kraus operators are different dimensions.")
kraus_left.append(op)
if data[1] is None:
kraus = (kraus_left, None)
else:
kraus_right = []
for i in data[1]:
op = np.asarray(i, dtype=complex)
if op.shape != shape:
raise QiskitError(
"Kraus operators are different dimensions.")
kraus_right.append(op)
kraus = (kraus_left, kraus_right)
else:
raise QiskitError("Invalid input for Kraus channel.")
op_shape = OpShape.auto(dims_l=output_dims, dims_r=input_dims,
shape=kraus[0][0].shape)
else:
# Otherwise we initialize by conversion from another Qiskit
# object into the QuantumChannel.
if isinstance(data, (QuantumCircuit, Instruction)):
# If the input is a Terra QuantumCircuit or Instruction we
# convert it to a SuperOp
data = SuperOp._init_instruction(data)
else:
# We use the QuantumChannel init transform to initialize
# other objects into a QuantumChannel or Operator object.
data = self._init_transformer(data)
op_shape = data._op_shape
output_dim, input_dim = op_shape.shape
# Now that the input is an operator we convert it to a Kraus
rep = getattr(data, '_channel_rep', 'Operator')
kraus = _to_kraus(rep, data._data, input_dim, output_dim)
# Initialize either single or general Kraus
if kraus[1] is None or np.allclose(kraus[0], kraus[1]):
# Standard Kraus map
data = (kraus[0], None)
else:
# General (non-CPTP) Kraus map
data = kraus
super().__init__(data, op_shape=op_shape)
@property
def data(self):
"""Return list of Kraus matrices for channel."""
if self._data[1] is None:
# If only a single Kraus set, don't return the tuple
            # Just the first set
return self._data[0]
else:
            # Otherwise return the tuple of both Kraus sets
return self._data
def is_cptp(self, atol=None, rtol=None):
"""Return True if completely-positive trace-preserving."""
if self._data[1] is not None:
return False
if atol is None:
atol = self.atol
if rtol is None:
rtol = self.rtol
accum = 0j
for op in self._data[0]:
accum += np.dot(np.transpose(np.conj(op)), op)
return is_identity_matrix(accum, rtol=rtol, atol=atol)
def _evolve(self, state, qargs=None):
return SuperOp(self)._evolve(state, qargs)
# ---------------------------------------------------------------------
# BaseOperator methods
# ---------------------------------------------------------------------
def conjugate(self):
ret = copy.copy(self)
kraus_l, kraus_r = self._data
kraus_l = [np.conj(k) for k in kraus_l]
if kraus_r is not None:
kraus_r = [k.conj() for k in kraus_r]
ret._data = (kraus_l, kraus_r)
return ret
def transpose(self):
ret = copy.copy(self)
ret._op_shape = self._op_shape.transpose()
kraus_l, kraus_r = self._data
kraus_l = [np.transpose(k) for k in kraus_l]
if kraus_r is not None:
kraus_r = [np.transpose(k) for k in kraus_r]
ret._data = (kraus_l, kraus_r)
return ret
def adjoint(self):
ret = copy.copy(self)
ret._op_shape = self._op_shape.transpose()
kraus_l, kraus_r = self._data
kraus_l = [np.conj(np.transpose(k)) for k in kraus_l]
if kraus_r is not None:
kraus_r = [np.conj(np.transpose(k)) for k in kraus_r]
ret._data = (kraus_l, kraus_r)
return ret
def compose(self, other, qargs=None, front=False):
if qargs is None:
qargs = getattr(other, 'qargs', None)
if qargs is not None:
return Kraus(
SuperOp(self).compose(other, qargs=qargs, front=front))
if not isinstance(other, Kraus):
other = Kraus(other)
new_shape = self._op_shape.compose(other._op_shape, qargs, front)
input_dims = new_shape.dims_r()
output_dims = new_shape.dims_l()
if front:
ka_l, ka_r = self._data
kb_l, kb_r = other._data
else:
ka_l, ka_r = other._data
kb_l, kb_r = self._data
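        # the Kraus operators of the composed channel are all pairwise
        # products of the two Kraus sets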
kab_l = [np.dot(a, b) for a in ka_l for b in kb_l]
if ka_r is None and kb_r is None:
kab_r = None
elif ka_r is None:
kab_r = [np.dot(a, b) for a in ka_l for b in kb_r]
elif kb_r is None:
kab_r = [np.dot(a, b) for a in ka_r for b in kb_l]
else:
kab_r = [np.dot(a, b) for a in ka_r for b in kb_r]
ret = Kraus((kab_l, kab_r), input_dims, output_dims)
ret._op_shape = new_shape
return ret
def tensor(self, other):
if not isinstance(other, Kraus):
other = Kraus(other)
return self._tensor(self, other)
def expand(self, other):
if not isinstance(other, Kraus):
other = Kraus(other)
return self._tensor(other, self)
@classmethod
def _tensor(cls, a, b):
ret = copy.copy(a)
ret._op_shape = a._op_shape.tensor(b._op_shape)
# Get tensor matrix
ka_l, ka_r = a._data
kb_l, kb_r = b._data
kab_l = [np.kron(ka, kb) for ka in ka_l for kb in kb_l]
if ka_r is None and kb_r is None:
kab_r = None
else:
if ka_r is None:
ka_r = ka_l
if kb_r is None:
kb_r = kb_l
kab_r = [np.kron(a, b) for a in ka_r for b in kb_r]
ret._data = (kab_l, kab_r)
return ret
def __add__(self, other):
qargs = getattr(other, 'qargs', None)
if not isinstance(other, QuantumChannel):
other = Choi(other)
return self._add(other, qargs=qargs)
def __sub__(self, other):
qargs = getattr(other, 'qargs', None)
if not isinstance(other, QuantumChannel):
other = Choi(other)
return self._add(-other, qargs=qargs)
def _add(self, other, qargs=None):
# Since we cannot directly add two channels in the Kraus
# representation we try and use the other channels method
# or convert to the Choi representation
return Kraus(Choi(self)._add(other, qargs=qargs))
def _multiply(self, other):
if not isinstance(other, Number):
raise QiskitError("other is not a number")
ret = copy.copy(self)
# If the number is complex we need to convert to general
# kraus channel so we multiply via Choi representation
if isinstance(other, complex) or other < 0:
# Convert to Choi-matrix
ret._data = Kraus(Choi(self)._multiply(other))._data
return ret
# If the number is real we can update the Kraus operators
# directly
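        # p * E(rho) = sum_i (sqrt(p) * A_i) rho (sqrt(p) * A_i)^dagger,
        # so each operator is scaled by sqrt(p)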
val = np.sqrt(other)
kraus_r = None
kraus_l = [val * k for k in self._data[0]]
if self._data[1] is not None:
kraus_r = [val * k for k in self._data[1]]
ret._data = (kraus_l, kraus_r)
return ret
# Update docstrings for API docs
generate_apidocs(Kraus)
'''Train DCENet with PyTorch'''
# from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import os
import json
import neptune
import argparse
import numpy as np
from loader import *
from utils.plots import *
from utils.utils import *
from utils.collision import *
from utils.datainfo import DataInfo
from utils.ranking import gauss_rank
from models import DCENet
from loss import DCENetLoss
def main():
# ================= Arguments ================ #
    parser = argparse.ArgumentParser(description='PyTorch DCENet training')
parser.add_argument('--gpu', type=str, default="4", help='gpu id')
parser.add_argument('--config', type=str, default="config", help='.json')
args = parser.parse_args()
# ================= Device Setup ================ #
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# ================= Config Load ================ #
with open('config/' + args.config) as config_file:
config = json.load(config_file)
# ================= Neptune Setup ================ #
if config['neptune']:
neptune.init('seongjulee/DCENet', api_token=config["neptune_token"]) # username/project-name, api_token=token from neptune
neptune.create_experiment(name='EXP', params=config) # name=project name (anything is ok), params=parameter list (json format)
neptune.append_tag(args.config) # neptune tag (str or string list)
# ================= Model Setup ================ #
model = nn.DataParallel(DCENet(config)).to(device) if len(args.gpu.split(',')) > 1 else DCENet(config).to(device)
# ================= Loss Function ================ #
criterion = DCENetLoss(config)
# ================= Optimizer Setup ================ #
optimizer = optim.Adam(model.parameters(), lr=config['lr'], betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-6, amsgrad=False)
# ================= Data Loader ================ #
datalist = DataInfo()
train_datalist = datalist.train_merged
print('Train data list', train_datalist)
test_datalist = datalist.train_biwi
print('Test data list', test_datalist)
np.random.seed(10)
offsets, traj_data, occupancy = load_data(config, train_datalist, datatype="train")
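    # random boolean mask assigning each trajectory to the training split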
trainval_split = np.random.rand(len(offsets)) < config['split']
train_x = offsets[trainval_split, :config['obs_seq'] - 1, 4:6]
train_occu = occupancy[trainval_split, :config['obs_seq'] - 1, ..., :config['enviro_pdim'][-1]]
train_y = offsets[trainval_split, config['obs_seq'] - 1:, 4:6]
train_y_occu = occupancy[trainval_split, config['obs_seq'] - 1:, ..., :config['enviro_pdim'][-1]]
val_x = offsets[~trainval_split, :config['obs_seq'] - 1, 4:6]
val_occu = occupancy[~trainval_split, :config['obs_seq'] - 1, ..., :config['enviro_pdim'][-1]]
val_y = offsets[~trainval_split, config['obs_seq'] - 1:, 4:6]
val_y_occu = occupancy[~trainval_split, config['obs_seq'] - 1:, ..., :config['enviro_pdim'][-1]]
print("%.0f trajectories for training\n %.0f trajectories for valiadation" %(train_x.shape[0], val_x.shape[0]))
test_offsets, test_trajs, test_occupancy = load_data(config, test_datalist, datatype="test")
test_x = test_offsets[:, :config['obs_seq'] - 1, 4:6]
test_occu = test_occupancy[:, :config['obs_seq'] - 1, ..., :config['enviro_pdim'][-1]]
last_obs_test = test_offsets[:, config['obs_seq'] - 2, 2:4]
y_truth = test_offsets[:, config['obs_seq'] - 1:, :4]
xy_truth = test_offsets[:, :, :4]
print('test_trajs', test_trajs.shape)
print("%.0f trajectories for testing" % (test_x.shape[0]))
train_dataset = TrajDataset(x=train_x, x_occu=train_occu, y=train_y, y_occu=train_y_occu, mode='train')
train_loader = DataLoader(dataset=train_dataset, batch_size=config["batch_size"], shuffle=True, num_workers=4)
val_dataset = TrajDataset(x=val_x, x_occu=val_occu, y=val_y, y_occu=val_y_occu, mode='val')
val_loader = DataLoader(dataset=val_dataset, batch_size=config["batch_size"], shuffle=False, num_workers=4)
# test_dataset = TrajDataset(x=test_x, x_occu=test_occu, y=y_truth, y_occu=None, mode='test')
# test_loader = DataLoader(dataset=test_dataset, batch_size=config["batch_size"], shuffle=False, num_workers=4)
# ================= Training Loop ================ #
early_stopping = EarlyStopping(patience=config['patience'], verbose=True, filename=args.config.split('/')[-1].replace('.json', '.pth'))
for epoch in range(config['max_epochs']):
train_one_epoch(config, epoch, device, model, optimizer, criterion, train_loader)
val_loss = evaluate(config, device, model, optimizer, criterion, val_loader)
early_stopping(val_loss, model)
if early_stopping.early_stop:
print("Early stopping")
break
# ================= Test ================ #
model.load_state_dict(torch.load(os.path.join('checkpoints', args.config.split('/')[-1].replace('.json', '.pth'))))
model.eval()
with torch.no_grad():
test_x, test_occu = input2tensor(test_x, test_occu, device)
x_latent = model.encoder_x(test_x, test_occu)
predictions = []
for i, x_ in enumerate(x_latent):
last_pos = last_obs_test[i]
x_ = x_.view(1, -1)
            for _ in range(config['num_pred']):
y_p = model.decoder(x_, train=False)
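                # recover absolute positions: prepend the last observed
                # position, then cumulatively sum the predicted offsets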
y_p_ = np.concatenate(([last_pos], np.squeeze(y_p.cpu().numpy())), axis=0)
y_p_sum = np.cumsum(y_p_, axis=0)
predictions.append(y_p_sum[1:, :])
predictions = np.reshape(predictions, [-1, config['num_pred'], config['pred_seq'], 2])
print('Predicting done!')
print(predictions.shape)
plot_pred(xy_truth, predictions)
# Get the errors for ADE, DEF, Hausdorff distance, speed deviation, heading error
print("\nEvaluation results @top%.0f" % config['num_pred'])
errors = get_errors(y_truth, predictions)
check_collision(y_truth)
    # Get the most-likely prediction for each trajectory by ranking with gauss_rank
ranked_prediction = []
for prediction in predictions:
ranks = gauss_rank(prediction)
ranked_prediction.append(prediction[np.argmax(ranks)])
ranked_prediction = np.reshape(ranked_prediction, [-1, 1, config['pred_seq'], 2])
print("\nEvaluation results for most-likely predictions")
ranked_errors = get_errors(y_truth, ranked_prediction)
# Function for one epoch training
def train_one_epoch(config, epoch, device, model, optimizer, criterion, loader):
print('\nEpoch: %d' % epoch)
model.train()
train_total, train_loss = 0, 0
for batch_idx, (x, x_occu, y, y_occu) in enumerate(loader):
x, x_occu, y, y_occu = x.to(device), x_occu.to(device), y.to(device), y_occu.to(device)
optimizer.zero_grad()
y_pred, mu, log_var = model(x, x_occu, y, y_occu, train=True)
loss = criterion(mu, log_var, y_pred, y)
loss.backward()
optimizer.step()
# train_ade += ade * x.size(0)
# train_fde += fde * x.size(0)
train_total += x.size(0)
train_loss += loss.item() * x.size(0)
if config['neptune']:
# neptune.log_metric('train_batch_ADE', ade)
# neptune.log_metric('train_batch_FDE', fde)
neptune.log_metric('train_batch_Loss', loss.item())
# progress_bar(batch_idx, len(loader), 'Lr: %.4e | Loss: %.3f | ADE[m]: %.3f | FDE[m]: %.3f'
# % (get_lr(optimizer), train_loss / train_total, train_ade / train_total, train_fde / train_total))
progress_bar(batch_idx, len(loader), 'Lr: %.4e | Loss: %.3f' % (get_lr(optimizer), train_loss / train_total))
# Function for validation
@torch.no_grad()
def evaluate(config, device, model, optimizer, criterion, loader):
model.eval()
# eval_ade, eval_fde, eval_total = 0, 0, 0
eval_total, eval_loss = 0, 0
for batch_idx, (x, x_occu, y, y_occu) in enumerate(loader):
x, x_occu, y, y_occu = x.to(device), x_occu.to(device), y.to(device), y_occu.to(device)
y_pred, mu, log_var = model(x, x_occu, y, y_occu, train=True)
loss = criterion(mu, log_var, y_pred, y)
eval_total += x.size(0)
eval_loss += loss.item() * x.size(0)
progress_bar(batch_idx, len(loader), 'Lr: %.4e | Loss: %.3f' % (get_lr(optimizer), eval_loss / eval_total))
# progress_bar(batch_idx, len(loader), 'Lr: %.4e | ADE[m]: %.3f | FDE[m]: %.3f'
# % (get_lr(optimizer), eval_ade / eval_total, eval_fde / eval_total))
if config['neptune']:
neptune.log_metric('val_Loss', eval_loss / eval_total)
# neptune.log_metric('{}_ADE'.format(loader.dataset.mode), eval_ade / eval_total)
# neptune.log_metric('{}_FDE'.format(loader.dataset.mode), eval_fde / eval_total)
return eval_loss / eval_total
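# The EarlyStopping helper used in main() is imported from utils.utils via
# the wildcard import above; its implementation is not shown in this file.
# The class below is a minimal, hypothetical sketch of the interface the
# call sites rely on (constructor args, __call__(val_loss, model), the
# .early_stop flag, and checkpointing under 'checkpoints/'). It is named
# differently so it does not shadow the real import.
class _EarlyStoppingSketch:
    def __init__(self, patience=7, verbose=False, filename='checkpoint.pth'):
        self.patience = patience      # epochs to wait without improvement
        self.verbose = verbose
        self.filename = filename
        self.counter = 0
        self.best_loss = None
        self.early_stop = False
    def __call__(self, val_loss, model):
        if self.best_loss is None or val_loss < self.best_loss:
            # improvement: reset the counter and checkpoint the weights
            self.best_loss = val_loss
            self.counter = 0
            os.makedirs('checkpoints', exist_ok=True)
            torch.save(model.state_dict(),
                       os.path.join('checkpoints', self.filename))
            if self.verbose:
                print('Validation loss improved to %.4f' % val_loss)
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True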
if __name__ == "__main__":
main()
from unittest import TestCase
import os.path as osp
import numpy as np
from datumaro.components.annotation import Label, Points
from datumaro.components.dataset import Dataset
from datumaro.components.extractor import DatasetItem
from datumaro.plugins.lfw_format import LfwConverter, LfwImporter
from datumaro.util.image import Image
from datumaro.util.test_utils import TestDir, compare_datasets
from .requirements import Requirements, mark_requirement
class LfwFormatTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
})]
),
DatasetItem(id='name0_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0001'],
'negative_pairs': ['name1/name1_0001']
})]
),
DatasetItem(id='name1_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(1, attributes={
'positive_pairs': ['name1/name1_0002']
})]
),
DatasetItem(id='name1_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(1, attributes={
'positive_pairs': ['name1/name1_0002'],
'negative_pairs': ['name0/name0_0001']
})]
),
], categories=['name0', 'name1'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir,
save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_save_images(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
})]
),
DatasetItem(id='name0_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0001'],
'negative_pairs': ['name1/name1_0001']
})]
),
DatasetItem(id='name1_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(1, attributes={})]
),
], categories=['name0', 'name1'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir,
save_images=False)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_landmarks(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001',
subset='test', image=np.ones((2, 5, 3)),
annotations=[
Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
}),
Points([0, 4, 3, 3, 2, 2, 1, 0, 3, 0]),
]
),
DatasetItem(id='name0_0002',
subset='test', image=np.ones((2, 5, 3)),
annotations=[
Label(0),
Points([0, 5, 3, 5, 2, 2, 1, 0, 3, 0]),
]
),
], categories=['name0'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_subsets(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
})],
),
DatasetItem(id='name0_0002',
image=np.ones((2, 5, 3)),
annotations=[Label(0)]
),
], categories=['name0'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_format_names(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='a/1',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/b/2'],
'negative_pairs': ['d/4']
})],
),
DatasetItem(id='b/2',
image=np.ones((2, 5, 3)),
annotations=[Label(0)]
),
DatasetItem(id='c/3',
image=np.ones((2, 5, 3)),
annotations=[Label(1)]
),
DatasetItem(id='d/4',
image=np.ones((2, 5, 3)),
),
], categories=['name0', 'name1'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
dataset = Dataset.from_iterable([
DatasetItem(id='кириллица с пробелом',
image=np.ones((2, 5, 3))
),
DatasetItem(id='name0_0002',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'negative_pairs': ['кириллица с пробелом']
})]
),
], categories=['name0'])
with TestDir() as test_dir:
LfwConverter.convert(dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, dataset, parsed_dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
dataset = Dataset.from_iterable([
DatasetItem(id='a/1', image=Image(
path='a/1.JPEG', data=np.zeros((4, 3, 3))),
),
DatasetItem(id='b/c/d/2', image=Image(
path='b/c/d/2.bmp', data=np.zeros((3, 4, 3))),
),
], categories=[])
with TestDir() as test_dir:
LfwConverter.convert(dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, dataset, parsed_dataset, require_images=True)
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'lfw_dataset')
class LfwImporterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
self.assertTrue(LfwImporter.detect(DUMMY_DATASET_DIR))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[
Label(0, attributes={
'negative_pairs': ['name1/name1_0001',
'name1/name1_0002']
}),
Points([0, 4, 3, 3, 2, 2, 1, 0, 3, 0]),
]
),
DatasetItem(id='name1_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[
Label(1, attributes={
'positive_pairs': ['name1/name1_0002'],
}),
Points([1, 6, 4, 6, 3, 3, 2, 1, 4, 1]),
]
),
DatasetItem(id='name1_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[
Label(1),
Points([0, 5, 3, 5, 2, 2, 1, 0, 3, 0]),
]
),
], categories=['name0', 'name1'])
dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'lfw')
compare_datasets(self, expected_dataset, dataset)
import numpy as np
import pandas as pd
from collections import Counter
import re
import string
import itertools
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.utils import resample
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SVMSMOTE
import nltk
from gensim.corpora import Dictionary
from gensim.models.ldamulticore import LdaMulticore
from gensim.models.word2vec import Word2Vec
from gensim.parsing.preprocessing import preprocess_string
from gensim.test.utils import get_tmpfile
np.random.seed(42)
def load_and_split_df(filepath, features='text', label='readmission', index_col=0):
'''
A function to load a dataframe from a CSV and split into train/test sets
filepath: the path to the desired CSV file
features: the name of the column(s) containing feature variables
label: the name of the column containing the label
    returns train and test features and labels as numpy arrays
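    Example (illustrative; the CSV path is hypothetical):
        X_train, X_test, y_train, y_test = load_and_split_df('data/notes.csv')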
'''
# load the data
df = pd.read_csv(filepath, index_col=index_col)
# define text feature
text = df[features].values
# define target
target = df[label].values
# split into train and test data
X_train, X_test, y_train, y_test = train_test_split(text,
target,
stratify=target,
test_size=0.33,
random_state=42)
return X_train, X_test, y_train, y_test
def logistic_regression(lr_params, train_feat, train_label, model,
test_feat, test_label, vec_params=None, random_state=42):
'''
A function to model data using logistic regression with under- or over-sampling.
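    model: 'svmsmote' to oversample the minority class with SVMSMOTE,
        or 'rus' to random-undersample the majority class.
    vec_params: parameters for the CountVectorizer.
    Example (illustrative parameter values, not prescribed settings):
        pipe, fit, y_pred, cm = logistic_regression(
            {'solver': 'liblinear', 'random_state': 42},
            X_train, y_train, 'rus', X_test, y_test,
            vec_params={'max_features': 3000})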
'''
    vec_params = vec_params or {}  # guard against the default of None
    if model == 'svmsmote':
        pipe = make_pipeline(CountVectorizer(**vec_params),
                             SVMSMOTE(random_state=random_state),
                             LogisticRegression(**lr_params))
    elif model == 'rus':
        pipe = make_pipeline(CountVectorizer(**vec_params),
                             RandomUnderSampler(random_state=random_state),
                             LogisticRegression(**lr_params))
    else:
        raise ValueError("model must be 'svmsmote' or 'rus'")
pipe_fit = pipe.fit(train_feat, train_label)
y_pred = pipe_fit.predict(test_feat)
cnf_matrix = confusion_matrix(test_label, y_pred)
return pipe, pipe_fit, y_pred, cnf_matrix
def random_forest_undersampler(vec_params, rf_params, train_feat, train_label, test_feat,
test_label, random_state=42, tfidf=False):
    '''
    A function to classify text data using count vectorization, random under-sampling,
    and a random forest, with an optional TF-IDF transform after vectorization.
    Returns the pipeline, the fitted pipeline, the predicted labels, and the
    confusion matrix.
    vec_params = parameters for the CountVectorizer
    rf_params = parameters for the RandomForestClassifier
    train_feat / train_label = arrays of training features and labels
    test_feat / test_label = arrays of testing features and labels
    '''
if tfidf:
pipe = make_pipeline(CountVectorizer(**vec_params),
TfidfTransformer(),
RandomUnderSampler(random_state=random_state),
RandomForestClassifier(**rf_params))
else:
pipe = make_pipeline(CountVectorizer(**vec_params),
RandomUnderSampler(random_state=random_state),
RandomForestClassifier(**rf_params))
pipe_fit = pipe.fit(train_feat, train_label)
y_pred = pipe_fit.predict(test_feat)
cnf_matrix = confusion_matrix(test_label, y_pred)
return pipe, pipe_fit, y_pred, cnf_matrix
def svm_text_classification(vec_params, svm_params, train_feat, train_label, test_feat,
test_label, random_state=42):
    '''
    A function to classify text data using count vectorization, random under-sampling,
    and a support vector machine.
    Returns the pipeline, the fitted pipeline, the predicted labels, and the
    confusion matrix.
    vec_params = parameters for the CountVectorizer
    svm_params = parameters for the SVC classifier
    train_feat / train_label = arrays of training features and labels
    test_feat / test_label = arrays of testing features and labels
    '''
pipe = make_pipeline(CountVectorizer(**vec_params),
RandomUnderSampler(random_state=random_state),
SVC(**svm_params))
pipe_fit = pipe.fit(train_feat, train_label)
y_pred = pipe_fit.predict(test_feat)
cnf_matrix = confusion_matrix(test_label, y_pred)
return pipe, pipe_fit, y_pred, cnf_matrix
# Word2Vec Modeling
def create_w2v_dataframe(file, idx=0, label_col='readmission', text_col='text', test_size=0.33, random_state=42):
'''
A function to load and split a dataframe for Word2Vec processing.
'''
# load dataframe
df = pd.read_csv(file, index_col=idx)
# drop all but text and label data
df = df[[label_col, text_col]]
df.columns = ['label', 'text']
    df = df.reset_index(drop=True)
    # split data into training and validation set
    df_trn, df_val = train_test_split(df,
                                      stratify=df.label,
                                      test_size=test_size,
                                      random_state=random_state)
return df_trn, df_val
def get_good_tokens(sentence):
    replaced_punctuation = list(map(lambda token: re.sub('[^0-9A-Za-z!?]+', '', token), sentence))
    removed_punctuation = list(filter(lambda token: token, replaced_punctuation))
    return removed_punctuation
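# Worked example of get_good_tokens (output verified by hand): tokens keep
# only alphanumerics plus '!' and '?', and punctuation-only tokens are dropped:
# get_good_tokens(['Dr.', 'Smith', '--', 'b.p.', 'ok!'])
# -> ['Dr', 'Smith', 'bp', 'ok!']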
def lda_get_good_tokens(df):
df['tokenized_text'] = list(map(nltk.word_tokenize, df.text))
df['tokenized_text'] = list(map(get_good_tokens, df.tokenized_text))
def remove_stopwords(df):
# define stopwords
stopwords = nltk.corpus.stopwords.words('english')
# remove stopwords
df['stopwords_removed'] = list(map(lambda doc:
[word for word in doc if word not in stopwords],
df.tokenized_text))
def stem_words(df):
lemm = nltk.stem.WordNetLemmatizer()
df['lemmatized_text'] = list(map(lambda sentence:
list(map(lemm.lemmatize, sentence)),
df.stopwords_removed))
p_stemmer = nltk.stem.porter.PorterStemmer()
df['stemmed_text'] = list(map(lambda sentence:
list(map(p_stemmer.stem, sentence)),
df.lemmatized_text))
def document_to_lda_features(lda_model, document):
""" Transforms a bag of words document to features.
It returns the proportion of how much each topic was
present in the document.
"""
topic_importances = lda_model.get_document_topics(document, minimum_probability=0)
topic_importances = np.array(topic_importances)
return topic_importances[:,1]
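# Example usage (illustrative names, not from the source): given a fitted
# LdaMulticore model `lda` and a bag-of-words corpus `bow_corpus`, per-document
# topic-proportion features would be:
# lda_feats = list(map(lambda bow: document_to_lda_features(lda, bow), bow_corpus))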
def w2v_preprocessing(df):
""" All the preprocessing steps for word2vec are done in this function.
All mutations are done on the dataframe itself. So this function returns
nothing.
"""
df['text'] = df.text.str.lower()
df['document_sentences'] = df.text.str.split('.') # split texts into individual sentences
df['tokenized_sentences'] = list(map(lambda sentences:
list(map(nltk.word_tokenize, sentences)),
df.document_sentences)) # tokenize sentences
df['tokenized_sentences'] = list(map(lambda sentences:
list(map(get_good_tokens, sentences)),
df.tokenized_sentences)) # remove unwanted characters
df['tokenized_sentences'] = list(map(lambda sentences:
list(filter(lambda lst: lst, sentences)),
df.tokenized_sentences)) # remove empty lists
def get_w2v_features(w2v_model, sentence_group):
"""
Transform a sentence_group (containing multiple lists
of words) into a feature vector. It averages out all the
word vectors of the sentence_group.
"""
chain = itertools.chain(*sentence_group)
inner_chain = itertools.chain(*chain)
words = np.array(list(inner_chain)) # words in text
index2word_set = set(w2v_model.wv.vocab.keys()) # words known to model
featureVec = np.zeros(w2v_model.vector_size, dtype="float32")
    # Initialize a counter for the number of words in the document
nwords = 0
# Loop over each word in the comment and, if it is in the model's vocabulary, add its feature vector to the total
for word in words:
if word in index2word_set:
featureVec = np.add(featureVec, w2v_model[word])
nwords += 1.
# Divide the result by the number of words to get the average
if nwords > 0:
featureVec = np.divide(featureVec, nwords)
return featureVec
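# Example usage (illustrative names, not from the source): given a trained
# Word2Vec model `w2v` and `sentence_group`, a list of documents where each
# document is a list of tokenized sentences (hence the double chain above),
# a single averaged feature vector is obtained with:
# vec = get_w2v_features(w2v, sentence_group)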
def word2vec_logistic_regression(train_feat, train_label, test_feat,
                                 test_label, model='w2v_lda',
                                 lr_params=None):
    '''
    A function to model data using logistic regression.
    lr_params : a dictionary of hyperparameters to pass to logistic regression.
    '''
    # avoid a mutable default argument; fall back to sensible defaults
    if lr_params is None:
        lr_params = {'solver': 'liblinear', 'penalty': 'l2', 'random_state': 42}
    if model == 'w2v_lda':
        clf = LogisticRegression(**lr_params)
    else:
        raise ValueError("model must be 'w2v_lda'")
clf_fit = clf.fit(train_feat, train_label)
y_pred = clf_fit.predict(test_feat)
cnf_matrix = confusion_matrix(test_label, y_pred)
return clf, clf_fit, y_pred, cnf_matrix
def random_undersample(df, random_state=42):
'''
A function to randomly undersample the majority class
to create a balanced dataset.
'''
# Separate majority class
df_major = df[df.label==0]
# Separate minority class
df_minor = df[df.label==1]
# Downsample majority class
df_major_undersampled = resample(df_major,
replace=False,
n_samples=len(df_minor),
random_state=random_state)
# Combine minority class with downsampled majority class
df_undersampled = pd.concat([df_major_undersampled, df_minor])
return df_undersampled
| [
"itertools.chain",
"sklearn.feature_extraction.text.TfidfTransformer",
"pandas.read_csv",
"numpy.array",
"nltk.stem.porter.PorterStemmer",
"numpy.divide",
"imblearn.under_sampling.RandomUnderSampler",
"nltk.corpus.stopwords.words",
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.concat"... | [((849, 867), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (863, 867), True, 'import numpy as np\n'), ((1320, 1362), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'index_col': 'index_col'}), '(filepath, index_col=index_col)\n', (1331, 1362), True, 'import pandas as pd\n'), ((1553, 1638), 'sklearn.model_selection.train_test_split', 'train_test_split', (['text', 'target'], {'stratify': 'target', 'test_size': '(0.33)', 'random_state': '(42)'}), '(text, target, stratify=target, test_size=0.33, random_state=42\n )\n', (1569, 1638), False, 'from sklearn.model_selection import train_test_split\n'), ((2723, 2759), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_label', 'y_pred'], {}), '(test_label, y_pred)\n', (2739, 2759), False, 'from sklearn.metrics import confusion_matrix\n'), ((3996, 4032), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_label', 'y_pred'], {}), '(test_label, y_pred)\n', (4012, 4032), False, 'from sklearn.metrics import confusion_matrix\n'), ((4917, 4953), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_label', 'y_pred'], {}), '(test_label, y_pred)\n', (4933, 4953), False, 'from sklearn.metrics import confusion_matrix\n'), ((5257, 5289), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': 'idx'}), '(file, index_col=idx)\n', (5268, 5289), True, 'import pandas as pd\n'), ((5517, 5609), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'stratify': 'df.label', 'test_size': 'test_size', 'random_state': 'random_state'}), '(df, stratify=df.label, test_size=test_size, random_state=\n random_state)\n', (5533, 5609), False, 'from sklearn.model_selection import train_test_split\n'), ((6240, 6278), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (6267, 6278), False, 'import nltk\n'), ((6538, 6567), 'nltk.stem.WordNetLemmatizer', 'nltk.stem.WordNetLemmatizer', ([], {}), '()\n', (6565, 6567), False, 'import nltk\n'), ((6773, 6805), 'nltk.stem.porter.PorterStemmer', 'nltk.stem.porter.PorterStemmer', ([], {}), '()\n', (6803, 6805), False, 'import nltk\n'), ((7305, 7332), 'numpy.array', 'np.array', (['topic_importances'], {}), '(topic_importances)\n', (7313, 7332), True, 'import numpy as np\n'), ((8645, 8677), 'itertools.chain', 'itertools.chain', (['*sentence_group'], {}), '(*sentence_group)\n', (8660, 8677), False, 'import itertools\n'), ((8696, 8719), 'itertools.chain', 'itertools.chain', (['*chain'], {}), '(*chain)\n', (8711, 8719), False, 'import itertools\n'), ((8875, 8923), 'numpy.zeros', 'np.zeros', (['w2v_model.vector_size'], {'dtype': '"""float32"""'}), "(w2v_model.vector_size, dtype='float32')\n", (8883, 8923), True, 'import numpy as np\n'), ((10067, 10103), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_label', 'y_pred'], {}), '(test_label, y_pred)\n', (10083, 10103), False, 'from sklearn.metrics import confusion_matrix\n'), ((10789, 10833), 'pandas.concat', 'pd.concat', (['[df_major_undersampled, df_minor]'], {}), '([df_major_undersampled, df_minor])\n', (10798, 10833), True, 'import pandas as pd\n'), ((4645, 4674), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '(**vec_params)\n', (4660, 4674), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((4705, 4750), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4723, 4750), 
False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((4781, 4798), 'sklearn.svm.SVC', 'SVC', ([], {}), '(**svm_params)\n', (4784, 4798), False, 'from sklearn.svm import SVC\n'), ((9385, 9414), 'numpy.divide', 'np.divide', (['featureVec', 'nwords'], {}), '(featureVec, nwords)\n', (9394, 9414), True, 'import numpy as np\n'), ((9921, 9952), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '(**lr_params)\n', (9939, 9952), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2219, 2248), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '(**vec_params)\n', (2234, 2248), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2279, 2314), 'imblearn.over_sampling.SVMSMOTE', 'SVMSMOTE', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (2287, 2314), False, 'from imblearn.over_sampling import SVMSMOTE\n'), ((2345, 2376), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '(**lr_params)\n', (2363, 2376), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3445, 3474), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '(**vec_params)\n', (3460, 3474), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3505, 3523), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (3521, 3523), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3554, 3599), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3572, 3599), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((3630, 3665), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**rf_params)\n', (3652, 3665), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3706, 3735), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '(**vec_params)\n', (3721, 3735), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3766, 3811), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3784, 3811), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((3842, 3877), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**rf_params)\n', (3864, 3877), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((9205, 9240), 'numpy.add', 'np.add', (['featureVec', 'w2v_model[word]'], {}), '(featureVec, w2v_model[word])\n', (9211, 9240), True, 'import numpy as np\n'), ((2432, 2461), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '(**vec_params)\n', (2447, 2461), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2492, 2537), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (2510, 2537), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((2568, 2599), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '(**lr_params)\n', (2586, 2599), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5840, 5876), 're.sub', 're.sub', (['"""[^0-9A-Za-z!?]+"""', '""""""', 'token'], {}), 
"('[^0-9A-Za-z!?]+', '', token)\n", (5846, 5876), False, 'import re\n')] |
#!/usr/bin/env python
from TurbAn.Utilities.subs import *
def pgmultiplt(rc,variables,bs,fs,step,pgcmp,smooth,numsmooth):
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
rcd=rc.__dict__
if smooth == 'y':
from scipy.ndimage import gaussian_filter as gf
# Create the window
app=QtGui.QApplication([])
win=pg.GraphicsWindow(title="Multiplot-Test")
win.resize(1000,600)
pg.setConfigOptions(antialias=True)
#Create the canvas
pltdict={}; imgdict={}
   for j in rc.vars2l:
      idx=rc.vars2l.index(j)
      # start a new row of panels after every fourth plot
      if idx >= 4 and np.mod(idx,4) == 0:
         win.nextRow()
      # plain dict assignment replaces the fragile exec()-based construction
      pltdict[j]=win.addPlot()
      if (rc.ny > 1):
         imgdict[j]=pg.ImageItem()
         pltdict[j].addItem(imgdict[j])
#        pltdict[j].setAspectLocked()
win.show()
# Loop for plottting multiple time slices
for it in range(bs,fs,step):
print('Reading time slice ', it)
rc.loadslice(it)
# Make plots
for j in rc.vars2l:
if (rc.ny==1 and rc.nz==1):
pltdict[j].plot(rc.xx,rcd[j][:,0,0],clear=True)
else:
if smooth == 'y':
imgdict[j].setImage(gf(rcd[j][:,::-1,0].T,sigma=numsmooth),clear=True,lut=pgcmp)
else:
imgdict[j].setImage(rcd[j][:,::-1,0].T,clear=True,lut=pgcmp)
pltdict[j].setTitle(j+' '+sminmax(rcd[j]))
pg.QtGui.QApplication.processEvents()
   input('Press Enter to close the plots... ')
print('All done!')
if __name__=="__main__":
rc=create_object()
variables=input("Variables to load, e.g. all, min, bx by bz: ").split()
rc.vars2load(variables)
bs,fs,step=ask_for_steps(rc.numslices)
   smooth=input("Smooth data? y/[n] ")
   numsmooth=0  # default so pgmultiplt() is safe to call when not smoothing
   if smooth == 'y':
      numsmooth=int(input("How much? "))
cmp=input("Cmap? [BuRd], RdBu, TrmBlk, bryw: ")
if cmp == '':
cmp='BuRd'
pgcmp = getpgcmap(cmp=cmp)
pgmultiplt(rc,variables,bs,fs,step,pgcmp,smooth,numsmooth)
rc.fin()
| [
"scipy.ndimage.gaussian_filter",
"pyqtgraph.setConfigOptions",
"pyqtgraph.Qt.QtGui.QApplication",
"pyqtgraph.QtGui.QApplication.processEvents",
"pyqtgraph.GraphicsWindow",
"numpy.mod"
] | [((350, 372), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (368, 372), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((381, 422), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {'title': '"""Multiplot-Test"""'}), "(title='Multiplot-Test')\n", (398, 422), True, 'import pyqtgraph as pg\n'), ((452, 487), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'antialias': '(True)'}), '(antialias=True)\n', (471, 487), True, 'import pyqtgraph as pg\n'), ((1554, 1591), 'pyqtgraph.QtGui.QApplication.processEvents', 'pg.QtGui.QApplication.processEvents', ([], {}), '()\n', (1589, 1591), True, 'import pyqtgraph as pg\n'), ((646, 660), 'numpy.mod', 'np.mod', (['idx', '(4)'], {}), '(idx, 4)\n', (652, 660), True, 'import numpy as np\n'), ((1337, 1378), 'scipy.ndimage.gaussian_filter', 'gf', (['rcd[j][:, ::-1, 0].T'], {'sigma': 'numsmooth'}), '(rcd[j][:, ::-1, 0].T, sigma=numsmooth)\n', (1339, 1378), True, 'from scipy.ndimage import gaussian_filter as gf\n')] |
#!/usr/bin/env python3
import os
import logging
#logging.basicConfig(level=logging.DEBUG)
from time import sleep
from keithley2600 import Keithley2600
import numpy as np
import saleae
np.set_printoptions(precision=2)
instrument_serial = 'USB0::fc00:db20:35b:7399::5::4309410\x00::0::INSTR'
dirname = os.path.abspath("traces/")
if not os.path.exists(dirname):
os.makedirs(dirname)
currents = np.append([0], 1.5*np.power(10.0, np.arange(-6, 0)))
ranges = [100E-6, 100E-6, 10E-3, 10E-3, 10E-3, 10E-3, 100E-3, 1]
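# The sweep points above evaluate to (computed from the expressions):
# currents = [0, 1.5e-6, 1.5e-5, 1.5e-4, 1.5e-3, 1.5e-2, 1.5e-1]  (7 points),
# so only the first 7 of the 8 entries in `ranges` are ever indexed below.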
voltages = np.arange(2.3, 3.31, .05)
measurements = np.zeros((len(currents), len(voltages), 6))
#print(measurements.shape)
#print(measurements)
#print(currents)
#print(voltages)
#
#for j, _ in enumerate(measurements):
# print()
# print(currents[j])
# for k, _ in enumerate(measurements[j]):
# print(round(voltages[k], 2))
def avg_ten():
a_i = 0.
a_v = 0.
b_i = 0.
b_v = 0.
for i in range(10):
a_i += k.smua.measure.i()
a_v += k.smua.measure.v()
b_i += k.smub.measure.i()
b_v += k.smub.measure.v()
return [a_v/10, a_i/10, b_v/10, b_i/10]
s = saleae.Saleae()
s.set_trigger_one_channel(0, saleae.Trigger.Negedge)
k = Keithley2600(instrument_serial)
k.smua.reset()
k.smub.reset()
k.smua.source.output = k.smua.OUTPUT_OFF
k.smub.source.output = k.smub.OUTPUT_OFF
k.smua.sense = k.smua.SENSE_LOCAL
k.smub.sense = k.smub.SENSE_LOCAL
k.smua.measure.nplc = 5
k.smub.measure.nplc = 5
k.smua.measure.autorangei = k.smua.AUTORANGE_ON
k.smua.measure.delay = -1
k.smua.source.func = k.smua.OUTPUT_DCVOLTS
k.smua.source.rangev = 20
#k.smua.measure.rangei = 10E-3
k.smua.source.limiti = 500E-3
k.smub.measure.autorangei = k.smub.AUTORANGE_ON
k.smub.measure.delay = -1
k.smub.source.func = k.smub.OUTPUT_DCAMPS
k.smub.source.limitv = 4
#k.smub.source.rangei = 10E-3
k.smub.source.sink = k.smub.ENABLE
k.smub.source.output = k.smub.OUTPUT_ON
k.smua.source.output = k.smua.OUTPUT_ON
for l, i in enumerate(currents):
for m, v in enumerate(voltages):
print('running ' + str(i) + ' Amps, ' + str(round(v, 2)) + ' Volts')
k.smua.source.levelv = round(v, 2)
k.smua.measure.lowrangei = ranges[l]
print(i)
        if l != 0:  # 'is not' compares identity, not value
k.smub.source.leveli = -i
s.set_capture_seconds(5/(10 ** (l - 1)))
#print('capture seconds: ' + str(5/(10 ** (l - 1))))
k.smub.source.output = k.smub.OUTPUT_ON
else:
k.smub.source.output = k.smub.OUTPUT_HIGH_Z
s.set_capture_seconds(10)
s.capture_start_and_wait_until_finished()
s.export_data2(dirname + '/' + str(i) + '_' + str(round(v, 2)))
k_meas = avg_ten()
# current compensation for saleae measurement
        # saleae has ~2Mohm impedance
#k_meas[-1] += -k_meas[2] / 2E6
measurements[l, m, 0] = i
measurements[l, m, 1] = round(v, 2)
        measurements[l, m, 2:] = k_meas  # reuse the averaged reading taken above
print(measurements[l, m])
#k.smua.source.output = k.smua.OUTPUT_OFF
#k.smub.source.output = k.smub.OUTPUT_HIGH_Z
np.save(dirname + '/measurements.npy', measurements)
print(measurements)
k.smua.source.output = k.smua.OUTPUT_OFF
k.smub.source.output = k.smub.OUTPUT_OFF
#k.smua.reset()
#k.smub.reset()
| [
"os.path.exists",
"os.makedirs",
"numpy.set_printoptions",
"saleae.Saleae",
"os.path.abspath",
"numpy.save",
"numpy.arange",
"keithley2600.Keithley2600"
] | [((185, 217), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (204, 217), True, 'import numpy as np\n'), ((302, 328), 'os.path.abspath', 'os.path.abspath', (['"""traces/"""'], {}), "('traces/')\n", (317, 328), False, 'import os\n'), ((526, 552), 'numpy.arange', 'np.arange', (['(2.3)', '(3.31)', '(0.05)'], {}), '(2.3, 3.31, 0.05)\n', (535, 552), True, 'import numpy as np\n'), ((1130, 1145), 'saleae.Saleae', 'saleae.Saleae', ([], {}), '()\n', (1143, 1145), False, 'import saleae\n'), ((1204, 1235), 'keithley2600.Keithley2600', 'Keithley2600', (['instrument_serial'], {}), '(instrument_serial)\n', (1216, 1235), False, 'from keithley2600 import Keithley2600\n'), ((3088, 3140), 'numpy.save', 'np.save', (["(dirname + '/measurements.npy')", 'measurements'], {}), "(dirname + '/measurements.npy', measurements)\n", (3095, 3140), True, 'import numpy as np\n'), ((336, 359), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (350, 359), False, 'import os\n'), ((365, 385), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (376, 385), False, 'import os\n'), ((431, 447), 'numpy.arange', 'np.arange', (['(-6)', '(0)'], {}), '(-6, 0)\n', (440, 447), True, 'import numpy as np\n')] |
from numpy import full, nan
from pandas import DataFrame, concat
from .call_function_with_multiprocess import call_function_with_multiprocess
from .compute_1d_array_context import compute_1d_array_context
from .split_dataframe import split_dataframe
def _make_context_matrix(
dataframe,
skew_t_pdf_fit_parameter,
n_grid,
degree_of_freedom_for_tail_reduction,
multiply_distance_from_reference_argmax,
global_location,
global_scale,
global_degree_of_freedom,
global_shape,
):
context_matrix = full(dataframe.shape, nan)
n = dataframe.shape[0]
n_per_print = max(1, n // 10)
for i, (index, series) in enumerate(dataframe.iterrows()):
if not i % n_per_print:
print("({}/{}) {} ...".format(i + 1, n, index))
if skew_t_pdf_fit_parameter is None:
n_data = location = scale = degree_of_freedom = shape = None
else:
n_data, location, scale, degree_of_freedom, shape = skew_t_pdf_fit_parameter.loc[
index, ["N Data", "Location", "Scale", "Degree of Freedom", "Shape"]
]
context_matrix[i] = compute_1d_array_context(
series.values,
n_data=n_data,
location=location,
scale=scale,
degree_of_freedom=degree_of_freedom,
shape=shape,
n_grid=n_grid,
degree_of_freedom_for_tail_reduction=degree_of_freedom_for_tail_reduction,
multiply_distance_from_reference_argmax=multiply_distance_from_reference_argmax,
global_location=global_location,
global_scale=global_scale,
global_degree_of_freedom=global_degree_of_freedom,
global_shape=global_shape,
)["context_like_array"]
return DataFrame(context_matrix, index=dataframe.index, columns=dataframe.columns)
def make_context_matrix(
dataframe,
n_job=1,
skew_t_pdf_fit_parameter=None,
n_grid=1e3,
degree_of_freedom_for_tail_reduction=1e8,
multiply_distance_from_reference_argmax=False,
global_location=None,
global_scale=None,
global_degree_of_freedom=None,
global_shape=None,
output_file_path=None,
):
context_matrix = concat(
call_function_with_multiprocess(
_make_context_matrix,
(
(
dataframe_,
skew_t_pdf_fit_parameter,
n_grid,
degree_of_freedom_for_tail_reduction,
multiply_distance_from_reference_argmax,
global_location,
global_scale,
global_degree_of_freedom,
global_shape,
)
for dataframe_ in split_dataframe(
dataframe, 0, min(dataframe.shape[0], n_job)
)
),
n_job,
)
)
if output_file_path is not None:
context_matrix.to_csv(output_file_path, sep="\t")
return context_matrix
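# Example usage (illustrative, assuming this module is imported as part of its
# package so the relative imports above resolve): build a context matrix for a
# small random dataframe on a single core:
# from numpy.random import random_sample
# from pandas import DataFrame
# context = make_context_matrix(DataFrame(random_sample((8, 16))), n_job=1)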
| [
"pandas.DataFrame",
"numpy.full"
] | [((535, 561), 'numpy.full', 'full', (['dataframe.shape', 'nan'], {}), '(dataframe.shape, nan)\n', (539, 561), False, 'from numpy import full, nan\n'), ((1788, 1863), 'pandas.DataFrame', 'DataFrame', (['context_matrix'], {'index': 'dataframe.index', 'columns': 'dataframe.columns'}), '(context_matrix, index=dataframe.index, columns=dataframe.columns)\n', (1797, 1863), False, 'from pandas import DataFrame, concat\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import paddle as paddle
import paddle.fluid as fluid
def generate_index(batch_size, samples_each_class):
    """For each anchor sample, order the flattened N x N index so the anchor
    comes first, then its positives, then all negatives."""
    a = np.arange(0, batch_size * batch_size)  # N*N x 1
    a = a.reshape(-1, batch_size)  # N x N
    res = []
for i in range(batch_size):
step = i // samples_each_class
start = step * samples_each_class
end = (step + 1) * samples_each_class
p = []
n = []
for j, k in enumerate(a[i]):
if j >= start and j < end:
if j == i:
p.insert(0, k)
else:
p.append(k)
else:
n.append(k)
comb = p + n
res += comb
res = np.array(res).astype(np.int32)
return res
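# Worked example (values verified by hand): with batch_size=4 and
# samples_each_class=2, each block of 4 indices starts with the anchor's own
# pair, then its positive, then the negatives:
# generate_index(4, 2)
# -> [0, 1, 2, 3,  5, 4, 6, 7,  10, 11, 8, 9,  15, 14, 12, 13]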
def calculate_order_dist_matrix(feature, batch_size, samples_each_class):
assert(batch_size % samples_each_class == 0)
feature = fluid.layers.reshape(feature, shape=[batch_size, -1])
    ab = fluid.layers.matmul(feature, feature, False, True)  # pairwise dot products a_i . a_j
    a2 = fluid.layers.square(feature)
    a2 = fluid.layers.reduce_sum(a2, dim = 1)  # squared norms |a_i|^2
    # squared Euclidean distances: |a_i - a_j|^2 = |a_i|^2 - 2 a_i.a_j + |a_j|^2
    d = fluid.layers.elementwise_add(-2*ab, a2, axis = 0)
    d = fluid.layers.elementwise_add(d, a2, axis = 1)
    d = fluid.layers.reshape(d, shape = [-1, 1])
index = generate_index(batch_size, samples_each_class)
index_var = fluid.layers.create_global_var(shape=[batch_size*batch_size], value=0, dtype='int32', persistable=True)
index_var = fluid.layers.assign(index, index_var)
d = fluid.layers.gather(d, index=index_var)
d = fluid.layers.reshape(d, shape=[-1, batch_size])
return d
| [
"paddle.fluid.layers.square",
"paddle.fluid.layers.create_global_var",
"paddle.fluid.layers.gather",
"paddle.fluid.layers.reduce_sum",
"numpy.array",
"paddle.fluid.layers.elementwise_add",
"paddle.fluid.layers.assign",
"paddle.fluid.layers.matmul",
"numpy.arange",
"paddle.fluid.layers.reshape"
] | [((865, 902), 'numpy.arange', 'np.arange', (['(0)', '(batch_size * batch_size)'], {}), '(0, batch_size * batch_size)\n', (874, 902), True, 'import numpy as np\n'), ((1675, 1728), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['feature'], {'shape': '[batch_size, -1]'}), '(feature, shape=[batch_size, -1])\n', (1695, 1728), True, 'import paddle.fluid as fluid\n'), ((1738, 1788), 'paddle.fluid.layers.matmul', 'fluid.layers.matmul', (['feature', 'feature', '(False)', '(True)'], {}), '(feature, feature, False, True)\n', (1757, 1788), True, 'import paddle.fluid as fluid\n'), ((1798, 1826), 'paddle.fluid.layers.square', 'fluid.layers.square', (['feature'], {}), '(feature)\n', (1817, 1826), True, 'import paddle.fluid as fluid\n'), ((1836, 1870), 'paddle.fluid.layers.reduce_sum', 'fluid.layers.reduce_sum', (['a2'], {'dim': '(1)'}), '(a2, dim=1)\n', (1859, 1870), True, 'import paddle.fluid as fluid\n'), ((1881, 1930), 'paddle.fluid.layers.elementwise_add', 'fluid.layers.elementwise_add', (['(-2 * ab)', 'a2'], {'axis': '(0)'}), '(-2 * ab, a2, axis=0)\n', (1909, 1930), True, 'import paddle.fluid as fluid\n'), ((1939, 1982), 'paddle.fluid.layers.elementwise_add', 'fluid.layers.elementwise_add', (['d', 'a2'], {'axis': '(1)'}), '(d, a2, axis=1)\n', (1967, 1982), True, 'import paddle.fluid as fluid\n'), ((1993, 2031), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['d'], {'shape': '[-1, 1]'}), '(d, shape=[-1, 1])\n', (2013, 2031), True, 'import paddle.fluid as fluid\n'), ((2109, 2218), 'paddle.fluid.layers.create_global_var', 'fluid.layers.create_global_var', ([], {'shape': '[batch_size * batch_size]', 'value': '(0)', 'dtype': '"""int32"""', 'persistable': '(True)'}), "(shape=[batch_size * batch_size], value=0,\n dtype='int32', persistable=True)\n", (2139, 2218), True, 'import paddle.fluid as fluid\n'), ((2229, 2266), 'paddle.fluid.layers.assign', 'fluid.layers.assign', (['index', 'index_var'], {}), '(index, index_var)\n', (2248, 2266), True, 'import paddle.fluid as fluid\n'), ((2275, 2314), 'paddle.fluid.layers.gather', 'fluid.layers.gather', (['d'], {'index': 'index_var'}), '(d, index=index_var)\n', (2294, 2314), True, 'import paddle.fluid as fluid\n'), ((2323, 2370), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['d'], {'shape': '[-1, batch_size]'}), '(d, shape=[-1, batch_size])\n', (2343, 2370), True, 'import paddle.fluid as fluid\n'), ((1491, 1504), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1499, 1504), True, 'import numpy as np\n')] |
"""
Routines for solving the KS equations via Numerov's method
"""
# standard libs
import os
import shutil
# external libs
import numpy as np
from scipy.sparse.linalg import eigsh, eigs
from scipy.linalg import eigh, eig
from joblib import Parallel, delayed, dump, load
# from staticKS import Orbitals
# internal libs
import config
import mathtools
import writeoutput
# @writeoutput.timing
def matrix_solve(v, xgrid):
"""
Solves the radial KS equation using an implementation of Numerov's method using matrix diagonalization (see notes)
Parameters
----------
v : ndarray
the KS potential on the log grid
xgrid : ndarray
the logarithmic grid
Returns
-------
eigfuncs : ndarray
the radial KS eigenfunctions on the log grid
eigvals : ndarray
the KS eigenvalues
Notes
-----
    The implementation is based on the following paper:
    <NAME>, <NAME>, and <NAME>, "Matrix Numerov method for solving Schrödinger's equation",
    American Journal of Physics 80, 1017-1019 (2012) https://doi.org/10.1119/1.4748813
    The matrix diagonalization is of the form:
    .. math:: \hat{H} \ket{X} = \lambda \hat{B} \ket{X}
    .. math:: \hat{H} = \hat{T} + \hat{B}\hat{V},\ \hat{T} = -0.5*\hat{p}*\hat{A}
    See the referenced paper for the definitions of the matrices :math:`\hat{A}`
    and :math:`\hat{B}`
    """
N = config.grid_params["ngrid"]
# define the spacing of the xgrid
dx = xgrid[1] - xgrid[0]
# number of grid points
# Set-up the following matrix diagonalization problem
# H*|u>=E*B*|u>; H=T+B*V; T=-p*A
# |u> is related to the radial eigenfunctions R(r) via R(x)=exp(x/2)u(x)
# off-diagonal matrices
I_minus = np.eye(N, k=-1)
I_zero = np.eye(N)
I_plus = np.eye(N, k=1)
p = np.zeros((N, N)) # transformation for kinetic term on log grid
np.fill_diagonal(p, np.exp(-2 * xgrid))
# see referenced paper for definitions of A and B matrices
A = np.matrix((I_minus - 2 * I_zero + I_plus) / dx ** 2)
B = np.matrix((I_minus + 10 * I_zero + I_plus) / 12)
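    # Illustration (not executed): for N=4 and dx=1, before the Neumann
    # adjustment below, A is the standard second-difference stencil and B the
    # Numerov weighting:
    # A = [[-2, 1, 0, 0], [1, -2, 1, 0], [0, 1, -2, 1], [0, 0, 1, -2]]
    # B = (1/12) * [[10, 1, 0, 0], [1, 10, 1, 0], [0, 1, 10, 1], [0, 0, 1, 10]]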
# von neumann boundary conditions
if config.bc == "neumann":
A[N - 2, N - 1] = 2 * dx ** (-2)
B[N - 2, N - 1] = 2 * B[N - 2, N - 1]
A[N - 1, N - 1] = A[N - 1, N - 1] + 1.0 / dx
B[N - 1, N - 1] = B[N - 1, N - 1] - dx / 12.0
# construct kinetic energy matrix
T = -0.5 * p * A
# solve in serial or parallel - serial mostly useful for debugging
if config.numcores > 0:
eigfuncs, eigvals = KS_matsolve_parallel(T, B, v, xgrid)
else:
eigfuncs, eigvals = KS_matsolve_serial(T, B, v, xgrid)
return eigfuncs, eigvals
def KS_matsolve_parallel(T, B, v, xgrid):
"""
    Solves the KS matrix diagonalization by parallelizing over
    config.numcores cores
Parameters
----------
T : ndarray
kinetic energy array
B : ndarray
off-diagonal array (for RHS of eigenvalue problem)
v : ndarray
KS potential array
xgrid: ndarray
the logarithmic grid
Returns
-------
eigfuncs : ndarray
radial KS wfns
eigvals : ndarray
KS eigenvalues
"""
# compute the number of grid points
N = np.size(xgrid)
# Compute the number pmax of distinct diagonizations to be solved
pmax = config.spindims * config.lmax
# now flatten the potential matrix over spins
v_flat = np.zeros((pmax, N))
for i in range(np.shape(v)[0]):
for l in range(config.lmax):
v_flat[l + (i * config.lmax)] = v[i] + 0.5 * (l + 0.5) ** 2 * np.exp(
-2 * xgrid
)
# make temporary folder to store arrays
joblib_folder = "./joblib_memmap"
try:
os.mkdir(joblib_folder)
except FileExistsError:
pass
# dump and load the large numpy arrays from file
data_filename_memmap = os.path.join(joblib_folder, "data_memmap")
dump((T, B, v_flat), data_filename_memmap)
T, B, v_flat = load(data_filename_memmap, mmap_mode="r")
# set up the parallel job
with Parallel(n_jobs=config.numcores) as parallel:
X = parallel(
delayed(diag_H)(q, T, B, v_flat, xgrid, config.nmax, config.bc)
for q in range(pmax)
)
# remove the joblib arrays
try:
shutil.rmtree(joblib_folder)
except: # noqa
print("Could not clean-up automatically.")
# retrieve the eigfuncs and eigvals from the joblib output
eigfuncs_flat = np.zeros((pmax, config.nmax, N))
eigvals_flat = np.zeros((pmax, config.nmax))
for q in range(pmax):
eigfuncs_flat[q] = X[q][0]
eigvals_flat[q] = X[q][1]
# unflatten eigfuncs / eigvals so they return to original shape
eigfuncs = eigfuncs_flat.reshape(config.spindims, config.lmax, config.nmax, N)
eigvals = eigvals_flat.reshape(config.spindims, config.lmax, config.nmax)
return eigfuncs, eigvals
def KS_matsolve_serial(T, B, v, xgrid):
"""
Solves the KS equations via matrix diagonalization in serial
Parameters
----------
T : ndarray
kinetic energy array
B : ndarray
off-diagonal array (for RHS of eigenvalue problem)
v : ndarray
KS potential array
xgrid: ndarray
the logarithmic grid
Returns
-------
eigfuncs : ndarray
radial KS wfns
eigvals : ndarray
KS eigenvalues
"""
# compute the number of grid points
N = np.size(xgrid)
# initialize empty potential matrix
V_mat = np.zeros((N, N))
# initialize the eigenfunctions and their eigenvalues
eigfuncs = np.zeros((config.spindims, config.lmax, config.nmax, N))
eigvals = np.zeros((config.spindims, config.lmax, config.nmax))
# A new Hamiltonian has to be re-constructed for every value of l and each spin channel if spin-polarized
for l in range(config.lmax):
# diagonalize Hamiltonian using scipy
for i in range(np.shape(v)[0]):
# fill potential matrices
np.fill_diagonal(V_mat, v[i] + 0.5 * (l + 0.5) ** 2 * np.exp(-2 * xgrid))
# construct Hamiltonians
H = T + B * V_mat
# we seek the lowest nmax eigenvalues from sparse matrix diagonalization
# use `shift-invert mode' (sigma=0) and pick lowest magnitude ("LM") eigs
# sigma=0 seems to cause numerical issues so use a small offset
eigs_up, vecs_up = eigs(H, k=config.nmax, M=B, which="LM", sigma=0.0001)
eigfuncs[i, l], eigvals[i, l] = update_orbs(
vecs_up, eigs_up, xgrid, config.bc
)
return eigfuncs, eigvals
def diag_H(p, T, B, v, xgrid, nmax, bc):
    """
    Diagonalizes the Hamiltonian for the input potential v[p] using scipy's
    sparse matrix solver scipy.sparse.linalg.eigs
Parameters
----------
p : int
the desired index of the input array v to solve for
T : ndarray
the kinetic energy matrix
    B : ndarray
        the off diagonal matrix multiplying V and RHS
    v : ndarray
        the array of KS potentials, indexed by p
    xgrid : ndarray
the logarithmic grid
nmax : int
number of eigenvalues returned by the sparse matrix diagonalization
bc : str
the boundary condition
Returns
-------
evecs : ndarray
the KS radial eigenfunctions
evals : ndarray
the KS eigenvalues
"""
# compute the number of grid points
N = np.size(xgrid)
# initialize empty potential matrix
V_mat = np.zeros((N, N))
# fill potential matrices
# np.fill_diagonal(V_mat, v + 0.5 * (l + 0.5) ** 2 * np.exp(-2 * xgrid))
np.fill_diagonal(V_mat, v[p])
# construct Hamiltonians
H = T + B * V_mat
# we seek the lowest nmax eigenvalues from sparse matrix diagonalization
# use `shift-invert mode' (sigma=0) and pick lowest magnitude ("LM") eigs
# sigma=0 seems to cause numerical issues so use a small offset
evals, evecs = eigs(H, k=nmax, M=B, which="LM", sigma=0.0001)
# sort and normalize
evecs, evals = update_orbs(evecs, evals, xgrid, bc)
return evecs, evals
def update_orbs(l_eigfuncs, l_eigvals, xgrid, bc):
"""
Sorts the eigenvalues and functions by ascending order in energy and normalizes the eigenfunctions
Parameters
----------
l_eigfuncs : ndarray
input (unsorted and unnormalized) eigenfunctions (for given l and spin)
l_eigvals : ndarray
input (unsorted) eigenvalues (for given l and spin)
xgrid : ndarray
the logarithmic grid
bc : str
the boundary condition
Returns
-------
eigfuncs : ndarray
sorted and normalized eigenfunctions
eigvals : ndarray
sorted eigenvalues in ascending energy
"""
# Sort eigenvalues in ascending order
idr = np.argsort(l_eigvals)
eigvals = np.array(l_eigvals[idr].real)
# under neumann bc the RHS pt is junk, convert to correct value
if bc == "neumann":
l_eigfuncs[-1] = l_eigfuncs[-2]
eigfuncs = np.array(np.transpose(l_eigfuncs.real)[idr])
eigfuncs = mathtools.normalize_orbs(eigfuncs, xgrid) # normalize
return eigfuncs, eigvals
| [
"numpy.argsort",
"numpy.array",
"numpy.exp",
"os.mkdir",
"joblib.load",
"joblib.dump",
"numpy.eye",
"numpy.size",
"numpy.fill_diagonal",
"numpy.shape",
"numpy.transpose",
"scipy.sparse.linalg.eigs",
"mathtools.normalize_orbs",
"os.path.join",
"joblib.Parallel",
"numpy.zeros",
"shutil... | [((1744, 1759), 'numpy.eye', 'np.eye', (['N'], {'k': '(-1)'}), '(N, k=-1)\n', (1750, 1759), True, 'import numpy as np\n'), ((1773, 1782), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (1779, 1782), True, 'import numpy as np\n'), ((1796, 1810), 'numpy.eye', 'np.eye', (['N'], {'k': '(1)'}), '(N, k=1)\n', (1802, 1810), True, 'import numpy as np\n'), ((1820, 1836), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (1828, 1836), True, 'import numpy as np\n'), ((2000, 2052), 'numpy.matrix', 'np.matrix', (['((I_minus - 2 * I_zero + I_plus) / dx ** 2)'], {}), '((I_minus - 2 * I_zero + I_plus) / dx ** 2)\n', (2009, 2052), True, 'import numpy as np\n'), ((2061, 2109), 'numpy.matrix', 'np.matrix', (['((I_minus + 10 * I_zero + I_plus) / 12)'], {}), '((I_minus + 10 * I_zero + I_plus) / 12)\n', (2070, 2109), True, 'import numpy as np\n'), ((3256, 3270), 'numpy.size', 'np.size', (['xgrid'], {}), '(xgrid)\n', (3263, 3270), True, 'import numpy as np\n'), ((3323, 3339), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (3331, 3339), True, 'import numpy as np\n'), ((3516, 3535), 'numpy.zeros', 'np.zeros', (['(pmax, N)'], {}), '((pmax, N))\n', (3524, 3535), True, 'import numpy as np\n'), ((3978, 4020), 'os.path.join', 'os.path.join', (['joblib_folder', '"""data_memmap"""'], {}), "(joblib_folder, 'data_memmap')\n", (3990, 4020), False, 'import os\n'), ((4025, 4067), 'joblib.dump', 'dump', (['(T, B, v_flat)', 'data_filename_memmap'], {}), '((T, B, v_flat), data_filename_memmap)\n', (4029, 4067), False, 'from joblib import Parallel, delayed, dump, load\n'), ((4087, 4128), 'joblib.load', 'load', (['data_filename_memmap'], {'mmap_mode': '"""r"""'}), "(data_filename_memmap, mmap_mode='r')\n", (4091, 4128), False, 'from joblib import Parallel, delayed, dump, load\n'), ((4589, 4621), 'numpy.zeros', 'np.zeros', (['(pmax, config.nmax, N)'], {}), '((pmax, config.nmax, N))\n', (4597, 4621), True, 'import numpy as np\n'), ((4641, 4670), 'numpy.zeros', 'np.zeros', (['(pmax, config.nmax)'], {}), '((pmax, config.nmax))\n', (4649, 4670), True, 'import numpy as np\n'), ((5556, 5570), 'numpy.size', 'np.size', (['xgrid'], {}), '(xgrid)\n', (5563, 5570), True, 'import numpy as np\n'), ((5623, 5639), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (5631, 5639), True, 'import numpy as np\n'), ((5714, 5770), 'numpy.zeros', 'np.zeros', (['(config.spindims, config.lmax, config.nmax, N)'], {}), '((config.spindims, config.lmax, config.nmax, N))\n', (5722, 5770), True, 'import numpy as np\n'), ((5785, 5838), 'numpy.zeros', 'np.zeros', (['(config.spindims, config.lmax, config.nmax)'], {}), '((config.spindims, config.lmax, config.nmax))\n', (5793, 5838), True, 'import numpy as np\n'), ((7518, 7532), 'numpy.size', 'np.size', (['xgrid'], {}), '(xgrid)\n', (7525, 7532), True, 'import numpy as np\n'), ((7585, 7601), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (7593, 7601), True, 'import numpy as np\n'), ((7714, 7743), 'numpy.fill_diagonal', 'np.fill_diagonal', (['V_mat', 'v[p]'], {}), '(V_mat, v[p])\n', (7730, 7743), True, 'import numpy as np\n'), ((8039, 8085), 'scipy.sparse.linalg.eigs', 'eigs', (['H'], {'k': 'nmax', 'M': 'B', 'which': '"""LM"""', 'sigma': '(0.0001)'}), "(H, k=nmax, M=B, which='LM', sigma=0.0001)\n", (8043, 8085), False, 'from scipy.sparse.linalg import eigsh, eigs\n'), ((8893, 8914), 'numpy.argsort', 'np.argsort', (['l_eigvals'], {}), '(l_eigvals)\n', (8903, 8914), True, 'import numpy as np\n'), ((8929, 8958), 'numpy.array', 'np.array', (['l_eigvals[idr].real'], {}), 
'(l_eigvals[idr].real)\n', (8937, 8958), True, 'import numpy as np\n'), ((9166, 9207), 'mathtools.normalize_orbs', 'mathtools.normalize_orbs', (['eigfuncs', 'xgrid'], {}), '(eigfuncs, xgrid)\n', (9190, 9207), False, 'import mathtools\n'), ((1908, 1926), 'numpy.exp', 'np.exp', (['(-2 * xgrid)'], {}), '(-2 * xgrid)\n', (1914, 1926), True, 'import numpy as np\n'), ((3832, 3855), 'os.mkdir', 'os.mkdir', (['joblib_folder'], {}), '(joblib_folder)\n', (3840, 3855), False, 'import os\n'), ((4169, 4201), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'config.numcores'}), '(n_jobs=config.numcores)\n', (4177, 4201), False, 'from joblib import Parallel, delayed, dump, load\n'), ((4405, 4433), 'shutil.rmtree', 'shutil.rmtree', (['joblib_folder'], {}), '(joblib_folder)\n', (4418, 4433), False, 'import shutil\n'), ((3555, 3566), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (3563, 3566), True, 'import numpy as np\n'), ((6542, 6595), 'scipy.sparse.linalg.eigs', 'eigs', (['H'], {'k': 'config.nmax', 'M': 'B', 'which': '"""LM"""', 'sigma': '(0.0001)'}), "(H, k=config.nmax, M=B, which='LM', sigma=0.0001)\n", (6546, 6595), False, 'from scipy.sparse.linalg import eigsh, eigs\n'), ((9115, 9144), 'numpy.transpose', 'np.transpose', (['l_eigfuncs.real'], {}), '(l_eigfuncs.real)\n', (9127, 9144), True, 'import numpy as np\n'), ((6053, 6064), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (6061, 6064), True, 'import numpy as np\n'), ((3683, 3701), 'numpy.exp', 'np.exp', (['(-2 * xgrid)'], {}), '(-2 * xgrid)\n', (3689, 3701), True, 'import numpy as np\n'), ((4249, 4264), 'joblib.delayed', 'delayed', (['diag_H'], {}), '(diag_H)\n', (4256, 4264), False, 'from joblib import Parallel, delayed, dump, load\n'), ((6175, 6193), 'numpy.exp', 'np.exp', (['(-2 * xgrid)'], {}), '(-2 * xgrid)\n', (6181, 6193), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import division
from past.utils import old_div
import unittest
import os.path
import sys
from anuga.utilities.system_tools import get_pathname_from_package
from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model
import numpy as num
class Test_culvert_routines_box_10pct(unittest.TestCase):
"""
This unit test sets up 6 tests for various culvert conditions for a Box Culvert on a 10% Slope
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_boyd_1(self):
"""test_boyd_1
This tests the Boyd routine with data obtained from ??? by <NAME>
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
inlet_depth=0.150
outlet_depth=0.15
inlet_velocity=1.00
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.6
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
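        # specific energy at the inlet: E_s = d + v**2 / (2*g)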
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
culvert_slope=10.0 # % Downward
z_in = 10.0
z_out = old_div(-culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST01 Q-v-d',Q,v,d))
#print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 0.5526, 1.146, 0.1339))
assert num.allclose(Q, 0.5526, rtol=1.0e-1) #inflow
assert num.allclose(v, 1.146, rtol=1.0e-1) #outflow velocity
assert num.allclose(d, 0.1339, rtol=1.0e-1) #depth at outlet used to calc v
def test_boyd_2(self):
"""test_boyd_2
This tests the Boyd routine with data obtained from ??? by <NAME>
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=0.500
outlet_depth=0.700
inlet_velocity=1.0
outlet_velocity=0.50
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 0.0
z_out = old_div(-culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST02 Q-v-d',Q,v,d))
#print ('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 2.508, 1.897, 0.367))
assert num.allclose(Q, 2.508, rtol=1.0e-1) #inflow
assert num.allclose(v, 1.897, rtol=1.0e-1) #outflow velocity
assert num.allclose(d, 0.367, rtol=1.0e-1) #depth at outlet used to calc v
def test_boyd_3(self):
"""test_boyd_3
This tests the Boyd routine with data obtained from ??? by <NAME>
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.800
outlet_depth=0.80
inlet_velocity=1.0
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 0.0
z_out = old_div(-culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.2f'%('Delta E = ',delta_total_energy))
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST03 Q-v-d',Q,v,d))
#print ('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 13.554, 3.329, 1.131))
assert num.allclose(Q, 13.554, rtol=1.0e-2) #inflow
assert num.allclose(v, 3.329, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 1.131, rtol=1.0e-2) #depth at outlet used to calc v
    #NOTE: FROM HERE DOWN THE UNIT TESTS HAVE NOT BEEN AMENDED TO ALLOW THE VELOCITY COMPONENT TO BE USED. ONLY THE 3 TESTS ABOVE WORK. PM WILL FIX THE ONES BELOW WHEN THE ABOVE 3 ARE WORKING
def test_boyd_4(self):
"""test_boyd_4
This tests the Boyd routine with data obtained from ??? by <NAME>
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.00
outlet_depth=0.8
inlet_velocity=1.0
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 10.0
z_out = 10.0-old_div(culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.2f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.2f'%('Delta E = ',delta_total_energy))
#print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST04 Q-v-d',Q,v,d))
#print ('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', 6.609, 2.621, 0.70))
assert num.allclose(Q, 6.609, rtol=1.0e-2) #inflow
assert num.allclose(v, 2.621, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 0.70, rtol=1.0e-2) #depth at outlet used to calc v
def test_boyd_5(self):
"""test_boyd_5
This tests the Boyd routine with data obtained from ??? by <NAME>
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.50
inlet_velocity= 1.0
outlet_depth=2.5
outlet_velocity=0.5
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 10.0
z_out = 10.0-old_div(culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.3f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.3f'%('Delta E = ',delta_total_energy))
#print ('%s,%.3f,%.3f,%.3f' %('ANUGAcalcsTEST05 Q-v-d',Q,v,d))
#print ('%s,%.3f,%.3f,%.3f' %('Spreadsheet_Boydcalcs',2.961, 0.685, 1.20))
assert num.allclose(Q, 2.961, rtol=1.0e-2) #inflow
assert num.allclose(v, 0.685, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 1.20, rtol=1.0e-2) #depth at outlet used to calc v
def test_boyd_6(self):
"""test_boyd_6
This tests the Boyd routine with data obtained from ??? by <NAME>
"""
# FIXME(Ole): This test fails (20 Feb 2009)
g=9.81
culvert_slope=10 # Downward
inlet_depth=1.50
inlet_velocity= 4.0
outlet_depth=0.80
outlet_velocity=4.0
culvert_length=10.0
culvert_width=3.60
culvert_height=1.20
culvert_type='box'
manning=0.013
sum_loss=1.5
inlet_specific_energy=inlet_depth + old_div(0.5*inlet_velocity**2,g)
z_in = 10.0
z_out = 10.0-old_div(culvert_length*culvert_slope,100)
E_in = z_in+inlet_depth + old_div(0.5*inlet_velocity**2,g)
E_out = z_out+outlet_depth + old_div(0.5*outlet_velocity**2,g)
delta_total_energy = E_in-E_out
Q, v, d = boyd_generalised_culvert_model(inlet_depth,
outlet_depth,
inlet_velocity,
outlet_velocity,
inlet_specific_energy,
delta_total_energy,
g,
culvert_length,
culvert_width,
culvert_height,
culvert_type,
manning,
sum_loss)
#print ('%s,%.3f'%('SPEC_E = ',inlet_specific_energy))
#print ('%s,%.3f'%('Delta E = ',delta_total_energy))
#print ('%s,%.3f,%.3f,%.3f' %('ANUGAcalcsTEST06 Q-v-d',Q,v,d))
#print ('%s,%.3f,%.3f,%.3f' %('Spreadsheet_Boydcalcs',15.537, 3.597, 1.20))
assert num.allclose(Q, 15.537, rtol=1.0e-2) #inflow
assert num.allclose(v, 3.597, rtol=1.0e-2) #outflow velocity
assert num.allclose(d, 1.20, rtol=1.0e-2) #depth at outlet used to calc v
# =========================================================================
# =========================================================================
if __name__ == "__main__":
suite = unittest.makeSuite(Test_culvert_routines_box_10pct, 'test')
runner = unittest.TextTestRunner()
runner.run(suite)
| [
"numpy.allclose",
"anuga.culvert_flows.culvert_routines.boyd_generalised_culvert_model",
"unittest.makeSuite",
"past.utils.old_div",
"unittest.TextTestRunner"
] | [((13929, 13988), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_culvert_routines_box_10pct', '"""test"""'], {}), "(Test_culvert_routines_box_10pct, 'test')\n", (13947, 13988), False, 'import unittest\n'), ((14002, 14027), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (14025, 14027), False, 'import unittest\n'), ((1197, 1242), 'past.utils.old_div', 'old_div', (['(-culvert_length * culvert_slope)', '(100)'], {}), '(-culvert_length * culvert_slope, 100)\n', (1204, 1242), False, 'from past.utils import old_div\n'), ((1515, 1743), 'anuga.culvert_flows.culvert_routines.boyd_generalised_culvert_model', 'boyd_generalised_culvert_model', (['inlet_depth', 'outlet_depth', 'inlet_velocity', 'outlet_velocity', 'inlet_specific_energy', 'delta_total_energy', 'g', 'culvert_length', 'culvert_width', 'culvert_height', 'culvert_type', 'manning', 'sum_loss'], {}), '(inlet_depth, outlet_depth, inlet_velocity,\n outlet_velocity, inlet_specific_energy, delta_total_energy, g,\n culvert_length, culvert_width, culvert_height, culvert_type, manning,\n sum_loss)\n', (1545, 1743), False, 'from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model\n'), ((2503, 2536), 'numpy.allclose', 'num.allclose', (['Q', '(0.5526)'], {'rtol': '(0.1)'}), '(Q, 0.5526, rtol=0.1)\n', (2515, 2536), True, 'import numpy as num\n'), ((2563, 2595), 'numpy.allclose', 'num.allclose', (['v', '(1.146)'], {'rtol': '(0.1)'}), '(v, 1.146, rtol=0.1)\n', (2575, 2595), True, 'import numpy as num\n'), ((2632, 2665), 'numpy.allclose', 'num.allclose', (['d', '(0.1339)'], {'rtol': '(0.1)'}), '(d, 0.1339, rtol=0.1)\n', (2644, 2665), True, 'import numpy as num\n'), ((3360, 3405), 'past.utils.old_div', 'old_div', (['(-culvert_length * culvert_slope)', '(100)'], {}), '(-culvert_length * culvert_slope, 100)\n', (3367, 3405), False, 'from past.utils import old_div\n'), ((3600, 3828), 'anuga.culvert_flows.culvert_routines.boyd_generalised_culvert_model', 'boyd_generalised_culvert_model', (['inlet_depth', 'outlet_depth', 'inlet_velocity', 'outlet_velocity', 'inlet_specific_energy', 'delta_total_energy', 'g', 'culvert_length', 'culvert_width', 'culvert_height', 'culvert_type', 'manning', 'sum_loss'], {}), '(inlet_depth, outlet_depth, inlet_velocity,\n outlet_velocity, inlet_specific_energy, delta_total_energy, g,\n culvert_length, culvert_width, culvert_height, culvert_type, manning,\n sum_loss)\n', (3630, 3828), False, 'from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model\n'), ((4587, 4619), 'numpy.allclose', 'num.allclose', (['Q', '(2.508)'], {'rtol': '(0.1)'}), '(Q, 2.508, rtol=0.1)\n', (4599, 4619), True, 'import numpy as num\n'), ((4646, 4678), 'numpy.allclose', 'num.allclose', (['v', '(1.897)'], {'rtol': '(0.1)'}), '(v, 1.897, rtol=0.1)\n', (4658, 4678), True, 'import numpy as num\n'), ((4715, 4747), 'numpy.allclose', 'num.allclose', (['d', '(0.367)'], {'rtol': '(0.1)'}), '(d, 0.367, rtol=0.1)\n', (4727, 4747), True, 'import numpy as num\n'), ((5433, 5478), 'past.utils.old_div', 'old_div', (['(-culvert_length * culvert_slope)', '(100)'], {}), '(-culvert_length * culvert_slope, 100)\n', (5440, 5478), False, 'from past.utils import old_div\n'), ((5673, 5901), 'anuga.culvert_flows.culvert_routines.boyd_generalised_culvert_model', 'boyd_generalised_culvert_model', (['inlet_depth', 'outlet_depth', 'inlet_velocity', 'outlet_velocity', 'inlet_specific_energy', 'delta_total_energy', 'g', 'culvert_length', 'culvert_width', 'culvert_height', 'culvert_type', 'manning', 'sum_loss'], {}), 
'(inlet_depth, outlet_depth, inlet_velocity,\n outlet_velocity, inlet_specific_energy, delta_total_energy, g,\n culvert_length, culvert_width, culvert_height, culvert_type, manning,\n sum_loss)\n', (5703, 5901), False, 'from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model\n'), ((6776, 6810), 'numpy.allclose', 'num.allclose', (['Q', '(13.554)'], {'rtol': '(0.01)'}), '(Q, 13.554, rtol=0.01)\n', (6788, 6810), True, 'import numpy as num\n'), ((6836, 6869), 'numpy.allclose', 'num.allclose', (['v', '(3.329)'], {'rtol': '(0.01)'}), '(v, 3.329, rtol=0.01)\n', (6848, 6869), True, 'import numpy as num\n'), ((6905, 6938), 'numpy.allclose', 'num.allclose', (['d', '(1.131)'], {'rtol': '(0.01)'}), '(d, 1.131, rtol=0.01)\n', (6917, 6938), True, 'import numpy as num\n'), ((8036, 8264), 'anuga.culvert_flows.culvert_routines.boyd_generalised_culvert_model', 'boyd_generalised_culvert_model', (['inlet_depth', 'outlet_depth', 'inlet_velocity', 'outlet_velocity', 'inlet_specific_energy', 'delta_total_energy', 'g', 'culvert_length', 'culvert_width', 'culvert_height', 'culvert_type', 'manning', 'sum_loss'], {}), '(inlet_depth, outlet_depth, inlet_velocity,\n outlet_velocity, inlet_specific_energy, delta_total_energy, g,\n culvert_length, culvert_width, culvert_height, culvert_type, manning,\n sum_loss)\n', (8066, 8264), False, 'from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model\n'), ((9145, 9178), 'numpy.allclose', 'num.allclose', (['Q', '(6.609)'], {'rtol': '(0.01)'}), '(Q, 6.609, rtol=0.01)\n', (9157, 9178), True, 'import numpy as num\n'), ((9204, 9237), 'numpy.allclose', 'num.allclose', (['v', '(2.621)'], {'rtol': '(0.01)'}), '(v, 2.621, rtol=0.01)\n', (9216, 9237), True, 'import numpy as num\n'), ((9273, 9304), 'numpy.allclose', 'num.allclose', (['d', '(0.7)'], {'rtol': '(0.01)'}), '(d, 0.7, rtol=0.01)\n', (9285, 9304), True, 'import numpy as num\n'), ((10225, 10453), 'anuga.culvert_flows.culvert_routines.boyd_generalised_culvert_model', 'boyd_generalised_culvert_model', (['inlet_depth', 'outlet_depth', 'inlet_velocity', 'outlet_velocity', 'inlet_specific_energy', 'delta_total_energy', 'g', 'culvert_length', 'culvert_width', 'culvert_height', 'culvert_type', 'manning', 'sum_loss'], {}), '(inlet_depth, outlet_depth, inlet_velocity,\n outlet_velocity, inlet_specific_energy, delta_total_energy, g,\n culvert_length, culvert_width, culvert_height, culvert_type, manning,\n sum_loss)\n', (10255, 10453), False, 'from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model\n'), ((11342, 11375), 'numpy.allclose', 'num.allclose', (['Q', '(2.961)'], {'rtol': '(0.01)'}), '(Q, 2.961, rtol=0.01)\n', (11354, 11375), True, 'import numpy as num\n'), ((11401, 11434), 'numpy.allclose', 'num.allclose', (['v', '(0.685)'], {'rtol': '(0.01)'}), '(v, 0.685, rtol=0.01)\n', (11413, 11434), True, 'import numpy as num\n'), ((11470, 11501), 'numpy.allclose', 'num.allclose', (['d', '(1.2)'], {'rtol': '(0.01)'}), '(d, 1.2, rtol=0.01)\n', (11482, 11501), True, 'import numpy as num\n'), ((12423, 12651), 'anuga.culvert_flows.culvert_routines.boyd_generalised_culvert_model', 'boyd_generalised_culvert_model', (['inlet_depth', 'outlet_depth', 'inlet_velocity', 'outlet_velocity', 'inlet_specific_energy', 'delta_total_energy', 'g', 'culvert_length', 'culvert_width', 'culvert_height', 'culvert_type', 'manning', 'sum_loss'], {}), '(inlet_depth, outlet_depth, inlet_velocity,\n outlet_velocity, inlet_specific_energy, delta_total_energy, g,\n culvert_length, culvert_width, 
culvert_height, culvert_type, manning,\n sum_loss)\n', (12453, 12651), False, 'from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model\n'), ((13541, 13575), 'numpy.allclose', 'num.allclose', (['Q', '(15.537)'], {'rtol': '(0.01)'}), '(Q, 15.537, rtol=0.01)\n', (13553, 13575), True, 'import numpy as num\n'), ((13601, 13634), 'numpy.allclose', 'num.allclose', (['v', '(3.597)'], {'rtol': '(0.01)'}), '(v, 3.597, rtol=0.01)\n', (13613, 13634), True, 'import numpy as num\n'), ((13670, 13701), 'numpy.allclose', 'num.allclose', (['d', '(1.2)'], {'rtol': '(0.01)'}), '(d, 1.2, rtol=0.01)\n', (13682, 13701), True, 'import numpy as num\n'), ((1086, 1123), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (1093, 1123), False, 'from past.utils import old_div\n'), ((1274, 1311), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (1281, 1311), False, 'from past.utils import old_div\n'), ((1344, 1382), 'past.utils.old_div', 'old_div', (['(0.5 * outlet_velocity ** 2)', 'g'], {}), '(0.5 * outlet_velocity ** 2, g)\n', (1351, 1382), False, 'from past.utils import old_div\n'), ((1462, 1499), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (1469, 1499), False, 'from past.utils import old_div\n'), ((3291, 3328), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (3298, 3328), False, 'from past.utils import old_div\n'), ((3437, 3474), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (3444, 3474), False, 'from past.utils import old_div\n'), ((3507, 3545), 'past.utils.old_div', 'old_div', (['(0.5 * outlet_velocity ** 2)', 'g'], {}), '(0.5 * outlet_velocity ** 2, g)\n', (3514, 3545), False, 'from past.utils import old_div\n'), ((5364, 5401), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (5371, 5401), False, 'from past.utils import old_div\n'), ((5510, 5547), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (5517, 5547), False, 'from past.utils import old_div\n'), ((5580, 5618), 'past.utils.old_div', 'old_div', (['(0.5 * outlet_velocity ** 2)', 'g'], {}), '(0.5 * outlet_velocity ** 2, g)\n', (5587, 5618), False, 'from past.utils import old_div\n'), ((7720, 7757), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (7727, 7757), False, 'from past.utils import old_div\n'), ((7795, 7839), 'past.utils.old_div', 'old_div', (['(culvert_length * culvert_slope)', '(100)'], {}), '(culvert_length * culvert_slope, 100)\n', (7802, 7839), False, 'from past.utils import old_div\n'), ((7871, 7908), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (7878, 7908), False, 'from past.utils import old_div\n'), ((7941, 7979), 'past.utils.old_div', 'old_div', (['(0.5 * outlet_velocity ** 2)', 'g'], {}), '(0.5 * outlet_velocity ** 2, g)\n', (7948, 7979), False, 'from past.utils import old_div\n'), ((9909, 9946), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (9916, 9946), False, 'from past.utils import old_div\n'), ((9984, 10028), 'past.utils.old_div', 'old_div', (['(culvert_length * 
culvert_slope)', '(100)'], {}), '(culvert_length * culvert_slope, 100)\n', (9991, 10028), False, 'from past.utils import old_div\n'), ((10060, 10097), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (10067, 10097), False, 'from past.utils import old_div\n'), ((10130, 10168), 'past.utils.old_div', 'old_div', (['(0.5 * outlet_velocity ** 2)', 'g'], {}), '(0.5 * outlet_velocity ** 2, g)\n', (10137, 10168), False, 'from past.utils import old_div\n'), ((12107, 12144), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (12114, 12144), False, 'from past.utils import old_div\n'), ((12182, 12226), 'past.utils.old_div', 'old_div', (['(culvert_length * culvert_slope)', '(100)'], {}), '(culvert_length * culvert_slope, 100)\n', (12189, 12226), False, 'from past.utils import old_div\n'), ((12258, 12295), 'past.utils.old_div', 'old_div', (['(0.5 * inlet_velocity ** 2)', 'g'], {}), '(0.5 * inlet_velocity ** 2, g)\n', (12265, 12295), False, 'from past.utils import old_div\n'), ((12328, 12366), 'past.utils.old_div', 'old_div', (['(0.5 * outlet_velocity ** 2)', 'g'], {}), '(0.5 * outlet_velocity ** 2, g)\n', (12335, 12366), False, 'from past.utils import old_div\n')] |
from numpy import linalg
import numpy as np
from loguru import logger
class Regression:
def __init__(self, intercept=True):
self.beta = None
self.intercept = intercept
def fit(self, features, labels):
features = self._add_bias(features)
self._fit(features, labels)
def predict(self, features):
features = self._add_bias(features)
return self._predict(features)
def _add_bias(self, features):
if self.intercept:
ones = np.ones((len(features), 1))
return np.hstack([ones, features])
return features
def _fit(self, features, labels):
raise NotImplementedError
def _predict(self, features):
return features @ self.beta
class LeastSquare(Regression):
def __init__(self, intercept=False):
super().__init__(intercept)
def _fit(self, features, labels):
xx = features.T @ features
xy = features.T @ labels
self.beta = linalg.solve(xx, xy)
class Ridge(Regression):
def __init__(self, intercept=False, _lambda=0.2):
super().__init__(intercept)
self._lambda = _lambda
def _fit(self, features, labels):
identity = self._lambda * np.identity(features.shape[1])
xy = features.T @ labels
xx = features.T @ features
self.beta = np.linalg.inv(xx + identity) @ xy
class GradientDescent(Regression):
def __init__(self, intercept=False, learning_rate=0.01, batch=10000,
threshold=1e-5, random=False):
super().__init__(intercept)
self._learning_rate = learning_rate
self._batch = batch
self._threshold = threshold
self._random = random
def _fit(self, features, labels):
observations, dimensions = features.shape
beta = np.random.randn(dimensions).reshape(-1, 1) if self._random \
else np.ones(dimensions).reshape(-1, 1)
for i in range(self._batch):
error = features @ beta - labels
update = features.T @ error * (self._learning_rate / observations)
beta -= update
if np.abs(update).sum() < self._threshold:
self.beta = beta
return
        # Keep the last iterate even on non-convergence so predict() stays usable.
        self.beta = beta
        logger.warning('Gradient did not converge with learning rate {}'.format(self._learning_rate))
        return
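# A hedged usage sketch (not part of the original module): on synthetic,
# noise-free data all three solvers should recover beta close to [1, 2, 3].
if __name__ == '__main__':
    x = np.random.randn(100, 3)
    y = x @ np.array([[1.0], [2.0], [3.0]])
    for model in (LeastSquare(), Ridge(_lambda=0.1), GradientDescent()):
        model.fit(x, y)
        print(type(model).__name__, model.beta.ravel())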
| [
"numpy.identity",
"numpy.abs",
"numpy.linalg.solve",
"numpy.ones",
"numpy.hstack",
"numpy.linalg.inv",
"numpy.random.randn"
] | [((990, 1010), 'numpy.linalg.solve', 'linalg.solve', (['xx', 'xy'], {}), '(xx, xy)\n', (1002, 1010), False, 'from numpy import linalg\n'), ((555, 582), 'numpy.hstack', 'np.hstack', (['[ones, features]'], {}), '([ones, features])\n', (564, 582), True, 'import numpy as np\n'), ((1233, 1263), 'numpy.identity', 'np.identity', (['features.shape[1]'], {}), '(features.shape[1])\n', (1244, 1263), True, 'import numpy as np\n'), ((1352, 1380), 'numpy.linalg.inv', 'np.linalg.inv', (['(xx + identity)'], {}), '(xx + identity)\n', (1365, 1380), True, 'import numpy as np\n'), ((1824, 1851), 'numpy.random.randn', 'np.random.randn', (['dimensions'], {}), '(dimensions)\n', (1839, 1851), True, 'import numpy as np\n'), ((1902, 1921), 'numpy.ones', 'np.ones', (['dimensions'], {}), '(dimensions)\n', (1909, 1921), True, 'import numpy as np\n'), ((2143, 2157), 'numpy.abs', 'np.abs', (['update'], {}), '(update)\n', (2149, 2157), True, 'import numpy as np\n')] |
"""
This module provides functions to get the dimensionality of a structure.
A number of different algorithms are implemented. These are based on the
following publications:
get_dimensionality_larsen:
- <NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
get_dimensionality_cheon:
- <NAME>.; <NAME>.; <NAME>.; <NAME>.; Reed,
<NAME>. Data Mining for New Two- and One-Dimensional Weakly Bonded Solids
and Lattice-Commensurate Heterostructures. Nano Lett. 2017.
get_dimensionality_gorai:
- <NAME>., <NAME>. & <NAME>. Computational Identification of
Promising Thermoelectric Materials Among Known Quasi-2D Binary Compounds.
J. Mater. Chem. A 2, 4136 (2016).
"""
import copy
import itertools
from collections import defaultdict
import numpy as np
from networkx.readwrite import json_graph
from pymatgen.analysis.graphs import MoleculeGraph, StructureGraph
from pymatgen.analysis.local_env import JmolNN
from pymatgen.analysis.structure_analyzer import get_max_bond_lengths
from pymatgen.core.lattice import get_integer_index
from pymatgen.core.periodic_table import Specie
from pymatgen.core.structure import Structure, Molecule
from pymatgen.core.surface import SlabGenerator
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "<NAME>, <NAME>, <NAME>"
def get_dimensionality_larsen(bonded_structure):
"""
Gets the dimensionality of a bonded structure.
The dimensionality of the structure is the highest dimensionality of all
structure components. This method is very robust and can handle
many tricky structures, regardless of structure type or improper connections
due to periodic boundary conditions.
Requires a StructureGraph object as input. This can be generated using one
of the NearNeighbor classes. For example, using the CrystalNN class::
bonded_structure = CrystalNN().get_bonded_structure(structure)
Based on the modified breadth-first-search algorithm described in:
<NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
Returns:
(int): The dimensionality of the structure.
"""
return max([c['dimensionality'] for c in
get_structure_components(bonded_structure)])
def get_structure_components(bonded_structure, inc_orientation=False,
inc_site_ids=False, inc_molecule_graph=False):
"""
Gets information on the components in a bonded structure.
Correctly determines the dimensionality of all structures, regardless of
structure type or improper connections due to periodic boundary conditions.
Requires a StructureGraph object as input. This can be generated using one
of the NearNeighbor classes. For example, using the CrystalNN class::
bonded_structure = CrystalNN().get_bonded_structure(structure)
Based on the modified breadth-first-search algorithm described in:
<NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
inc_orientation (bool, optional): Whether to include the orientation
of the structure component. For surfaces, the miller index is given,
for one-dimensional structures, the direction of the chain is given.
inc_site_ids (bool, optional): Whether to include the site indices
of the sites in the structure component.
inc_molecule_graph (bool, optional): Whether to include MoleculeGraph
objects for zero-dimensional components.
Returns:
(list of dict): Information on the components in a structure as a list
of dictionaries with the keys:
- "structure_graph": A pymatgen StructureGraph object for the
component.
- "dimensionality": The dimensionality of the structure component as an
int.
- "orientation": If inc_orientation is `True`, the orientation of the
component as a tuple. E.g. (1, 1, 1)
- "site_ids": If inc_site_ids is `True`, the site indices of the
sites in the component as a tuple.
- "molecule_graph": If inc_molecule_graph is `True`, the site a
MoleculeGraph object for zero-dimensional components.
"""
import networkx as nx # optional dependency therefore not top level import
comp_graphs = (bonded_structure.graph.subgraph(c) for c in
nx.weakly_connected_components(bonded_structure.graph))
components = []
for graph in comp_graphs:
dimensionality, vertices = calculate_dimensionality_of_site(
bonded_structure, list(graph.nodes())[0], inc_vertices=True)
component = {'dimensionality': dimensionality}
if inc_orientation:
if dimensionality in [1, 2]:
vertices = np.array(vertices)
g = vertices.sum(axis=0) / vertices.shape[0]
# run singular value decomposition
_, _, vh = np.linalg.svd(vertices - g)
# get direction (first column is best fit line,
# 3rd column is unitary norm)
index = 2 if dimensionality == 2 else 0
orientation = get_integer_index(vh[index, :])
else:
orientation = None
component['orientation'] = orientation
if inc_site_ids:
component['site_ids'] = tuple(graph.nodes())
if inc_molecule_graph and dimensionality == 0:
component['molecule_graph'] = zero_d_graph_to_molecule_graph(
bonded_structure, graph)
component_structure = Structure.from_sites(
[bonded_structure.structure[n] for n in sorted(graph.nodes())])
sorted_graph = nx.convert_node_labels_to_integers(
graph, ordering="sorted")
component_graph = StructureGraph(
component_structure,
graph_data=json_graph.adjacency_data(sorted_graph))
component['structure_graph'] = component_graph
components.append(component)
return components
def calculate_dimensionality_of_site(bonded_structure, site_index,
inc_vertices=False):
"""
Calculates the dimensionality of the component containing the given site.
Implements directly the modified breadth-first-search algorithm described in
Algorithm 1 of:
<NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
site_index (int): The index of a site in the component of interest.
inc_vertices (bool, optional): Whether to return the vertices (site
images) of the component.
Returns:
(int or tuple): If inc_vertices is False, the dimensionality of the
component will be returned as an int. If inc_vertices is true, the
function will return a tuple of (dimensionality, vertices), where
vertices is a list of tuples. E.g. [(0, 0, 0), (1, 1, 1)].
"""
def neighbours(comp_index):
return [(s.index, s.jimage) for s
in bonded_structure.get_connected_sites(comp_index)]
def rank(vertices):
if len(vertices) == 0:
return -1
if len(vertices) == 1:
return 0
vertices = np.array(list(vertices))
return np.linalg.matrix_rank(vertices[1:] - vertices[0])
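    # Dimensionality equals the rank of the set of reached images: {(0,0,0)}
    # alone gives rank 0 (0D); adding (1,0,0) gives rank 1 (a chain); two
    # independent lattice translations give rank 2 (a layer); three give 3D.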
def rank_increase(seen, candidate):
rank0 = len(seen) - 1
rank1 = rank(seen.union({candidate}))
return rank1 > rank0
connected_sites = {i: neighbours(i) for i in
range(bonded_structure.structure.num_sites)}
seen_vertices = set()
seen_comp_vertices = defaultdict(set)
queue = [(site_index, (0, 0, 0))]
while len(queue) > 0:
comp_i, image_i = queue.pop(0)
if (comp_i, image_i) in seen_vertices:
continue
seen_vertices.add((comp_i, image_i))
if not rank_increase(seen_comp_vertices[comp_i], image_i):
continue
seen_comp_vertices[comp_i].add(image_i)
for comp_j, image_j in connected_sites[comp_i]:
image_j = tuple(np.add(image_j, image_i))
if (comp_j, image_j) in seen_vertices:
continue
if rank_increase(seen_comp_vertices[comp_j], image_j):
queue.append((comp_j, image_j))
if inc_vertices:
return (rank(seen_comp_vertices[site_index]),
list(seen_comp_vertices[site_index]))
return rank(seen_comp_vertices[site_index])
def zero_d_graph_to_molecule_graph(bonded_structure, graph):
"""
Converts a zero-dimensional networkx Graph object into a MoleculeGraph.
Implements a similar breadth-first search to that in
calculate_dimensionality_of_site().
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
graph (nx.Graph): A networkx `Graph` object for the component of
interest.
Returns:
(MoleculeGraph): A MoleculeGraph object of the component.
"""
import networkx as nx
seen_indices = []
sites = []
start_index = list(graph.nodes())[0]
queue = [(start_index, (0, 0, 0),
bonded_structure.structure[start_index])]
while len(queue) > 0:
comp_i, image_i, site_i = queue.pop(0)
if comp_i in [x[0] for x in seen_indices]:
raise ValueError("Graph component is not 0D")
seen_indices.append((comp_i, image_i))
sites.append(site_i)
for site_j in bonded_structure.get_connected_sites(
comp_i, jimage=image_i):
if ((site_j.index, site_j.jimage) not in seen_indices and
(site_j.index, site_j.jimage, site_j.site) not in queue):
queue.append((site_j.index, site_j.jimage, site_j.site))
# sort the list of indices and the graph by index to make consistent
indices_ordering = np.argsort([x[0] for x in seen_indices])
sorted_sites = np.array(sites, dtype=object)[indices_ordering]
sorted_graph = nx.convert_node_labels_to_integers(graph, ordering="sorted")
mol = Molecule([s.specie for s in sorted_sites],
[s.coords for s in sorted_sites])
mol_graph = MoleculeGraph.with_edges(mol, nx.Graph(sorted_graph).edges())
return mol_graph
def get_dimensionality_cheon(structure_raw, tolerance=0.45,
ldict=JmolNN().el_radius, standardize=True, larger_cell=False):
"""
Algorithm for finding the dimensions of connected subunits in a structure.
This method finds the dimensionality of the material even when the material
is not layered along low-index planes, or does not have flat
layers/molecular wires.
Author: "<NAME>"
Email: "<EMAIL>"
See details at :
<NAME>.; <NAME>.; <NAME>.; <NAME>.; Reed,
<NAME>. Data Mining for New Two- and One-Dimensional Weakly Bonded Solids and
Lattice-Commensurate Heterostructures. Nano Lett. 2017.
Args:
structure_raw (Structure): A pymatgen Structure object.
tolerance (float): length in angstroms used in finding bonded atoms.
            Two atoms are considered bonded if (distance between atoms 1 and 2)
            < (radius of atom 1) + (radius of atom 2) + (tolerance). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict (dict): dictionary of bond lengths used in finding bonded atoms.
Values from JMol are used as default
standardize: works with conventional standard structures if True. It is
recommended to keep this as True.
larger_cell: tests with 3x3x3 supercell instead of 2x2x2. Testing with
            2x2x2 supercell is faster but misclassifies rare interpenetrated 3D
structures. Testing with a larger cell circumvents this problem
Returns:
(str): dimension of the largest cluster as a string. If there are ions
or molecules it returns 'intercalated ion/molecule'
"""
if standardize:
structure = SpacegroupAnalyzer(structure_raw).get_conventional_standard_structure()
else:
structure = structure_raw
structure_save = copy.copy(structure_raw)
connected_list1 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max1, min1, clusters1 = find_clusters(structure, connected_list1)
if larger_cell:
structure.make_supercell([[3, 0, 0], [0, 3, 0], [0, 0, 3]])
connected_list3 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max3, min3, clusters3 = find_clusters(structure, connected_list3)
if min3 == min1:
if max3 == max1:
dim = '0D'
else:
dim = 'intercalated molecule'
else:
dim = np.log2(float(max3) / max1) / np.log2(3)
if dim == int(dim):
dim = str(int(dim)) + 'D'
else:
return None
else:
structure.make_supercell([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
connected_list2 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max2, min2, clusters2 = find_clusters(structure, connected_list2)
if min2 == 1:
dim = 'intercalated ion'
elif min2 == min1:
if max2 == max1:
dim = '0D'
else:
dim = 'intercalated molecule'
else:
dim = np.log2(float(max2) / max1)
if dim == int(dim):
dim = str(int(dim)) + 'D'
else:
structure = copy.copy(structure_save)
structure.make_supercell([[3, 0, 0], [0, 3, 0], [0, 0, 3]])
connected_list3 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max3, min3, clusters3 = find_clusters(structure, connected_list3)
if min3 == min2:
if max3 == max2:
dim = '0D'
else:
dim = 'intercalated molecule'
else:
dim = np.log2(float(max3) / max1) / np.log2(3)
if dim == int(dim):
dim = str(int(dim)) + 'D'
else:
return None
return dim
def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):
"""
    Finds bonded atoms and returns an adjacency matrix of bonded atoms.
Author: "<NAME>"
Email: "<EMAIL>"
Args:
struct (Structure): Input structure
        tolerance: length in angstroms used in finding bonded atoms. Two atoms
            are considered bonded if (distance between atoms 1 and 2) <
            (radius of atom 1) + (radius of atom 2) + (tolerance). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values
from JMol are used as default
Returns:
(np.ndarray): A numpy array of shape (number of atoms, number of atoms);
If any image of atom j is bonded to atom i with periodic boundary
conditions, the matrix element [atom i, atom j] is 1.
"""
# pylint: disable=E1136
n_atoms = len(struct.species)
fc = np.array(struct.frac_coords)
fc_copy = np.repeat(fc[:, :, np.newaxis], 27, axis=2)
neighbors = np.array(list(itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]))).T
neighbors = np.repeat(neighbors[np.newaxis, :, :], 1, axis=0)
fc_diff = fc_copy - neighbors
species = list(map(str, struct.species))
# in case of charged species
for i, item in enumerate(species):
if item not in ldict.keys():
species[i] = str(Specie.from_string(item).element)
latmat = struct.lattice.matrix
connected_matrix = np.zeros((n_atoms, n_atoms))
for i in range(n_atoms):
for j in range(i + 1, n_atoms):
max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance
frac_diff = fc_diff[j] - fc_copy[i]
distance_ij = np.dot(latmat.T, frac_diff)
# print(np.linalg.norm(distance_ij,axis=0))
if sum(np.linalg.norm(distance_ij, axis=0) < max_bond_length) > 0:
connected_matrix[i, j] = 1
connected_matrix[j, i] = 1
return connected_matrix
def find_clusters(struct, connected_matrix):
"""
Finds bonded clusters of atoms in the structure with periodic boundary
conditions.
If there are atoms that are not bonded to anything, returns [0,1,0]. (For
faster computation time)
Author: "<NAME>"
Email: "<EMAIL>"
Args:
struct (Structure): Input structure
connected_matrix: Must be made from the same structure with
find_connected_atoms() function.
Returns:
max_cluster: the size of the largest cluster in the crystal structure
min_cluster: the size of the smallest cluster in the crystal structure
clusters: list of bonded clusters found here, clusters are formatted as
sets of indices of atoms
"""
n_atoms = len(struct.species)
if n_atoms == 0:
return [0, 0, 0]
if 0 in np.sum(connected_matrix, axis=0):
return [0, 1, 0]
cluster_sizes = []
clusters = []
visited = [False for item in range(n_atoms)]
connected_matrix += np.eye(len(connected_matrix))
def visit(atom, atom_cluster):
visited[atom] = True
new_cluster = set(np.where(connected_matrix[atom] != 0)[0]).union(atom_cluster)
atom_cluster = new_cluster
for new_atom in atom_cluster:
if not visited[new_atom]:
visited[new_atom] = True
atom_cluster = visit(new_atom, atom_cluster)
return atom_cluster
for i in range(n_atoms):
if not visited[i]:
atom_cluster = set()
cluster = visit(i, atom_cluster)
clusters.append(cluster)
cluster_sizes.append(len(cluster))
max_cluster = max(cluster_sizes)
min_cluster = min(cluster_sizes)
return [max_cluster, min_cluster, clusters]
def get_dimensionality_gorai(structure, max_hkl=2, el_radius_updates=None,
min_slab_size=5, min_vacuum_size=5,
standardize=True, bonds=None):
"""
This method returns whether a structure is 3D, 2D (layered), or 1D (linear
chains or molecules) according to the algorithm published in <NAME>.,
<NAME>. & <NAME>. Computational Identification of Promising
Thermoelectric Materials Among Known Quasi-2D Binary Compounds. J. Mater.
Chem. A 2, 4136 (2016).
Note that a 1D structure detection might indicate problems in the bonding
algorithm, particularly for ionic crystals (e.g., NaCl)
Users can change the behavior of bonds detection by passing either
el_radius_updates to update atomic radii for auto-detection of max bond
distances, or bonds to explicitly specify max bond distances for atom pairs.
Note that if you pass both, el_radius_updates are ignored.
Args:
structure: (Structure) structure to analyze dimensionality for
max_hkl: (int) max index of planes to look for layers
el_radius_updates: (dict) symbol->float to update atomic radii
min_slab_size: (float) internal surface construction parameter
min_vacuum_size: (float) internal surface construction parameter
standardize (bool): whether to standardize the structure before
analysis. Set to False only if you already have the structure in a
convention where layers / chains will be along low <hkl> indexes.
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns: (int) the dimensionality of the structure - 1 (molecules/chains),
2 (layered), or 3 (3D)
"""
if standardize:
structure = SpacegroupAnalyzer(structure). \
get_conventional_standard_structure()
if not bonds:
bonds = get_max_bond_lengths(structure, el_radius_updates)
num_surfaces = 0
for h in range(max_hkl):
for k in range(max_hkl):
for l in range(max_hkl):
if max([h, k, l]) > 0 and num_surfaces < 2:
sg = SlabGenerator(structure, (h, k, l),
min_slab_size=min_slab_size,
min_vacuum_size=min_vacuum_size)
slabs = sg.get_slabs(bonds)
for _ in slabs:
num_surfaces += 1
return 3 - min(num_surfaces, 2)
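# A hedged usage sketch (assumes pymatgen's CrystalNN bonding and a Structure
# obtained elsewhere, e.g. structure = Structure.from_file("POSCAR")):
#     from pymatgen.analysis.local_env import CrystalNN
#     bonded = CrystalNN().get_bonded_structure(structure)
#     dim = get_dimensionality_larsen(bonded)        # int: 0, 1, 2, or 3
#     dim_str = get_dimensionality_cheon(structure)  # str, e.g. '2D'
#     dim_int = get_dimensionality_gorai(structure)  # int: 1, 2, or 3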
| [
"numpy.linalg.matrix_rank",
"pymatgen.core.structure.Molecule",
"numpy.argsort",
"numpy.array",
"networkx.weakly_connected_components",
"numpy.linalg.norm",
"copy.copy",
"numpy.repeat",
"numpy.where",
"pymatgen.core.periodic_table.Specie.from_string",
"itertools.product",
"numpy.dot",
"pymat... | [((8654, 8670), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (8665, 8670), False, 'from collections import defaultdict\n'), ((11037, 11077), 'numpy.argsort', 'np.argsort', (['[x[0] for x in seen_indices]'], {}), '([x[0] for x in seen_indices])\n', (11047, 11077), True, 'import numpy as np\n'), ((11164, 11224), 'networkx.convert_node_labels_to_integers', 'nx.convert_node_labels_to_integers', (['graph'], {'ordering': '"""sorted"""'}), "(graph, ordering='sorted')\n", (11198, 11224), True, 'import networkx as nx\n'), ((11235, 11311), 'pymatgen.core.structure.Molecule', 'Molecule', (['[s.specie for s in sorted_sites]', '[s.coords for s in sorted_sites]'], {}), '([s.specie for s in sorted_sites], [s.coords for s in sorted_sites])\n', (11243, 11311), False, 'from pymatgen.core.structure import Structure, Molecule\n'), ((13302, 13326), 'copy.copy', 'copy.copy', (['structure_raw'], {}), '(structure_raw)\n', (13311, 13326), False, 'import copy\n'), ((16410, 16438), 'numpy.array', 'np.array', (['struct.frac_coords'], {}), '(struct.frac_coords)\n', (16418, 16438), True, 'import numpy as np\n'), ((16453, 16496), 'numpy.repeat', 'np.repeat', (['fc[:, :, np.newaxis]', '(27)'], {'axis': '(2)'}), '(fc[:, :, np.newaxis], 27, axis=2)\n', (16462, 16496), True, 'import numpy as np\n'), ((16601, 16650), 'numpy.repeat', 'np.repeat', (['neighbors[np.newaxis, :, :]', '(1)'], {'axis': '(0)'}), '(neighbors[np.newaxis, :, :], 1, axis=0)\n', (16610, 16650), True, 'import numpy as np\n'), ((16960, 16988), 'numpy.zeros', 'np.zeros', (['(n_atoms, n_atoms)'], {}), '((n_atoms, n_atoms))\n', (16968, 16988), True, 'import numpy as np\n'), ((6421, 6481), 'networkx.convert_node_labels_to_integers', 'nx.convert_node_labels_to_integers', (['graph'], {'ordering': '"""sorted"""'}), "(graph, ordering='sorted')\n", (6455, 6481), True, 'import networkx as nx\n'), ((8288, 8337), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['(vertices[1:] - vertices[0])'], {}), '(vertices[1:] - vertices[0])\n', (8309, 8337), True, 'import numpy as np\n'), ((11097, 11126), 'numpy.array', 'np.array', (['sites'], {'dtype': 'object'}), '(sites, dtype=object)\n', (11105, 11126), True, 'import numpy as np\n'), ((11528, 11536), 'pymatgen.analysis.local_env.JmolNN', 'JmolNN', ([], {}), '()\n', (11534, 11536), False, 'from pymatgen.analysis.local_env import JmolNN\n'), ((15489, 15497), 'pymatgen.analysis.local_env.JmolNN', 'JmolNN', ([], {}), '()\n', (15495, 15497), False, 'from pymatgen.analysis.local_env import JmolNN\n'), ((18339, 18371), 'numpy.sum', 'np.sum', (['connected_matrix'], {'axis': '(0)'}), '(connected_matrix, axis=0)\n', (18345, 18371), True, 'import numpy as np\n'), ((21365, 21415), 'pymatgen.analysis.structure_analyzer.get_max_bond_lengths', 'get_max_bond_lengths', (['structure', 'el_radius_updates'], {}), '(structure, el_radius_updates)\n', (21385, 21415), False, 'from pymatgen.analysis.structure_analyzer import get_max_bond_lengths\n'), ((5090, 5144), 'networkx.weakly_connected_components', 'nx.weakly_connected_components', (['bonded_structure.graph'], {}), '(bonded_structure.graph)\n', (5120, 5144), True, 'import networkx as nx\n'), ((17213, 17240), 'numpy.dot', 'np.dot', (['latmat.T', 'frac_diff'], {}), '(latmat.T, frac_diff)\n', (17219, 17240), True, 'import numpy as np\n'), ((5492, 5510), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (5500, 5510), True, 'import numpy as np\n'), ((5652, 5679), 'numpy.linalg.svd', 'np.linalg.svd', (['(vertices - g)'], {}), '(vertices - g)\n', 
(5665, 5679), True, 'import numpy as np\n'), ((5877, 5908), 'pymatgen.core.lattice.get_integer_index', 'get_integer_index', (['vh[index, :]'], {}), '(vh[index, :])\n', (5894, 5908), False, 'from pymatgen.core.lattice import get_integer_index\n'), ((6593, 6632), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['sorted_graph'], {}), '(sorted_graph)\n', (6618, 6632), False, 'from networkx.readwrite import json_graph\n'), ((9113, 9137), 'numpy.add', 'np.add', (['image_j', 'image_i'], {}), '(image_j, image_i)\n', (9119, 9137), True, 'import numpy as np\n'), ((11377, 11399), 'networkx.Graph', 'nx.Graph', (['sorted_graph'], {}), '(sorted_graph)\n', (11385, 11399), True, 'import networkx as nx\n'), ((13165, 13198), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure_raw'], {}), '(structure_raw)\n', (13183, 13198), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((13946, 13956), 'numpy.log2', 'np.log2', (['(3)'], {}), '(3)\n', (13953, 13956), True, 'import numpy as np\n'), ((16527, 16580), 'itertools.product', 'itertools.product', (['[0, 1, -1]', '[0, 1, -1]', '[0, 1, -1]'], {}), '([0, 1, -1], [0, 1, -1], [0, 1, -1])\n', (16544, 16580), False, 'import itertools\n'), ((21247, 21276), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure'], {}), '(structure)\n', (21265, 21276), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((14707, 14732), 'copy.copy', 'copy.copy', (['structure_save'], {}), '(structure_save)\n', (14716, 14732), False, 'import copy\n'), ((16868, 16892), 'pymatgen.core.periodic_table.Specie.from_string', 'Specie.from_string', (['item'], {}), '(item)\n', (16886, 16892), False, 'from pymatgen.core.periodic_table import Specie\n'), ((21622, 21723), 'pymatgen.core.surface.SlabGenerator', 'SlabGenerator', (['structure', '(h, k, l)'], {'min_slab_size': 'min_slab_size', 'min_vacuum_size': 'min_vacuum_size'}), '(structure, (h, k, l), min_slab_size=min_slab_size,\n min_vacuum_size=min_vacuum_size)\n', (21635, 21723), False, 'from pymatgen.core.surface import SlabGenerator\n'), ((17316, 17351), 'numpy.linalg.norm', 'np.linalg.norm', (['distance_ij'], {'axis': '(0)'}), '(distance_ij, axis=0)\n', (17330, 17351), True, 'import numpy as np\n'), ((18634, 18671), 'numpy.where', 'np.where', (['(connected_matrix[atom] != 0)'], {}), '(connected_matrix[atom] != 0)\n', (18642, 18671), True, 'import numpy as np\n'), ((15254, 15264), 'numpy.log2', 'np.log2', (['(3)'], {}), '(3)\n', (15261, 15264), True, 'import numpy as np\n')] |
import numpy as np
from deepscratch.dataloader.dataloader import DataLoader
class XOR(DataLoader):
def __init__(self):
self.x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
self.y = np.array([[0], [1], [1], [0]]) | [
"numpy.array"
] | [((143, 185), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (151, 185), True, 'import numpy as np\n'), ((203, 233), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (211, 233), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import os
reps = [ 1 , 2 , 3 , 4 , 5 ]
#reps = [ 1 ]
pwd = os.getcwd()
pkas = {}
for rep in reps:
allfiles = os.listdir(pwd+'/'+str(rep))
path = pwd + '/' + str(rep) + '/'
for filename in allfiles:
if( filename.split('.')[-1] == 'xvg' ):
fullpath = path + filename
filename = filename.replace('-','_').replace('.','_')
filename = filename.split('_')
res_name = str(filename[1])
res_numb = str(filename[2])
chain = str(filename[3])
residue = res_name + '-' + res_numb + '_' + chain
if( residue not in pkas ):
pkas[residue] = []
xvg = pd.read_csv( fullpath , sep='\t' , header=None)
xvg.columns = ['time','pka']
avg = xvg['pka'].mean()
std = xvg['pka'].std(ddof=1)
pkas[residue].append( avg )
residue_list = [x for x in pkas.keys()]
residue_list.sort()
with open('pka_avg.dat','w') as output_file:
for residue in residue_list:
vec = np.array( pkas[residue] )
avg = vec.mean()
err = vec.std() / np.sqrt( len(vec) )
output_file.write('%s\t%.6f\t%.6f\n' % (residue , avg, err))
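# 'err' above is the standard error of the mean across replicas (std / sqrt(n)).
# Note numpy's vec.std() defaults to ddof=0, whereas the per-file 'std'
# computed earlier uses ddof=1 and is never written out.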
| [
"numpy.array",
"pandas.read_csv",
"os.getcwd"
] | [((101, 112), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (110, 112), False, 'import os\n'), ((978, 1001), 'numpy.array', 'np.array', (['pkas[residue]'], {}), '(pkas[residue])\n', (986, 1001), True, 'import numpy as np\n'), ((650, 694), 'pandas.read_csv', 'pd.read_csv', (['fullpath'], {'sep': '"""\t"""', 'header': 'None'}), "(fullpath, sep='\\t', header=None)\n", (661, 694), True, 'import pandas as pd\n')] |
from __future__ import absolute_import
from sklearn import neural_network
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import argparse
from sklearn.metrics import accuracy_score
class MLPModel():
def __init__(self, filename, stock_filename, company, activation='logistic',
solver='lbfgs', alpha=0.01):
self.df = pd.read_csv(filename)
stock = pd.read_csv(stock_filename)
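        # strftime("%s") (seconds since the epoch) is a GNU extension and may
        # fail on non-Linux platforms.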
dates = [int(pd.to_datetime(date).strftime("%s")) for date in self.df['date']]
self.x = np.asarray([dates, self.df['neg'], self.df['pos'], self.df['neu']], dtype=float).T
self.y = np.asarray(stock[company + '_open'])
self.activation = activation
self.solver = solver
self.alpha = alpha
self.model = neural_network.MLPRegressor(hidden_layer_sizes=(500,),
activation=self.activation,
solver=self.solver,
alpha=self.alpha)
def train(self):
self.model.fit(self.x, self.y)
def predict(self):
return self.model.predict(self.x)
def mape(self):
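        # As implemented this is the mean absolute error; a true MAPE would
        # also divide each term by abs(self.y[count]).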
self.train()
preds = self.predict()
score = 0
for count, pred in enumerate(preds, start=0):
score += abs(pred - self.y[count])
score /= len(self.y)
return score
def main():
ap = argparse.ArgumentParser()
ap.add_argument('--feat_vec', required=True, help='feature vectors created using preprocess')
ap.add_argument('--stock', required=True, help='stock values')
ap.add_argument('--company', required=True)
args = ap.parse_args()
model = MLPModel(args.feat_vec, args.stock, args.company)
print("MAPE SCORE: ", model.mape())
return 0
if __name__ == "__main__":
main()
| [
"sklearn.neural_network.MLPRegressor",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.asarray",
"pandas.to_datetime"
] | [((1466, 1491), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1489, 1491), False, 'import argparse\n'), ((386, 407), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (397, 407), True, 'import pandas as pd\n'), ((424, 451), 'pandas.read_csv', 'pd.read_csv', (['stock_filename'], {}), '(stock_filename)\n', (435, 451), True, 'import pandas as pd\n'), ((656, 692), 'numpy.asarray', 'np.asarray', (["stock[company + '_open']"], {}), "(stock[company + '_open'])\n", (666, 692), True, 'import numpy as np\n'), ((807, 932), 'sklearn.neural_network.MLPRegressor', 'neural_network.MLPRegressor', ([], {'hidden_layer_sizes': '(500,)', 'activation': 'self.activation', 'solver': 'self.solver', 'alpha': 'self.alpha'}), '(hidden_layer_sizes=(500,), activation=self.\n activation, solver=self.solver, alpha=self.alpha)\n', (834, 932), False, 'from sklearn import neural_network\n'), ((556, 641), 'numpy.asarray', 'np.asarray', (["[dates, self.df['neg'], self.df['pos'], self.df['neu']]"], {'dtype': 'float'}), "([dates, self.df['neg'], self.df['pos'], self.df['neu']], dtype=float\n )\n", (566, 641), True, 'import numpy as np\n'), ((473, 493), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (487, 493), True, 'import pandas as pd\n')] |
import sampling_methods
import numpy as np
__all__ = ['Supervised', 'ActiveLearning']
class _Trainer():
def __init__(self, name, epoch, batch_size):
self.name = name
self.epoch = epoch
self.batch_size = batch_size
assert (type(epoch) is int and epoch > 0)
assert (type(batch_size) is int and batch_size > 0)
def train_model(self, model, dataset, verbose='auto', validation=True):
pass
class Supervised(_Trainer):
def __init__(self, epoch=15, batch_size=32):
super().__init__("Supervised Learning", epoch, batch_size)
def train_model(self, model, dataset, verbose='auto', validation=True):
if verbose != 'auto':
assert (type(verbose) is int and verbose in range(3))
if validation:
history = model.model.fit(dataset.train_data, dataset.train_labels,
validation_data=(dataset.test_data, dataset.test_labels),
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
else:
history = model.model.fit(dataset.train_data, dataset.train_labels,
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
            history.history['val_loss'] = [0] * self.epoch
            history.history['val_accuracy'] = [0] * self.epoch
model.history = history.history
## More information about the LeNet-5 architecture can be found in the link below.
## https://www.datacamp.com/community/tutorials/active-learning
class ActiveLearning(_Trainer):
def __init__(self, epoch=10, batch_size=32,
sampling_method=None, subsample_size=0,
active_learning_rounds=20, num_labels_to_learn=128,
adversary=None):
super().__init__("Active Learning", epoch, batch_size)
## If no sampling method is specified, just label next N unlabeled images
if sampling_method is None:
sampling_method = lambda model, data: np.arange(len(data))
self.sampling_method = sampling_method
self.subsample_size = subsample_size
self.active_learning_rounds = active_learning_rounds
self.num_labels_to_learn = num_labels_to_learn
self.adversary = adversary
assert (type(subsample_size) is int and subsample_size >= 0)
assert (type(active_learning_rounds) is int and active_learning_rounds > 0)
assert (type(num_labels_to_learn) is int and num_labels_to_learn > 0)
def train_model(self, model, dataset, verbose='auto', validation=True):
if verbose != 'auto':
assert (type(verbose) is int and verbose in range(3))
learned_data = dataset.train_data[:0]
learned_labels = dataset.train_labels[:0]
not_learned_data = dataset.train_data[0:]
not_learned_labels = dataset.train_labels[0:]
history = {'loss': [], 'accuracy': [], 'val_loss': [], 'val_accuracy': []}
## Label the first N elements in the 'not-learned' list
def label(n):
nonlocal learned_data, learned_labels, not_learned_data, not_learned_labels
if n > len(not_learned_data):
n = len(not_learned_data)
learned_data = np.concatenate((learned_data, not_learned_data[:n]))
learned_labels = np.concatenate((learned_labels, not_learned_labels[:n]))
not_learned_data = not_learned_data[n:]
not_learned_labels = not_learned_labels[n:]
## Train the model, record the history.
def train(i):
nonlocal self, model, dataset, learned_data, learned_labels, history, verbose
if verbose:
print("\nRound {}\nLearned Samples: {}\n".format(i, len(learned_data)))
if validation:
history_i = model.model.fit(learned_data, learned_labels,
validation_data=(dataset.test_data, dataset.test_labels),
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
else:
history_i = model.model.fit(learned_data, learned_labels,
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
history_i.history['val_loss'] = [0] * self.epoch
history_i.history['val_accuracy'] = [0] * self.epoch
history['loss'] += history_i.history['loss']
history['accuracy'] += history_i.history['accuracy']
history['val_loss'] += history_i.history['val_loss']
history['val_accuracy'] += history_i.history['val_accuracy']
## Sort the 'not-learned' list with a sampling method.
def pick_samples(n):
nonlocal self, model, not_learned_data, not_learned_labels
if n and n > self.num_labels_to_learn:
n = min(n, len(not_learned_data))
pidx = np.random.permutation(len(not_learned_data))
not_learned_data = not_learned_data[pidx]
not_learned_labels = not_learned_labels[pidx]
pidx = self.sampling_method(model.model, not_learned_data[:n])
not_learned_data[:n] = not_learned_data[pidx]
not_learned_labels[:n] = not_learned_labels[pidx]
else:
pidx = self.sampling_method(model.model, not_learned_data)
not_learned_data = not_learned_data[pidx]
not_learned_labels = not_learned_labels[pidx]
## If an attack is provided, generate artificial samples by adding adversary images with their original label to the 'learned' list
def use_adversary(attack, n):
nonlocal model, learned_data, learned_labels, not_learned_data, not_learned_labels
if n > len(not_learned_data):
n = len(not_learned_data)
adversary_data, _, _, _ = attack(model.model, not_learned_data[:n])
learned_data = np.concatenate((learned_data, adversary_data))
learned_labels = np.concatenate((learned_labels, not_learned_labels[:n]))
        i = 0  # guards the single-round case, where the loop below never runs
        for i in range(self.active_learning_rounds - 1):
label(self.num_labels_to_learn)
if len(not_learned_data) == 0:
break
train(i+1)
pick_samples(self.subsample_size)
if self.adversary is not None:
use_adversary(self.adversary, self.num_labels_to_learn)
label(self.num_labels_to_learn)
train(i+1)
model.history = history
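# A hedged usage sketch (names illustrative: `model` wraps a compiled Keras
# model in .model, `dataset` exposes the train/test arrays used above, and
# sampling_methods is assumed to provide an uncertainty-ranking function):
#     trainer = ActiveLearning(sampling_method=sampling_methods.uncertainty,
#                              num_labels_to_learn=256)
#     trainer.train_model(model, dataset, verbose=1)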
| [
"numpy.concatenate"
] | [((3482, 3534), 'numpy.concatenate', 'np.concatenate', (['(learned_data, not_learned_data[:n])'], {}), '((learned_data, not_learned_data[:n]))\n', (3496, 3534), True, 'import numpy as np\n'), ((3570, 3626), 'numpy.concatenate', 'np.concatenate', (['(learned_labels, not_learned_labels[:n])'], {}), '((learned_labels, not_learned_labels[:n]))\n', (3584, 3626), True, 'import numpy as np\n'), ((6433, 6479), 'numpy.concatenate', 'np.concatenate', (['(learned_data, adversary_data)'], {}), '((learned_data, adversary_data))\n', (6447, 6479), True, 'import numpy as np\n'), ((6511, 6567), 'numpy.concatenate', 'np.concatenate', (['(learned_labels, not_learned_labels[:n])'], {}), '((learned_labels, not_learned_labels[:n]))\n', (6525, 6567), True, 'import numpy as np\n')] |
import numpy as np
from krippendorff import alpha
# Example from: <NAME>. "Content Analysis: An Introduction to Its Methodology".
# Fourth Edition. 2019. SAGE Publishing.
# Chapter 12, page 290.
# 4 observers (rows). 11 units (columns)
# np.nan is missing data (observer did not code unit)
reliability_data = np.array([
[ 1., 2., 3., 3., 2., 1., 4., 1., 2., np.nan, np.nan],
[ 1., 2., 3., 3., 2., 2., 4., 1., 2., 5., np.nan],
[np.nan, 3., 3., 3., 2., 3., 4., 2., 2., 5., 1.],
[ 1., 2., 3., 3., 2., 4., 4., 1., 2., 5., 1.]
])
if __name__ == "__main__":
alpha_nominal = alpha(reliability_data=reliability_data,level_of_measurement='nominal')
print(f"Nominal: {alpha_nominal}")
assert(np.isclose(alpha_nominal, 0.743421052631579))
alpha_interval = alpha(reliability_data=reliability_data,level_of_measurement='interval')
print(f"Interval: {alpha_interval}")
assert(np.isclose(alpha_interval, 0.849, atol=0.001))
alpha_ratio = alpha(reliability_data=reliability_data,level_of_measurement='ratio')
print(f"Ratio: {alpha_ratio}")
assert(np.isclose(alpha_ratio, 0.797, atol=0.001))
alpha_ordinal = alpha(reliability_data=reliability_data,level_of_measurement='ordinal')
print(f"Ordinal: {alpha_ordinal}")
assert(np.isclose(alpha_ordinal, 0.815, atol=0.001))
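    # alpha() can also take pre-tabulated counts instead of raw codes -- a
    # hedged sketch (value_counts is a units x categories matrix):
    #   counts = np.array([[3, 0, 0, 0, 0],   # unit 1: three observers coded '1'
    #                      [0, 3, 1, 0, 0]])  # unit 2: three '2's, one '3'
    #   alpha(value_counts=counts, level_of_measurement='nominal')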
| [
"numpy.array",
"numpy.isclose",
"krippendorff.alpha"
] | [((310, 573), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 4.0, 1.0, 2.0, np.nan, np.nan], [1.0, 2.0, \n 3.0, 3.0, 2.0, 2.0, 4.0, 1.0, 2.0, 5.0, np.nan], [np.nan, 3.0, 3.0, 3.0,\n 2.0, 3.0, 4.0, 2.0, 2.0, 5.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0, 4.0, 4.0,\n 1.0, 2.0, 5.0, 1.0]]'], {}), '([[1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 4.0, 1.0, 2.0, np.nan, np.nan], [\n 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 4.0, 1.0, 2.0, 5.0, np.nan], [np.nan, 3.0,\n 3.0, 3.0, 2.0, 3.0, 4.0, 2.0, 2.0, 5.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0,\n 4.0, 4.0, 1.0, 2.0, 5.0, 1.0]])\n', (318, 573), True, 'import numpy as np\n'), ((627, 699), 'krippendorff.alpha', 'alpha', ([], {'reliability_data': 'reliability_data', 'level_of_measurement': '"""nominal"""'}), "(reliability_data=reliability_data, level_of_measurement='nominal')\n", (632, 699), False, 'from krippendorff import alpha\n'), ((749, 793), 'numpy.isclose', 'np.isclose', (['alpha_nominal', '(0.743421052631579)'], {}), '(alpha_nominal, 0.743421052631579)\n', (759, 793), True, 'import numpy as np\n'), ((817, 890), 'krippendorff.alpha', 'alpha', ([], {'reliability_data': 'reliability_data', 'level_of_measurement': '"""interval"""'}), "(reliability_data=reliability_data, level_of_measurement='interval')\n", (822, 890), False, 'from krippendorff import alpha\n'), ((942, 987), 'numpy.isclose', 'np.isclose', (['alpha_interval', '(0.849)'], {'atol': '(0.001)'}), '(alpha_interval, 0.849, atol=0.001)\n', (952, 987), True, 'import numpy as np\n'), ((1008, 1078), 'krippendorff.alpha', 'alpha', ([], {'reliability_data': 'reliability_data', 'level_of_measurement': '"""ratio"""'}), "(reliability_data=reliability_data, level_of_measurement='ratio')\n", (1013, 1078), False, 'from krippendorff import alpha\n'), ((1124, 1166), 'numpy.isclose', 'np.isclose', (['alpha_ratio', '(0.797)'], {'atol': '(0.001)'}), '(alpha_ratio, 0.797, atol=0.001)\n', (1134, 1166), True, 'import numpy as np\n'), ((1189, 1261), 'krippendorff.alpha', 'alpha', ([], {'reliability_data': 'reliability_data', 'level_of_measurement': '"""ordinal"""'}), "(reliability_data=reliability_data, level_of_measurement='ordinal')\n", (1194, 1261), False, 'from krippendorff import alpha\n'), ((1311, 1355), 'numpy.isclose', 'np.isclose', (['alpha_ordinal', '(0.815)'], {'atol': '(0.001)'}), '(alpha_ordinal, 0.815, atol=0.001)\n', (1321, 1355), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>> 40819903
#
# Plotting script.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy
import os
def main(args):
myStuff = []
for i in range(0, 20):
myStuff.append( [i, i*2, i*3] )
filename = "testfile.txt"
printListListToFile(myStuff, filename)
	data = readListListFromFile("./data/" + filename)  # printListListToFile writes under ./data/
# ~ data = readListListFromFile("real-test-tupples")
print(data)
#split into x y z arrays.
X, Y, Z = convertToThreeArrays(data)
picOutName = "graph-test"
	printGraph(X, Y, Z, "A TITLE lol", "xLabel", "yLabel", "zLabel", picOutName)
os.system("sync")
os.system("optipng ../pics/*.png")
return 0
#Splits rows of [x, y, z] into three float arrays (counterpart of
#convertToFiveArrays; main() expects this helper).
def convertToThreeArrays(data):
	X, Y, Z = map(list, zip(*data))
	X = numpy.array(X, dtype=numpy.float32)
	Y = numpy.array(Y, dtype=numpy.float32)
	Z = numpy.array(Z, dtype=numpy.float32)
	return X, Y, Z
#Name remains a list
def convertToFiveArrays(data):
M, N, T, TIME, NAME = map(list, zip(*data))
M = numpy.array(M, dtype=numpy.float32)
N = numpy.array(N, dtype=numpy.float32)
T = numpy.array(T, dtype=numpy.float32)
TIME = numpy.array(TIME, dtype=numpy.float32)
return M, N, T, TIME, NAME
#Prints a list of tupples to file.
def printListListToFile(myTupples, filename):
with open( "./data/" + filename, "w") as myFile:
for thingy in myTupples:
for i in range(0, len(thingy)):
myFile.write(f"{thingy[i]}")
if (i + 1) != len(thingy):
myFile.write(" ")
else:
myFile.write("\n")
def readListListFromFile(filename):
outList = []
with open(filename, "r") as myFile:
for line in myFile:
thisList = line.rstrip().split(" ")
outList.append(thisList)
return outList
def printGraph(xArray, yArray, zArray, title, xLab, yLab, zLab, filename):
myFigure = plt.figure()
myFrame = myFigure.add_subplot(111, projection='3d')
myFrame.scatter(xArray, yArray, zArray, marker = "o")
# ~ myFrame.plot_trisurf(xArray, yArray, zArray)
# ~ myFrame.scatter(xArray, yArray, zArray, marker = "o", label = "TEST LABEL LOL")
# ~ myFrame.scatter(xArrayAho, yArrayAho, zArrayAho, marker = "^", label = "Aho Corasick")
myFrame.set_title(title)
myFrame.set_xlabel(xLab)
myFrame.set_ylabel(yLab)
myFrame.set_zlabel(zLab)
# ~ myFrame.axes.set_xlim3d(left=0, right=200)
# ~ myFrame.legend()
# ~ myFrame.legend(loc = 6, ncol = 1)
plt.savefig(filename + ".png")
# ~ plt.show()#
plt.clf()
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"numpy.array",
"matplotlib.pyplot.figure",
"os.system"
] | [((637, 654), 'os.system', 'os.system', (['"""sync"""'], {}), "('sync')\n", (646, 654), False, 'import os\n'), ((656, 690), 'os.system', 'os.system', (['"""optipng ../pics/*.png"""'], {}), "('optipng ../pics/*.png')\n", (665, 690), False, 'import os\n'), ((808, 843), 'numpy.array', 'numpy.array', (['M'], {'dtype': 'numpy.float32'}), '(M, dtype=numpy.float32)\n', (819, 843), False, 'import numpy\n'), ((849, 884), 'numpy.array', 'numpy.array', (['N'], {'dtype': 'numpy.float32'}), '(N, dtype=numpy.float32)\n', (860, 884), False, 'import numpy\n'), ((890, 925), 'numpy.array', 'numpy.array', (['T'], {'dtype': 'numpy.float32'}), '(T, dtype=numpy.float32)\n', (901, 925), False, 'import numpy\n'), ((934, 972), 'numpy.array', 'numpy.array', (['TIME'], {'dtype': 'numpy.float32'}), '(TIME, dtype=numpy.float32)\n', (945, 972), False, 'import numpy\n'), ((1602, 1614), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1612, 1614), True, 'import matplotlib.pyplot as plt\n'), ((2176, 2206), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.png')"], {}), "(filename + '.png')\n", (2187, 2206), True, 'import matplotlib.pyplot as plt\n'), ((2225, 2234), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2232, 2234), True, 'import matplotlib.pyplot as plt\n')] |
"""Module providing high-level tools for linearizing and finding chi^2 minimizing
solutions to systems of equations.
Solvers: LinearSolver, LogProductSolver, and LinProductSolver.
These generally follow the form:
> data = {'a1*x+b1*y': np.array([5.,7]), 'a2*x+b2*y': np.array([4.,6])}
> ls = LinearSolver(data, a1=1., b1=np.array([2.,3]), a2=2., b2=np.array([1.,2]))
> sol = ls.solve()
where equations are passed in as a dictionary where each key is a string
describing the equation (which is parsed according to python syntax) and each
value is the corresponding "measured" value of that equation. Variable names
in equations are checked against keyword arguments to the solver to determine
if they are provided constants or parameters to be solved for. Parameter names
and solutions are returned as key:value pairs by ls.solve().
Parallel instances of equations can be evaluated by providing measured values
as numpy arrays. Constants can also be arrays that comply with standard numpy
broadcasting rules. Finally, weighting is implemented through an optional wgts
dictionary that parallels the construction of data.
LinearSolver solves linear equations of the form 'a*x + b*y + c*z'.
LogProductSolver uses logarithms to linearize equations of the form 'x*y*z'.
LinProductSolver uses symbolic Taylor expansion to linearize equations of the
form 'x*y + y*z'.
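A hedged sketch of the product solvers (values illustrative; LinProductSolver
needs a starting solution, here taken from the log solver):
> data = {'x*y': np.array([6.+0j]), 'x*z': np.array([10.+0j]), 'y*z': np.array([15.+0j])}
> sol0 = LogProductSolver(data).solve()
> sol = LinProductSolver(data, sol0).solve()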
For more detail on usage, see linsolve_example.ipynb
"""
import numpy as np
import ast
from scipy.sparse import csc_matrix
import scipy.sparse.linalg
import scipy.linalg
import warnings
from copy import deepcopy
from functools import reduce
import tensorflow as tf
# Monkey patch for backward compatibility:
# ast.Num deprecated in Python 3.8. Make it an alias for ast.Constant
# if it gets removed.
if not hasattr(ast, "Num"):
ast.Num = ast.Constant
def ast_getterms(n):
"""Convert an AST parse tree into a list of terms. E.g. 'a*x1+b*x2' -> [[a,x1],[b,x2]]"""
if type(n) is ast.Name:
return [[n.id]]
elif type(n) is ast.Constant or type(n) is ast.Num:
return [[n.n]]
elif type(n) is ast.Expression:
return ast_getterms(n.body)
elif type(n) is ast.UnaryOp:
assert type(n.op) is ast.USub
return [[-1] + ast_getterms(n.operand)[0]]
elif type(n) is ast.BinOp:
if type(n.op) is ast.Mult:
return [ast_getterms(n.left)[0] + ast_getterms(n.right)[0]]
elif type(n.op) is ast.Add:
return ast_getterms(n.left) + ast_getterms(n.right)
elif type(n.op) is ast.Sub:
return ast_getterms(n.left) + [[-1] + ast_getterms(n.right)[0]]
else:
raise ValueError("Unsupported operation: %s" % str(n.op))
else:
raise ValueError("Unsupported: %s" % str(n))
def get_name(s, isconj=False):
"""Parse variable names of form 'var_' as 'var' + conjugation."""
if not type(s) is str:
if isconj:
return str(s), False
else:
return str(s)
if isconj:
return s.rstrip("_"), s.endswith("_") # tag names ending in '_' for conj
else:
return s.rstrip("_") # parse 'name_' as 'name' + conj
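# e.g. get_name('x_', isconj=True) -> ('x', True); get_name('x') -> 'x'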
class Constant:
"""Container for constants (which can be arrays) in linear equations."""
def __init__(self, name, constants):
self.name = get_name(name)
if type(name) is str:
self.val = constants[self.name]
else:
self.val = name
try:
self.dtype = self.val.dtype
except (AttributeError):
self.dtype = type(self.val)
def shape(self):
try:
return self.val.shape
except (AttributeError):
return ()
def get_val(self, name=None):
"""Return value of constant. Handles conj if name='varname_' is requested
instead of name='varname'."""
if name is not None and type(name) is str:
name, conj = get_name(name, isconj=True)
assert self.name == name
if conj:
return self.val.conjugate()
else:
return self.val
else:
return self.val
class Parameter:
def __init__(self, name):
"""Container for parameters that are to be solved for."""
self.name = get_name(name)
def sparse_form(self, name, eqnum, prm_order, prefactor, re_im_split=True):
xs, ys, vals = [], [], []
# separated into real and imaginary parts iff one of the variables is conjugated with "_"
if re_im_split:
name, conj = get_name(name, True)
ordr, ordi = 2 * prm_order[self.name], 2 * prm_order[self.name] + 1
cr, ci = prefactor.real, prefactor.imag
i = 2 * eqnum
# (cr,ci) * (pr,pi) = (cr*pr-ci*pi, ci*pr+cr*pi)
xs.append(i)
ys.append(ordr)
vals.append(cr) # real component
xs.append(i + 1)
ys.append(ordr)
vals.append(ci) # imag component
if not conj:
xs.append(i)
ys.append(ordi)
vals.append(-ci) # imag component
xs.append(i + 1)
ys.append(ordi)
vals.append(cr) # imag component
else:
xs.append(i)
ys.append(ordi)
vals.append(ci) # imag component
xs.append(i + 1)
ys.append(ordi)
vals.append(-cr) # imag component
else:
xs.append(eqnum)
ys.append(prm_order[self.name])
vals.append(prefactor)
return xs, ys, vals
def get_sol(self, x, prm_order):
"""Extract prm value from appropriate row of x solution."""
if x.shape[0] > len(
prm_order
): # detect that we are splitting up real and imaginary parts
ordr, ordi = 2 * prm_order[self.name], 2 * prm_order[self.name] + 1
return {self.name: x[ordr] + np.complex64(1.0j) * x[ordi]}
else:
return {self.name: x[prm_order[self.name]]}
class LinearEquation:
"""Container for all prms and constants associated with a linear equation."""
def __init__(self, val, **kwargs):
self.val = val
if type(val) is str:
n = ast.parse(val, mode="eval")
val = ast_getterms(n)
self.wgts = kwargs.pop("wgts", np.float32(1.0))
self.has_conj = False
constants = kwargs.pop("constants", kwargs)
self.process_terms(val, constants)
def process_terms(self, terms, constants):
"""Classify terms from parsed str as Constant or Parameter."""
self.consts, self.prms = {}, {}
for term in terms:
for t in term:
try:
self.add_const(t, constants)
except (KeyError): # must be a parameter then
p = Parameter(t)
self.has_conj |= get_name(t, isconj=True)[
-1
] # keep track if any prms are conj
self.prms[p.name] = p
self.terms = self.order_terms(terms)
def add_const(self, name, constants):
"""Manually add a constant of given name to internal list of constants. Value is drawn from constants."""
n = get_name(name)
if n in constants and isinstance(constants[n], Constant):
c = constants[n]
else:
c = Constant(name, constants) # raises KeyError if not a constant
self.consts[c.name] = c
def order_terms(self, terms):
"""Reorder terms to obey (const1,const2,...,prm) ordering."""
for L in terms:
L.sort(key=lambda x: get_name(x) in self.prms)
# Validate that each term has exactly 1 unsolved parameter.
for t in terms:
assert get_name(t[-1]) in self.prms
for ti in t[:-1]:
assert type(ti) is not str or get_name(ti) in self.consts
return terms
def eval_consts(self, const_list, wgts=np.float32(1.0)):
"""Multiply out constants (and wgts) for placing in matrix."""
const_list = [self.consts[get_name(c)].get_val(c) for c in const_list]
return wgts ** 0.5 * reduce(lambda x, y: x * y, const_list, np.float32(1.0))
# this has the effect of putting the square root of the weights into each A matrix
# return 1. * reduce(lambda x,y: x*y, const_list, 1.)
def sparse_form(self, eqnum, prm_order, re_im_split=True):
"""Returns the row and col information and the values of coefficients to build up
        part of the sparse (CSR) representation of the A matrix corresponding to this equation."""
xs, ys, vals = [], [], []
for term in self.terms:
p = self.prms[get_name(term[-1])]
f = self.eval_consts(term[:-1], self.wgts)
x, y, val = p.sparse_form(
term[-1], eqnum, prm_order, f.flatten(), re_im_split
)
xs += x
ys += y
vals += val
return xs, ys, vals
def eval(self, sol):
"""Given dict of parameter solutions, evaluate this equation."""
rv = 0
for term in self.terms:
total = self.eval_consts(term[:-1])
name, isconj = get_name(term[-1], isconj=True)
if isconj:
total *= np.conj(sol[name])
else:
total *= sol[name]
rv += total
return rv
def verify_weights(wgts, keys):
"""Given wgts and keys, ensure wgts have all keys and are all real.
If wgts == {} or None, return all 1s."""
if wgts is None or wgts == {}:
return {k: np.float32(1.0) for k in keys}
else:
for k in keys:
assert k in wgts # must have weights for all keys
            # tricky errors happen if wgts are complex
            assert not np.iscomplexobj(wgts[k])
return wgts
def infer_dtype(values):
"""Given a list of values, return the appropriate numpy data
type for matrices, solutions.
Returns float32, float64, complex64, or complex128.
    Python scalars will be treated as float32 or complex64 as appropriate.
Likewise, all int types will be treated as single precision floats."""
# ensure we are at least a float32 if we were passed integers
types = [np.dtype("float32")]
# determine the data type of all values
all_types = list(set([v.dtype if hasattr(v, "dtype") else type(v) for v in values]))
# split types into numpy vs. python dtypes
py_types = [t for t in all_types if not isinstance(t, np.dtype)]
np_types = [t for t in all_types if isinstance(t, np.dtype)]
# only use numpy dtypes that are floating/complex
types += [
t
for t in np_types
if np.issubdtype(t, np.floating) or np.issubdtype(t, np.complexfloating)
]
# if any python constants are complex, promote to complex, but otherwise
# don't promote to double if we have floats/doubles/ints in python
if complex in py_types:
types.append(np.dtype("complex64"))
# Use promote_types to determine the final floating/complex dtype
dtype = reduce(np.promote_types, types)
return dtype
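# e.g. infer_dtype([1.0, 2]) -> float32 (python scalars stay single precision);
# infer_dtype([np.zeros(1, np.float64)]) -> float64; infer_dtype([1j]) -> complex64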
class LinearSolver:
def __init__(self, data, wgts={}, sparse=False, **kwargs):
"""Set up a linear system of equations of the form 1*a + 2*b + 3*c = 4.
Args:
            data: Dictionary that maps linear equations, written as valid python-interpretable strings
that include the variables in question, to (complex) numbers or numpy arrarys.
Variables with trailing underscores '_' are interpreted as complex conjugates.
wgts: Dictionary that maps equation strings from data to real weights to apply to each
equation. Weights are treated as 1/sigma^2. All equations in the data must have a weight
if wgts is not the default, {}, which means all 1.0s.
sparse: Boolean (default False). If True, represents A matrix sparsely (though AtA, Aty end up dense)
May be faster for certain systems of equations.
**kwargs: keyword arguments of constants (python variables in keys of data that
are not to be solved for)
Returns:
None
"""
# XXX add ability to override datatype inference
# see https://github.com/HERA-Team/linsolve/issues/30
self.data = data
self.keys = list(data.keys())
self.sparse = sparse
self.wgts = verify_weights(wgts, self.keys)
constants = kwargs.pop("constants", kwargs)
self.eqs = [
LinearEquation(k, wgts=self.wgts[k], constants=constants) for k in self.keys
]
        # XXX add ability to have more than one measurement for a key=equation
# see https://github.com/HERA-Team/linsolve/issues/14
self.prms = {}
for eq in self.eqs:
self.prms.update(eq.prms)
self.consts = {}
for eq in self.eqs:
self.consts.update(eq.consts)
self.prm_order = {}
for i, p in enumerate(self.prms):
self.prm_order[p] = i
# infer dtype for later arrays
self.re_im_split = kwargs.pop("re_im_split", False)
# go through and figure out if any variables are conjugated
for eq in self.eqs:
self.re_im_split |= eq.has_conj
self.dtype = infer_dtype(
list(self.data.values())
+ list(self.consts.values())
+ list(self.wgts.values())
)
if self.re_im_split:
self.dtype = np.real(np.ones(1, dtype=self.dtype)).dtype
self.shape = self._shape()
def _shape(self):
"""Get broadcast shape of constants, weights for last dim of A"""
sh = []
for k in self.consts:
shk = self.consts[k].shape()
if len(shk) > len(sh):
sh += [0] * (len(shk) - len(sh))
for i in range(min(len(sh), len(shk))):
sh[i] = max(sh[i], shk[i])
for k in self.wgts:
try:
shk = self.wgts[k].shape
            except AttributeError:
continue
if len(shk) > len(sh):
sh += [0] * (len(shk) - len(sh))
for i in range(min(len(sh), len(shk))):
sh[i] = max(sh[i], shk[i])
return tuple(sh)
def _A_shape(self):
"""Get shape of A matrix (# eqs, # prms, data.size). Now always 3D."""
try:
sh = (
reduce(lambda x, y: x * y, self.shape),
) # flatten data dimensions so A is always 3D
        except TypeError:
sh = (1,)
if self.re_im_split:
return (2 * len(self.eqs), 2 * len(self.prm_order)) + sh
else:
return (len(self.eqs), len(self.prm_order)) + sh
def get_A(self):
"""Return A matrix for A*x=y."""
A = np.zeros(self._A_shape(), dtype=self.dtype)
xs, ys, vals = self.sparse_form()
ones = np.ones_like(A[0, 0])
# A[xs,ys] += [v * ones for v in vals] # This is broken when a single equation has the same param more than once
for x, y, v in zip(xs, ys, [v * ones for v in vals]):
A[x, y] += v # XXX ugly
return A
def sparse_form(self):
"""Returns a lists of lists of row and col numbers and coefficients in order to
express the linear system as a CSR sparse matrix."""
xs, ys, vals = [], [], []
for i, eq in enumerate(self.eqs):
x, y, val = eq.sparse_form(i, self.prm_order, self.re_im_split)
xs += x
ys += y
vals += val
return xs, ys, vals
    def _get_A_sparse(self):
        """Returns the sparse (xs, ys, vals) triplets of the A matrix, broadcasting
        scalar coefficients up to the data shape so vals is always 2D."""
        xs, ys, vals = self.sparse_form()
        ones = np.ones(self._A_shape()[2:], dtype=self.dtype)
        for n, val in enumerate(vals):
            if not isinstance(val, np.ndarray) or val.size == 1:
                vals[n] = ones * val
        return np.array(xs), np.array(ys), np.array(vals, dtype=self.dtype).T
    def _A_from_sparse(self, xs_ys_vals):
        """Builds the dense A matrix from (xs, ys, vals) triplets so that it can
        be fed to the tf.linalg routines used by the sparse inverters below."""
        xs, ys, vals = xs_ys_vals
        A = np.zeros(self._A_shape(), dtype=self.dtype)
        for x, y, v in zip(xs, ys, vals.T):
            A[x, y] += v
        return A
def get_weighted_data(self):
"""Return y = data * wgt**.5 as a 2D vector, regardless of original data/wgt shape."""
dtype = self.dtype # default
if self.re_im_split:
if dtype == np.float32:
dtype = np.complex64
else:
dtype = np.complex128
d = np.array([self.data[k] for k in self.keys], dtype=dtype)
if len(self.wgts) > 0:
w = np.array([self.wgts[k] for k in self.keys])
w.shape += (1,) * (d.ndim - w.ndim)
d.shape += (1,) * (w.ndim - d.ndim)
d = d * (w ** 0.5)
# this is w**.5 because A already has a factor of w**.5 in it, so
# (At N^-1 A)^1 At N^1 y ==> (At A)^1 At d (where d is the result of this
# function and A is redefined to include half of the weights)
self._data_shape = d.shape[1:] # store for reshaping sols to original
d.shape = (d.shape[0], -1) # Flatten
if self.re_im_split:
rv = np.empty((2 * d.shape[0],) + d.shape[1:], dtype=self.dtype)
rv[::2], rv[1::2] = d.real, d.imag
return rv
else:
return d
# This could help with repeated calls, but increases the runtime for single usage
# @tf.function
def _invert_lsqr(self, A, y, rcond=0, sparse=False):
"""
rcond:
rcond must be set to 0 to work for complex datasets
"""
dtype = y.dtype
"""
assert not (
dtype in [np.complex128] and rcond > 0
), "If using complex128 data, rcond must be equal to 0 for performance reasons"
"""
if dtype in [np.complex128]:
rcond = 0
x = tf.linalg.lstsq(
tf.transpose(A, perm=[2, 0, 1]),
tf.expand_dims(tf.transpose(y), axis=-1),
l2_regularizer=rcond,
)
return tf.squeeze(x)
def _invert_lsqr_stable(self, A, y, rcond=0, sparse=False):
"""
rcond:
rcond must be set to 0 to work for complex datasets
"""
dtype = y.dtype
if dtype in [np.complex128]:
# A = tf.cast(A, dtype='complex64')
# A = tf.cast(A, dtype='complex64')
rcond = 0
x = tf.linalg.lstsq(
tf.transpose(A, perm=[2, 0, 1]),
tf.expand_dims(tf.transpose(y), axis=-1),
l2_regularizer=rcond,
fast=False,
)
return tf.squeeze(x)
    def _invert_lsqr_stable_sparse(self, xs_ys_vals, y, rcond):
        """
        rcond:
            rcond must be set to 0 to work for complex datasets
        """
        A = self._A_from_sparse(xs_ys_vals)
        A = tf.convert_to_tensor(A)
        return self._invert_lsqr_stable(A, y, rcond, sparse=True)
    def _invert_lsqr_sparse(self, xs_ys_vals, y, rcond):
        """Sparse-triplet wrapper around _invert_lsqr."""
        A = self._A_from_sparse(xs_ys_vals)
        A = tf.convert_to_tensor(A)
        return self._invert_lsqr(A, y, rcond, sparse=True)
# This could help with repeated calls, but increases the runtime for single usage
# @tf.function
def _invert_pinv(self, A, y, rcond, sparse=False):
"""
"""
dtype = y.dtype
A = tf.transpose(A, perm=[2, 0, 1])
AtA = tf.matmul(A, A, adjoint_a=True, a_is_sparse=sparse, b_is_sparse=sparse)
if dtype in [complex, np.complex64, np.complex128]:
            # tensorflow does not allow complex pseudo-inverses; compute the value manually
R = tf.math.real(AtA)
C = tf.math.imag(AtA)
r0 = tf.matmul(tf.linalg.pinv(R), C)
y11 = tf.linalg.pinv(tf.matmul(C, r0) + R)
y10 = tf.matmul(-r0, y11)
AtAi = tf.cast(tf.complex(y11, y10), dtype=AtA.dtype)
else:
AtAi = tf.linalg.pinv(AtA, rcond=rcond)
# I probably don't need to expand_dims or transpose here
y = tf.expand_dims(tf.transpose(y), axis=-1)
Aty = tf.matmul(A, y, adjoint_a=True)
x = tf.squeeze(tf.matmul(AtAi, Aty))
return x
    def _invert_pinv_sparse(self, xs_ys_vals, y, rcond):
        """Sparse-triplet wrapper around _invert_pinv."""
        A = self._A_from_sparse(xs_ys_vals)
        A = tf.convert_to_tensor(A)
        return self._invert_pinv(A, y, rcond, sparse=True)
# This could help with repeated calls, but increases the runtime for single usage
# @tf.function
def _invert_solve(self, A, y, rcond, sparse=False):
"""
"""
A = tf.transpose(A, perm=[2, 0, 1])
AtA = tf.matmul(A, A, adjoint_a=True, a_is_sparse=sparse, b_is_sparse=sparse)
y = tf.expand_dims(tf.transpose(y), axis=-1)
Aty = tf.matmul(A, y, adjoint_a=True, a_is_sparse=sparse,)
x = tf.linalg.solve(AtA, Aty)
return tf.squeeze(x)
    def _invert_solve_sparse(self, xs_ys_vals, y, rcond):
        """Sparse-triplet wrapper around _invert_solve."""
        A = self._A_from_sparse(xs_ys_vals)
        A = tf.convert_to_tensor(A)
        return self._invert_solve(A, y, rcond, sparse=True)
# This could help with repeated calls, but increases the runtime for single usage
# @tf.function
def _invert_pinv_shared(self, A, y, rcond, sparse=False):
"""
"""
AtA = tf.matmul(A, A, adjoint_a=True, a_is_sparse=sparse, b_is_sparse=sparse)
dtype = AtA.dtype
if dtype in [complex, np.complex64, np.complex128]:
            # tensorflow does not allow complex pseudo-inverses; compute the value manually
R = tf.math.real(AtA)
C = tf.math.imag(AtA)
r0 = tf.matmul(tf.linalg.pinv(R), C)
y11 = tf.linalg.pinv(tf.matmul(C, r0) + R)
y10 = tf.matmul(-r0, y11)
AtAi = tf.cast(tf.complex(y11, y10), dtype=AtA.dtype)
else:
AtAi = tf.linalg.pinv(AtA, rcond=rcond)
return tf.transpose(
tf.matmul(AtAi, tf.matmul(A, y, adjoint_a=True, a_is_sparse=sparse))
)
    def _invert_pinv_shared_sparse(self, xs_ys_vals, y, rcond):
        """Sparse-triplet wrapper around _invert_pinv_shared."""
        A = self._A_from_sparse(xs_ys_vals)
        A = tf.convert_to_tensor(A)
        return self._invert_pinv_shared(A, y, rcond, sparse=True)
def _invert_default(self, A, y, rcond):
"""The default inverter, currently 'pinv'."""
# XXX doesn't deal w/ fact that individual matrices might
# fail for one inversion method.
# see https://github.com/HERA-Team/linsolve/issues/32
# XXX for now, lsqr is slower than pinv, but that may
# change once numpy supports stacks of matrices
# see https://github.com/HERA-Team/linsolve/issues/31
return self._invert_pinv(A, y, rcond)
def _invert_default_sparse(self, xs_ys_vals, y, rcond):
"""The default sparse inverter, currently 'pinv'."""
return self._invert_pinv_sparse(xs_ys_vals, y, rcond)
def solve(self, rcond=None, mode="default"):
"""Compute x' = (At A)^-1 At * y, returning x' as dict of prms:values.
Args:
            rcond: cutoff ratio for singular values, passed as rcond to tf.linalg.pinv
                (or as the l2_regularizer to tf.linalg.lstsq in the 'lsqr' modes).
                Default: None (resolves to machine precision for inferred dtype)
            mode: 'default', 'lsqr', 'lsqr_stable', 'pinv', or 'solve'; selects which inverter to use,
                unless all equations share the same A matrix, in which case pinv is always used.
                'default': alias for 'pinv'.
                'lsqr': uses tf.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'lsqr_stable': like 'lsqr', but with fast=False for extra numerical stability.
                'solve': uses tf.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses tf.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, an inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.
Returns:
sol: a dictionary of solutions with variables as keys
"""
assert mode in ["default", "lsqr", "lsqr_stable", "pinv", "solve"]
if rcond is None:
rcond = np.finfo(self.dtype).resolution
y = self.get_weighted_data()
y = tf.convert_to_tensor(y)
if self.sparse:
xs, ys, vals = self._get_A_sparse()
if vals.shape[0] == 1 and y.shape[-1] > 1: # reuse inverse
x = self._invert_pinv_shared_sparse((xs, ys, vals), y, rcond)
else: # we can't reuse inverses
if mode == "default":
_invert = self._invert_default_sparse
elif mode == "lsqr":
_invert = self._invert_lsqr_sparse
elif mode == "lsqr_stable":
_invert = self._invert_lsqr_stable_sparse
elif mode == "pinv":
_invert = self._invert_pinv_sparse
elif mode == "solve":
_invert = self._invert_solve_sparse
x = _invert((xs, ys, vals), y, rcond)
else:
A = self.get_A()
A = tf.convert_to_tensor(A)
Ashape = self._A_shape()
assert A.ndim == 3
if Ashape[-1] == 1 and y.shape[-1] > 1: # can reuse inverse
x = self._invert_pinv_shared(A[..., 0], y, rcond)
else: # we can't reuse inverses
if mode == "default":
_invert = self._invert_default
elif mode == "lsqr":
_invert = self._invert_lsqr
elif mode == "lsqr_stable":
_invert = self._invert_lsqr_stable
elif mode == "pinv":
_invert = self._invert_pinv
elif mode == "solve":
_invert = self._invert_solve
x = _invert(A, y, rcond)
# TODO: Keep this as a tensor if iterating
x = x.numpy().T
x.shape = x.shape[:1] + self._data_shape # restore to shape of original data
sol = {}
for p in list(self.prms.values()):
sol.update(p.get_sol(x, self.prm_order))
return sol
def eval(self, sol, keys=None):
"""Returns a dictionary evaluating data keys to the current values given sol and consts.
Uses the stored data object unless otherwise specified."""
if keys is None:
keys = self.keys
elif type(keys) is str:
keys = [keys]
elif type(keys) is dict:
keys = list(keys.keys())
result = {}
for k in keys:
eq = LinearEquation(k, **self.consts)
result[k] = eq.eval(sol)
return result
def _chisq(self, sol, data, wgts, evaluator):
"""Internal adaptable chisq calculator."""
if len(wgts) == 0:
sigma2 = {k: 1.0 for k in list(data.keys())} # equal weights
else:
sigma2 = {k: wgts[k] ** -1 for k in list(wgts.keys())}
evaluated = evaluator(sol, keys=data)
chisq = 0
for k in list(data.keys()):
chisq += np.abs(evaluated[k] - data[k]) ** 2 / sigma2[k]
return chisq
def chisq(self, sol, data=None, wgts=None):
"""Compute Chi^2 = |obs - mod|^2 / sigma^2 for the specified solution. Weights are treated as 1/sigma^2.
wgts = {} means sigma = 1. Default uses the stored data and weights unless otherwise overwritten."""
if data is None:
data = self.data
if wgts is None:
wgts = self.wgts
wgts = verify_weights(wgts, list(data.keys()))
return self._chisq(sol, data, wgts, self.eval)
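# Illustrative usage of LinearSolver (an added example, not original code):
# the system a + 2*b = 7, 3*a + b = 6 has the unique solution a = 1, b = 3:
#     ls = LinearSolver({'a+2*b': 7.0, '3*a+b': 6.0})
#     sol = ls.solve()  # -> {'a': ~1.0, 'b': ~3.0}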
# XXX need to add support for conjugated constants...maybe this already works because we have conjugated constants inherited from taylor expansion
# see https://github.com/HERA-Team/linsolve/issues/12
def conjterm(term, mode="amp"):
"""Modify prefactor for conjugated terms, according to mode='amp|phs|real|imag'."""
f = {"amp": 1, "phs": -1, "real": 1, "imag": 1j}[
mode
] # if KeyError, mode was invalid
terms = [[f, t[:-1]] if t.endswith("_") else [t] for t in term]
return reduce(lambda x, y: x + y, terms)
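# Illustrative examples (added, not original code):
#     conjterm(['x_'], mode='phs')  # -> [-1, 'x']  (conjugation flips the phase sign)
#     conjterm(['y'], mode='phs')   # -> ['y']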
def jointerms(terms):
"""String that joins lists of lists of terms as the sum of products."""
return "+".join(["*".join(map(str, t)) for t in terms])
class LogProductSolver:
def __init__(self, data, wgts={}, sparse=False, **kwargs):
"""Set up a nonlinear system of equations of the form a*b = 1.0 to linearze via logarithm.
Args:
data: Dictionary that maps nonlinear product equations, written as valid python-interpetable
strings that include the variables in question, to (complex) numbers or numpy arrarys.
Variables with trailing underscores '_' are interpreted as complex conjugates (e.g. x*y_
parses as x * y.conj()).
wgts: Dictionary that maps equation strings from data to real weights to apply to each
equation. Weights are treated as 1/sigma^2. All equations in the data must have a weight
if wgts is not the default, {}, which means all 1.0s.
sparse: Boolean (default False). If True, represents A matrix sparsely (though AtA, Aty end up dense)
May be faster for certain systems of equations.
**kwargs: keyword arguments of constants (python variables in keys of data that
are not to be solved for)
Returns:
None
"""
keys = list(data.keys())
wgts = verify_weights(wgts, keys)
eqs = [ast_getterms(ast.parse(k, mode="eval")) for k in keys]
logamp, logphs = {}, {}
logampw, logphsw = {}, {}
for k, eq in zip(keys, eqs):
assert len(eq) == 1 # equations have to be purely products---no adds
eqamp = jointerms([conjterm([t], mode="amp") for t in eq[0]])
eqphs = jointerms([conjterm([t], mode="phs") for t in eq[0]])
dk = np.log(data[k])
logamp[eqamp], logphs[eqphs] = dk.real, dk.imag
try:
logampw[eqamp], logphsw[eqphs] = wgts[k], wgts[k]
            except KeyError:
pass
constants = kwargs.pop("constants", kwargs)
self.dtype = infer_dtype(
list(data.values()) + list(constants.values()) + list(wgts.values())
)
logamp_consts, logphs_consts = {}, {}
for k in constants:
c = np.log(constants[k]) # log unwraps complex circle at -pi
logamp_consts[k], logphs_consts[k] = c.real, c.imag
self.ls_amp = LinearSolver(
logamp, logampw, sparse=sparse, constants=logamp_consts
)
if self.dtype in (np.complex64, np.complex128):
            # XXX worry about enumerating these here without
            # explicitly ensuring that these are the supported complex
            # dtypes.
# see https://github.com/HERA-Team/linsolve/issues/33
self.ls_phs = LinearSolver(
logphs, logphsw, sparse=sparse, constants=logphs_consts
)
else:
self.ls_phs = None # no phase term to solve for
def solve(self, rcond=None, mode="default"):
"""Solve both amplitude and phase by taking the log of both sides to linearize.
Args:
            rcond: cutoff ratio for singular values, passed as rcond to tf.linalg.pinv
                (or as the l2_regularizer to tf.linalg.lstsq in the 'lsqr' modes).
                Default: None (resolves to machine precision for inferred dtype)
            mode: 'default', 'lsqr', 'lsqr_stable', 'pinv', or 'solve'; selects which inverter to use,
                unless all equations share the same A matrix, in which case pinv is always used.
                'default': alias for 'pinv'.
                'lsqr': uses tf.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'lsqr_stable': like 'lsqr', but with fast=False for extra numerical stability.
                'solve': uses tf.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses tf.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, an inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.
Returns:
sol: a dictionary of complex solutions with variables as keys
"""
sol_amp = self.ls_amp.solve(rcond=rcond, mode=mode)
if self.ls_phs is not None:
sol_phs = self.ls_phs.solve(rcond=rcond, mode=mode)
sol = {
k: np.exp(sol_amp[k] + np.complex64(1j) * sol_phs[k]).astype(self.dtype)
for k in sol_amp.keys()
}
else:
sol = {k: np.exp(sol_amp[k]).astype(self.dtype) for k in sol_amp.keys()}
return sol
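# Illustrative usage of LogProductSolver (an added example, not original code):
# since log(x*y) = log(x) + log(y), the product system below becomes linear in
# the logs, and x=2, y=3, z=4 satisfies it exactly:
#     lps = LogProductSolver({'x*y': 6.0, 'y*z': 12.0, 'x*z': 8.0})
#     sol = lps.solve()  # -> {'x': ~2.0, 'y': ~3.0, 'z': ~4.0}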
def taylor_expand(terms, consts={}, prepend="d"):
"""First-order Taylor expand terms (product of variables or the sum of a
product of variables) wrt all parameters except those listed in consts."""
taylors = []
for term in terms:
taylors.append(term)
for term in terms:
for i, t in enumerate(term):
if type(t) is not str or get_name(t) in consts:
continue
taylors.append(term[:i] + [prepend + t] + term[i + 1 :])
return taylors
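# Illustrative example (added, not original code):
#     taylor_expand([['a', 'b']])  # -> [['a', 'b'], ['da', 'b'], ['a', 'db']]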
class GradientSolver:
def __init__(self, data, sol0, wgts={}, sparse=False, **kwargs):
"""
With tensorflows ability to compute gradients it makes sense to implement a
gradient solver. The question is: how do I make this fast with arbitrary
equations. Maybe start with a product version like linsolve and go from there
"""
pass
def solve(self):
"""
"""
pass
# XXX make a version of linproductsolver that taylor expands in e^{a+bi} form
# see https://github.com/HERA-Team/linsolve/issues/15
class LinProductSolver:
def __init__(self, data, sol0, wgts={}, sparse=False, **kwargs):
"""Set up a nonlinear system of equations of the form a*b + c*d = 1.0
to linearize via Taylor expansion and solve iteratively using the Gauss-Newton algorithm.
Args:
            data: Dictionary that maps nonlinear product equations, written as valid python-interpretable
                strings that include the variables in question, to (complex) numbers or numpy arrays.
Variables with trailing underscores '_' are interpreted as complex conjugates (e.g. x*y_
parses as x * y.conj()).
sol0: Dictionary mapping all variables (as keyword strings) to their starting guess values.
This is the point that is Taylor expanded around, so it must be relatively close to the
true chi^2 minimizing solution. In the same format as that produced by
linsolve.LogProductSolver.solve() or linsolve.LinProductSolver.solve().
wgts: Dictionary that maps equation strings from data to real weights to apply to each
equation. Weights are treated as 1/sigma^2. All equations in the data must have a weight
if wgts is not the default, {}, which means all 1.0s.
sparse: Boolean (default False). If True, represents A matrix sparsely (though AtA, Aty end up dense)
May be faster for certain systems of equations.
**kwargs: keyword arguments of constants (python variables in keys of data that
are not to be solved for)
Returns:
None
"""
# XXX make this something hard to collide with
# see https://github.com/HERA-Team/linsolve/issues/17
self.prepend = "d"
self.data, self.sparse, self.keys = data, sparse, list(data.keys())
self.wgts = verify_weights(wgts, self.keys)
constants = kwargs.pop("constants", kwargs)
self.init_kwargs, self.sols_kwargs = constants, deepcopy(constants)
self.sols_kwargs.update(sol0)
self.all_terms, self.taylors, self.taylor_keys = self.gen_taylors()
self.build_solver(sol0)
self.dtype = self.ls.dtype
def gen_taylors(self, keys=None):
"""Parses all terms, performs a taylor expansion, and maps equation keys to taylor expansion keys."""
if keys is None:
keys = self.keys
all_terms = [ast_getterms(ast.parse(k, mode="eval")) for k in keys]
taylors, taylor_keys = [], {}
for terms, k in zip(all_terms, keys):
taylor = taylor_expand(terms, self.init_kwargs, prepend=self.prepend)
taylors.append(taylor)
taylor_keys[k] = jointerms(taylor[len(terms) :])
return all_terms, taylors, taylor_keys
def build_solver(self, sol0):
"""Builds a LinearSolver using the taylor expansions and all relevant constants.
Update it with the latest solutions."""
dlin, wlin = {}, {}
for k in self.keys:
tk = self.taylor_keys[k]
dlin[tk] = self.data[
k
] # in theory, this will always be replaced with data - ans0 before use
try:
wlin[tk] = self.wgts[k]
            except KeyError:
pass
self.ls = LinearSolver(
dlin, wgts=wlin, sparse=self.sparse, constants=self.sols_kwargs
)
self.eq_dict = {
eq.val: eq for eq in self.ls.eqs
} # maps taylor string expressions to linear equations
# Now make sure every taylor equation has every relevant constant, even if they don't appear in the derivative terms.
for k, terms in zip(self.keys, self.all_terms):
for term in terms:
for t in term:
t_name = get_name(t)
if t_name in self.sols_kwargs:
self.eq_dict[self.taylor_keys[k]].add_const(
t_name, self.sols_kwargs
)
self._update_solver(sol0)
def _update_solver(self, sol):
"""Update all constants in the internal LinearSolver and its LinearEquations based on new solutions.
Also update the residuals (data - ans0) for next iteration."""
self.sol0 = sol
self.sols_kwargs.update(sol)
for eq in self.ls.eqs:
for c in list(eq.consts.values()):
if c.name in sol:
eq.consts[c.name].val = self.sols_kwargs[c.name]
self.ls.consts.update(eq.consts)
ans0 = self._get_ans0(sol)
for k in ans0:
self.ls.data[self.taylor_keys[k]] = self.data[k] - ans0[k]
def _get_ans0(self, sol, keys=None):
"""Evaluate the system of equations given input sol.
Specify keys to evaluate only a subset of the equations."""
if keys is None:
keys = self.keys
all_terms = self.all_terms
taylors = self.taylors
else:
all_terms, taylors, _ = self.gen_taylors(keys)
ans0 = {}
for k, taylor, terms in zip(keys, taylors, all_terms):
eq = self.eq_dict[self.taylor_keys[k]]
ans0[k] = np.sum([eq.eval_consts(t) for t in taylor[: len(terms)]], axis=0)
return ans0
def solve(self, rcond=None, mode="default"):
"""Executes one iteration of a LinearSolver on the taylor-expanded system of
equations, improving sol0 to get sol.
Args:
            rcond: cutoff ratio for singular values, passed as rcond to tf.linalg.pinv
                (or as the l2_regularizer to tf.linalg.lstsq in the 'lsqr' modes).
                Default: None (resolves to machine precision for inferred dtype)
            mode: 'default', 'lsqr', 'lsqr_stable', 'pinv', or 'solve'; selects which inverter to use,
                unless all equations share the same A matrix, in which case pinv is always used.
                'default': alias for 'pinv'.
                'lsqr': uses tf.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'lsqr_stable': like 'lsqr', but with fast=False for extra numerical stability.
                'solve': uses tf.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses tf.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, an inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.
Returns:
sol: a dictionary of complex solutions with variables as keys
"""
dsol = self.ls.solve(rcond=rcond, mode=mode)
sol = {}
for dk in dsol:
k = dk[len(self.prepend) :]
sol[k] = self.sol0[k] + dsol[dk]
return sol
def eval(self, sol, keys=None):
"""Returns a dictionary evaluating data keys to the current values given sol and consts.
Uses the stored data object unless otherwise specified."""
if type(keys) is str:
keys = [keys]
elif type(keys) is dict:
keys = list(keys.keys())
return self._get_ans0(sol, keys=keys)
def chisq(self, sol, data=None, wgts=None):
"""Compute Chi^2 = |obs - mod|^2 / sigma^2 for the specified solution. Weights are treated as 1/sigma^2.
wgts = {} means sigma = 1. Uses the stored data and weights unless otherwise overwritten."""
if data is None:
data = self.data
if wgts is None:
wgts = self.wgts
wgts = verify_weights(wgts, list(data.keys()))
return self.ls._chisq(sol, data, wgts, self.eval)
def solve_iteratively(
self, conv_crit=None, maxiter=50, mode="default", verbose=False
):
"""Repeatedly solves and updates linsolve until convergence or maxiter is reached.
Returns a meta object containing the number of iterations, chisq, and convergence criterion.
Args:
            conv_crit: A convergence criterion below which to stop iterating.
                Convergence is measured as the L2-norm of the change in the solution of all
                the variables divided by the L2-norm of the solution itself.
                Default: None (resolves to machine precision for inferred dtype)
            maxiter: An integer maximum number of iterations to perform before quitting. Default 50.
            mode: 'default', 'lsqr', 'lsqr_stable', 'pinv', or 'solve'; selects which inverter to use,
                unless all equations share the same A matrix, in which case pinv is always used.
                'default': alias for 'pinv'.
                'lsqr': uses tf.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'lsqr_stable': like 'lsqr', but with fast=False for extra numerical stability.
                'solve': uses tf.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses tf.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, an inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.
verbose: print information about iterations
Returns: meta, sol
meta: a dictionary with metadata about the solution, including
iter: the number of iterations taken to reach convergence (or maxiter)
chisq: the chi^2 of the solution produced by the final iteration
conv_crit: the convergence criterion evaluated at the final iteration
sol: a dictionary of complex solutions with variables as keys
"""
if conv_crit is None:
conv_crit = np.finfo(self.dtype).resolution
for i in range(1, maxiter + 1):
if verbose:
print("Beginning iteration %d/%d" % (i, maxiter))
# rcond=conv_crit works because you can't get better precision than the accuracy of your inversion
# and vice versa, there's no real point in inverting with greater precision than you are shooting for
new_sol = self.solve(rcond=conv_crit, mode=mode)
deltas = [new_sol[k] - self.sol0[k] for k in new_sol.keys()]
conv = np.linalg.norm(deltas, axis=0) / np.linalg.norm(
list(new_sol.values()), axis=0
)
if np.all(conv < conv_crit) or i == maxiter:
meta = {"iter": i, "chisq": self.chisq(new_sol), "conv_crit": conv}
return meta, new_sol
self._update_solver(new_sol)
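# Illustrative usage of LinProductSolver (an added example, not original code):
# Gauss-Newton iteration on a bilinear system, starting near the true answer:
#     data = {'x*y': 6.0, 'y*z': 12.0, 'x*z': 8.0}
#     sol0 = {'x': 1.9, 'y': 3.1, 'z': 4.1}
#     meta, sol = LinProductSolver(data, sol0).solve_iteratively()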
| [
"tensorflow.math.imag",
"tensorflow.transpose",
"numpy.log",
"tensorflow.linalg.pinv",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"tensorflow.math.real",
"numpy.complex64",
"numpy.exp",
"numpy.issubdtype",
"numpy.empty",
"tensorflow.matmul",
"tensorflow.convert_to_tensor",
"ast... | [((11258, 11289), 'functools.reduce', 'reduce', (['np.promote_types', 'types'], {}), '(np.promote_types, types)\n', (11264, 11289), False, 'from functools import reduce\n'), ((28760, 28793), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'terms'], {}), '(lambda x, y: x + y, terms)\n', (28766, 28793), False, 'from functools import reduce\n'), ((8085, 8100), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (8095, 8100), True, 'import numpy as np\n'), ((10429, 10448), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (10437, 10448), True, 'import numpy as np\n'), ((15174, 15195), 'numpy.ones_like', 'np.ones_like', (['A[0, 0]'], {}), '(A[0, 0])\n', (15186, 15195), True, 'import numpy as np\n'), ((16619, 16675), 'numpy.array', 'np.array', (['[self.data[k] for k in self.keys]'], {'dtype': 'dtype'}), '([self.data[k] for k in self.keys], dtype=dtype)\n', (16627, 16675), True, 'import numpy as np\n'), ((18190, 18203), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {}), '(x)\n', (18200, 18203), True, 'import tensorflow as tf\n'), ((18765, 18778), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {}), '(x)\n', (18775, 18778), True, 'import tensorflow as tf\n'), ((18994, 19017), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['A'], {}), '(A)\n', (19014, 19017), True, 'import tensorflow as tf\n'), ((19221, 19244), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['A'], {}), '(A)\n', (19241, 19244), True, 'import tensorflow as tf\n'), ((19525, 19556), 'tensorflow.transpose', 'tf.transpose', (['A'], {'perm': '[2, 0, 1]'}), '(A, perm=[2, 0, 1])\n', (19537, 19556), True, 'import tensorflow as tf\n'), ((19571, 19642), 'tensorflow.matmul', 'tf.matmul', (['A', 'A'], {'adjoint_a': '(True)', 'a_is_sparse': 'sparse', 'b_is_sparse': 'sparse'}), '(A, A, adjoint_a=True, a_is_sparse=sparse, b_is_sparse=sparse)\n', (19580, 19642), True, 'import tensorflow as tf\n'), ((20276, 20307), 'tensorflow.matmul', 'tf.matmul', (['A', 'y'], {'adjoint_a': '(True)'}), '(A, y, adjoint_a=True)\n', (20285, 20307), True, 'import tensorflow as tf\n'), ((20507, 20530), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['A'], {}), '(A)\n', (20527, 20530), True, 'import tensorflow as tf\n'), ((20788, 20819), 'tensorflow.transpose', 'tf.transpose', (['A'], {'perm': '[2, 0, 1]'}), '(A, perm=[2, 0, 1])\n', (20800, 20819), True, 'import tensorflow as tf\n'), ((20834, 20905), 'tensorflow.matmul', 'tf.matmul', (['A', 'A'], {'adjoint_a': '(True)', 'a_is_sparse': 'sparse', 'b_is_sparse': 'sparse'}), '(A, A, adjoint_a=True, a_is_sparse=sparse, b_is_sparse=sparse)\n', (20843, 20905), True, 'import tensorflow as tf\n'), ((20973, 21024), 'tensorflow.matmul', 'tf.matmul', (['A', 'y'], {'adjoint_a': '(True)', 'a_is_sparse': 'sparse'}), '(A, y, adjoint_a=True, a_is_sparse=sparse)\n', (20982, 21024), True, 'import tensorflow as tf\n'), ((21038, 21063), 'tensorflow.linalg.solve', 'tf.linalg.solve', (['AtA', 'Aty'], {}), '(AtA, Aty)\n', (21053, 21063), True, 'import tensorflow as tf\n'), ((21079, 21092), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {}), '(x)\n', (21089, 21092), True, 'import tensorflow as tf\n'), ((21231, 21254), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['A'], {}), '(A)\n', (21251, 21254), True, 'import tensorflow as tf\n'), ((21521, 21592), 'tensorflow.matmul', 'tf.matmul', (['A', 'A'], {'adjoint_a': '(True)', 'a_is_sparse': 'sparse', 'b_is_sparse': 'sparse'}), '(A, A, adjoint_a=True, a_is_sparse=sparse, b_is_sparse=sparse)\n', (21530, 21592), True, 'import 
tensorflow as tf\n'), ((22384, 22407), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['A'], {}), '(A)\n', (22404, 22407), True, 'import tensorflow as tf\n'), ((24814, 24837), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y'], {}), '(y)\n', (24834, 24837), True, 'import tensorflow as tf\n'), ((6324, 6351), 'ast.parse', 'ast.parse', (['val'], {'mode': '"""eval"""'}), "(val, mode='eval')\n", (6333, 6351), False, 'import ast\n'), ((6425, 6440), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (6435, 6440), True, 'import numpy as np\n'), ((9743, 9758), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (9753, 9758), True, 'import numpy as np\n'), ((11153, 11174), 'numpy.dtype', 'np.dtype', (['"""complex64"""'], {}), "('complex64')\n", (11161, 11174), True, 'import numpy as np\n'), ((16219, 16231), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (16227, 16231), True, 'import numpy as np\n'), ((16233, 16245), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (16241, 16245), True, 'import numpy as np\n'), ((16723, 16766), 'numpy.array', 'np.array', (['[self.wgts[k] for k in self.keys]'], {}), '([self.wgts[k] for k in self.keys])\n', (16731, 16766), True, 'import numpy as np\n'), ((17303, 17362), 'numpy.empty', 'np.empty', (['((2 * d.shape[0],) + d.shape[1:])'], {'dtype': 'self.dtype'}), '((2 * d.shape[0],) + d.shape[1:], dtype=self.dtype)\n', (17311, 17362), True, 'import numpy as np\n'), ((18044, 18075), 'tensorflow.transpose', 'tf.transpose', (['A'], {'perm': '[2, 0, 1]'}), '(A, perm=[2, 0, 1])\n', (18056, 18075), True, 'import tensorflow as tf\n'), ((18595, 18626), 'tensorflow.transpose', 'tf.transpose', (['A'], {'perm': '[2, 0, 1]'}), '(A, perm=[2, 0, 1])\n', (18607, 18626), True, 'import tensorflow as tf\n'), ((19816, 19833), 'tensorflow.math.real', 'tf.math.real', (['AtA'], {}), '(AtA)\n', (19828, 19833), True, 'import tensorflow as tf\n'), ((19850, 19867), 'tensorflow.math.imag', 'tf.math.imag', (['AtA'], {}), '(AtA)\n', (19862, 19867), True, 'import tensorflow as tf\n'), ((19990, 20009), 'tensorflow.matmul', 'tf.matmul', (['(-r0)', 'y11'], {}), '(-r0, y11)\n', (19999, 20009), True, 'import tensorflow as tf\n'), ((20110, 20142), 'tensorflow.linalg.pinv', 'tf.linalg.pinv', (['AtA'], {'rcond': 'rcond'}), '(AtA, rcond=rcond)\n', (20124, 20142), True, 'import tensorflow as tf\n'), ((20236, 20251), 'tensorflow.transpose', 'tf.transpose', (['y'], {}), '(y)\n', (20248, 20251), True, 'import tensorflow as tf\n'), ((20331, 20351), 'tensorflow.matmul', 'tf.matmul', (['AtAi', 'Aty'], {}), '(AtAi, Aty)\n', (20340, 20351), True, 'import tensorflow as tf\n'), ((20933, 20948), 'tensorflow.transpose', 'tf.transpose', (['y'], {}), '(y)\n', (20945, 20948), True, 'import tensorflow as tf\n'), ((21792, 21809), 'tensorflow.math.real', 'tf.math.real', (['AtA'], {}), '(AtA)\n', (21804, 21809), True, 'import tensorflow as tf\n'), ((21826, 21843), 'tensorflow.math.imag', 'tf.math.imag', (['AtA'], {}), '(AtA)\n', (21838, 21843), True, 'import tensorflow as tf\n'), ((21966, 21985), 'tensorflow.matmul', 'tf.matmul', (['(-r0)', 'y11'], {}), '(-r0, y11)\n', (21975, 21985), True, 'import tensorflow as tf\n'), ((22086, 22118), 'tensorflow.linalg.pinv', 'tf.linalg.pinv', (['AtA'], {'rcond': 'rcond'}), '(AtA, rcond=rcond)\n', (22100, 22118), True, 'import tensorflow as tf\n'), ((25698, 25721), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['A'], {}), '(A)\n', (25718, 25721), True, 'import tensorflow as tf\n'), ((30639, 30654), 'numpy.log', 'np.log', (['data[k]'], {}), 
'(data[k])\n', (30645, 30654), True, 'import numpy as np\n'), ((31117, 31137), 'numpy.log', 'np.log', (['constants[k]'], {}), '(constants[k])\n', (31123, 31137), True, 'import numpy as np\n'), ((36862, 36881), 'copy.deepcopy', 'deepcopy', (['constants'], {}), '(constants)\n', (36870, 36881), False, 'from copy import deepcopy\n'), ((8321, 8336), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (8331, 8336), True, 'import numpy as np\n'), ((9424, 9442), 'numpy.conj', 'np.conj', (['sol[name]'], {}), '(sol[name])\n', (9431, 9442), True, 'import numpy as np\n'), ((9907, 9931), 'numpy.iscomplexobj', 'np.iscomplexobj', (['wgts[k]'], {}), '(wgts[k])\n', (9922, 9931), True, 'import numpy as np\n'), ((10880, 10909), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.floating'], {}), '(t, np.floating)\n', (10893, 10909), True, 'import numpy as np\n'), ((10913, 10949), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.complexfloating'], {}), '(t, np.complexfloating)\n', (10926, 10949), True, 'import numpy as np\n'), ((14676, 14714), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'self.shape'], {}), '(lambda x, y: x * y, self.shape)\n', (14682, 14714), False, 'from functools import reduce\n'), ((16247, 16279), 'numpy.array', 'np.array', (['vals'], {'dtype': 'self.dtype'}), '(vals, dtype=self.dtype)\n', (16255, 16279), True, 'import numpy as np\n'), ((18104, 18119), 'tensorflow.transpose', 'tf.transpose', (['y'], {}), '(y)\n', (18116, 18119), True, 'import tensorflow as tf\n'), ((18655, 18670), 'tensorflow.transpose', 'tf.transpose', (['y'], {}), '(y)\n', (18667, 18670), True, 'import tensorflow as tf\n'), ((19895, 19912), 'tensorflow.linalg.pinv', 'tf.linalg.pinv', (['R'], {}), '(R)\n', (19909, 19912), True, 'import tensorflow as tf\n'), ((20037, 20057), 'tensorflow.complex', 'tf.complex', (['y11', 'y10'], {}), '(y11, y10)\n', (20047, 20057), True, 'import tensorflow as tf\n'), ((21871, 21888), 'tensorflow.linalg.pinv', 'tf.linalg.pinv', (['R'], {}), '(R)\n', (21885, 21888), True, 'import tensorflow as tf\n'), ((22013, 22033), 'tensorflow.complex', 'tf.complex', (['y11', 'y10'], {}), '(y11, y10)\n', (22023, 22033), True, 'import tensorflow as tf\n'), ((22177, 22228), 'tensorflow.matmul', 'tf.matmul', (['A', 'y'], {'adjoint_a': '(True)', 'a_is_sparse': 'sparse'}), '(A, y, adjoint_a=True, a_is_sparse=sparse)\n', (22186, 22228), True, 'import tensorflow as tf\n'), ((24733, 24753), 'numpy.finfo', 'np.finfo', (['self.dtype'], {}), '(self.dtype)\n', (24741, 24753), True, 'import numpy as np\n'), ((30247, 30272), 'ast.parse', 'ast.parse', (['k'], {'mode': '"""eval"""'}), "(k, mode='eval')\n", (30256, 30272), False, 'import ast\n'), ((37300, 37325), 'ast.parse', 'ast.parse', (['k'], {'mode': '"""eval"""'}), "(k, mode='eval')\n", (37309, 37325), False, 'import ast\n'), ((45002, 45022), 'numpy.finfo', 'np.finfo', (['self.dtype'], {}), '(self.dtype)\n', (45010, 45022), True, 'import numpy as np\n'), ((45542, 45572), 'numpy.linalg.norm', 'np.linalg.norm', (['deltas'], {'axis': '(0)'}), '(deltas, axis=0)\n', (45556, 45572), True, 'import numpy as np\n'), ((45667, 45691), 'numpy.all', 'np.all', (['(conv < conv_crit)'], {}), '(conv < conv_crit)\n', (45673, 45691), True, 'import numpy as np\n'), ((13738, 13766), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'self.dtype'}), '(1, dtype=self.dtype)\n', (13745, 13766), True, 'import numpy as np\n'), ((19950, 19966), 'tensorflow.matmul', 'tf.matmul', (['C', 'r0'], {}), '(C, r0)\n', (19959, 19966), True, 'import tensorflow as tf\n'), ((21926, 21942), 
'tensorflow.matmul', 'tf.matmul', (['C', 'r0'], {}), '(C, r0)\n', (21935, 21942), True, 'import tensorflow as tf\n'), ((27694, 27724), 'numpy.abs', 'np.abs', (['(evaluated[k] - data[k])'], {}), '(evaluated[k] - data[k])\n', (27700, 27724), True, 'import numpy as np\n'), ((6010, 6028), 'numpy.complex64', 'np.complex64', (['(1.0j)'], {}), '(1.0j)\n', (6022, 6028), True, 'import numpy as np\n'), ((33662, 33680), 'numpy.exp', 'np.exp', (['sol_amp[k]'], {}), '(sol_amp[k])\n', (33668, 33680), True, 'import numpy as np\n'), ((33522, 33540), 'numpy.complex64', 'np.complex64', (['(1.0j)'], {}), '(1.0j)\n', (33534, 33540), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve
def fix_source(source, mask, shape, offset):
mydict = {}
counter = 0
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
if mask[i][j]>127:
mydict[(i+offset[0], j+offset[1])] = counter
counter += 1
fixed_source = np.zeros(shape, dtype=int) #use int to avoid overflow
fixed_source[max(0, offset[0]):min(source.shape[0]+offset[0], shape[0]), max(0, offset[1]):min(source.shape[1]+offset[1],shape[1]),:]=source[max(0,-offset[0]):min(source.shape[0], shape[0]-offset[0]),max(0,-offset[1]):min(source.shape[1], shape[1]-offset[1]),:]
return fixed_source, mydict
offset = [[210, 10], [10, 28], [140, 80], [-40, 90], [60, 100], [-28, 88]]
for pic_index in range(1, 6):
mask = cv2.imread("../data/mask_0{0}.jpg".format(pic_index), 0)
source = cv2.imread("../data/source_0{0}.jpg".format(pic_index))
target = cv2.imread("../data/target_0{0}.jpg".format(pic_index))
fixed_source, D = fix_source(source, mask, target.shape, offset[pic_index-1]) #fixed source, same size with target
A = np.zeros((len(D),len(D)), dtype=int)
b = np.zeros((len(D),3), dtype=int)
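    # Each masked pixel p contributes one row of the discrete Poisson equation
    # (an added explanatory note): 4*f_p - sum_{q in N(p)} f_q = 4*g_p - sum_{q in N(p)} g_q,
    # where f is the unknown blend, g the offset source, and N(p) the 4-neighborhood;
    # known boundary values f_q = target_q are moved into the right-hand side b below.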
for k, v in D.items():
A[v][v] = 4
b[v] += 4*fixed_source[k[0]][k[1]] \
- fixed_source[k[0]+1][k[1]] \
- fixed_source[k[0]-1][k[1]] \
- fixed_source[k[0]][k[1]+1] \
- fixed_source[k[0]][k[1]-1]
if (k[0]+1, k[1]) in D: # in D means this pixel is waiting to be calculated
A[v][D[(k[0]+1, k[1])]] = -1
else:
b[v] += target[k[0]+1][k[1]]
if (k[0]-1, k[1]) in D:
A[v][D[(k[0]-1, k[1])]] = -1
else:
b[v] += target[k[0]-1][k[1]]
if (k[0], k[1]+1) in D:
A[v][D[(k[0], k[1]+1)]] = -1
else:
b[v] += target[k[0]][k[1]+1]
if (k[0], k[1]-1) in D:
A[v][D[(k[0], k[1]-1)]] = -1
else:
b[v] += target[k[0]][k[1]-1]
    # convert the dense system to CSR before calling the sparse solver
    x = spsolve(csr_matrix(A), b)
for k, v in D.items():
        # clamp each channel to [0, 255] before casting back to uint8
        for ch in range(3):
            if x[v][ch] > 255:
                target[k[0]][k[1]][ch] = np.uint8(255)
            elif x[v][ch] < 0:
                target[k[0]][k[1]][ch] = np.uint8(0)
            else:
                target[k[0]][k[1]][ch] = np.uint8(round(x[v][ch]))
# target[k[0]][k[1]][0] = np.uint8(round(x[v][0])%256)
# target[k[0]][k[1]][1] = np.uint8(round(x[v][1])%256)
# target[k[0]][k[1]][2] = np.uint8(round(x[v][2])%256)
cv2.imwrite("result_0{0}.jpg".format(pic_index), target)
| [
"numpy.uint8",
"scipy.sparse.linalg.spsolve",
"numpy.zeros"
] | [((387, 413), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'int'}), '(shape, dtype=int)\n', (395, 413), True, 'import numpy as np\n'), ((2100, 2113), 'scipy.sparse.linalg.spsolve', 'spsolve', (['A', 'b'], {}), '(A, b)\n', (2107, 2113), False, 'from scipy.sparse.linalg import spsolve\n'), ((2202, 2215), 'numpy.uint8', 'np.uint8', (['(255)'], {}), '(255)\n', (2210, 2215), True, 'import numpy as np\n'), ((2424, 2437), 'numpy.uint8', 'np.uint8', (['(255)'], {}), '(255)\n', (2432, 2437), True, 'import numpy as np\n'), ((2646, 2659), 'numpy.uint8', 'np.uint8', (['(255)'], {}), '(255)\n', (2654, 2659), True, 'import numpy as np\n'), ((2276, 2287), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (2284, 2287), True, 'import numpy as np\n'), ((2498, 2509), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (2506, 2509), True, 'import numpy as np\n'), ((2720, 2731), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (2728, 2731), True, 'import numpy as np\n')] |
import sys
import time
import imageio
import tensorflow as tf
import numpy as np
image_path = sys.argv[1]
image = imageio.imread(image_path)
input_data = np.array([image])
print(input_data.shape)
saver = tf.train.import_meta_graph('./model.meta', clear_devices=True)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
with tf.Session(config = tf.ConfigProto(gpu_options=gpu_options)) as sess:
saver.restore(sess, 'model')
predictor = tf.get_collection("prediction")[0]
x = tf.get_collection("x")[0]
print(type(x))
result = sess.run(predictor, feed_dict={x: input_data})
print(result)
probability = result[0][1]
print(probability)
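# Calibration step (added explanatory note): model_error below is set to 0.85 and,
# despite the name, acts as the model's assumed accuracy. The raw probability is
# rescaled into [1 - model_error, model_error] = [0.15, 0.85] so the report never
# claims more certainty than the model itself provides.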
model_error = 0.85
model_range = 1 - 2 * (1-model_error)
probability = (1 - model_error) + probability * model_range
print(f"Reporting a probability of {probability}")
with open("result.txt", "w") as text_file:
    text_file.write(str(probability))
| [
"numpy.array",
"tensorflow.train.import_meta_graph",
"imageio.imread",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.get_collection"
] | [((116, 142), 'imageio.imread', 'imageio.imread', (['image_path'], {}), '(image_path)\n', (130, 142), False, 'import imageio\n'), ((156, 173), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (164, 173), True, 'import numpy as np\n'), ((208, 270), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./model.meta"""'], {'clear_devices': '(True)'}), "('./model.meta', clear_devices=True)\n", (234, 270), True, 'import tensorflow as tf\n'), ((286, 336), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.7)'}), '(per_process_gpu_memory_fraction=0.7)\n', (299, 336), True, 'import tensorflow as tf\n'), ((467, 498), 'tensorflow.get_collection', 'tf.get_collection', (['"""prediction"""'], {}), "('prediction')\n", (484, 498), True, 'import tensorflow as tf\n'), ((510, 532), 'tensorflow.get_collection', 'tf.get_collection', (['"""x"""'], {}), "('x')\n", (527, 532), True, 'import tensorflow as tf\n'), ((363, 402), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (377, 402), True, 'import tensorflow as tf\n')] |
'''
## Replay Memory ##
# Adapted from: https://github.com/tambetm/simple_dqn/blob/master/src/replay_memory.py
# Creates replay memory buffer to add experiences to and sample batches of experiences from
'''
import numpy as np
import random
class ReplayMemory:
def __init__(self, args):
self.buffer_size = args.replay_mem_size
self.min_buffer_size = args.initial_replay_mem_size
# preallocate memory
        self.actions = np.empty(self.buffer_size, dtype=np.uint8)
        self.rewards = np.empty(self.buffer_size, dtype=np.int32)
        self.frames = np.empty((args.frame_height, args.frame_width, self.buffer_size), dtype=np.uint8)
        self.terminals = np.empty(self.buffer_size, dtype=np.bool_)
self.frames_per_state = args.frames_per_state
self.dims = (args.frame_height, args.frame_width)
self.batch_size = args.batch_size
self.count = 0
self.current = 0
self.states = np.empty((self.batch_size, args.frame_height, args.frame_width, self.frames_per_state), dtype = np.uint8)
self.next_states = np.empty((self.batch_size, args.frame_height, args.frame_width, self.frames_per_state), dtype = np.uint8)
def add(self, action, reward, frame, terminal):
assert frame.shape == self.dims
# NB! frame is post-state, after action and reward
self.actions[self.current] = action
self.rewards[self.current] = reward
self.frames[..., self.current] = frame
self.terminals[self.current] = terminal
self.count = max(self.count, self.current + 1)
self.current = (self.current + 1) % self.buffer_size
def getState(self, index):
        # Takes the frame at position 'index' and returns a state consisting of this frame and the previous (frames_per_state - 1) frames.
return self.frames[..., (index - (self.frames_per_state - 1)):(index + 1)]
def getMinibatch(self):
# memory must include next_state, current state and (frames_per_state-1) previous states
assert self.count > self.frames_per_state, "Replay memory must contain more frames than the desired number of frames per state"
# memory should be initially populated with random actions up to 'min_buffer_size'
assert self.count >= self.min_buffer_size, "Replay memory does not contain enough samples to start learning, take random actions to populate replay memory"
# sample random indexes
indexes = []
# do until we have a full batch of states
while len(indexes) < self.batch_size:
# find random index
while True:
# sample one index
index = random.randint(self.frames_per_state, self.count - 1)
# check index is ok
# if wraps over current pointer, then get new one (as subsequent samples from current pointer position will not be from same episode)
if index >= self.current and index - self.frames_per_state < self.current:
continue
# if wraps over episode end (terminal state), then get new one (note that last frame can be terminal)
if self.terminals[(index - self.frames_per_state):index].any():
continue
# index is ok to use
break
# Populate states and next_states with selected state and next_state (consisting of a 4 frame sequence)
# NB! having index first is fastest in C-order matrices
self.states[len(indexes), ...] = self.getState(index - 1)
self.next_states[len(indexes), ...] = self.getState(index)
indexes.append(index)
actions = self.actions[indexes]
rewards = self.rewards[indexes]
terminals = self.terminals[indexes]
return self.states, actions, rewards, self.next_states, terminals
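# Illustrative shapes (added note, not original code): with batch_size=10 and
# frames_per_state=4, getMinibatch() returns states and next_states of shape
# (10, frame_height, frame_width, 4), plus 1D actions/rewards/terminals of length 10.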
if __name__ == '__main__':
### For testing ###
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--frame_width", type=int, default=105, help="Frame width after resize.")
parser.add_argument("--frame_height", type=int, default=80, help="Frame height after resize.")
parser.add_argument("--frames_per_state", type=int, default=4, help="Sequence of frames which constitutes a single state.")
parser.add_argument("--batch_size", type=int, default=10)
args = parser.parse_args()
mem = ReplayMemory(100, args)
#Populate experience buffer
for i in range(0,105):
frame = np.random.randint(255, size=(args.frame_height, args.frame_width))
action = np.random.randint(4)
reward = np.random.randint(2)
terminal = np.random.choice(a=[False, False, False, False, False, False, False, False, True])
mem.add(action, reward, frame, terminal)
batch = mem.getMinibatch()
| [
"argparse.ArgumentParser",
"numpy.random.choice",
"numpy.random.randint",
"numpy.empty",
"random.randint"
] | [((4041, 4066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4064, 4066), False, 'import argparse\n'), ((452, 494), 'numpy.empty', 'np.empty', (['self.buffer_size'], {'dtype': 'np.uint8'}), '(self.buffer_size, dtype=np.uint8)\n', (460, 494), True, 'import numpy as np\n'), ((520, 564), 'numpy.empty', 'np.empty', (['self.buffer_size'], {'dtype': 'np.integer'}), '(self.buffer_size, dtype=np.integer)\n', (528, 564), True, 'import numpy as np\n'), ((589, 675), 'numpy.empty', 'np.empty', (['(args.frame_height, args.frame_width, self.buffer_size)'], {'dtype': 'np.uint8'}), '((args.frame_height, args.frame_width, self.buffer_size), dtype=np.\n uint8)\n', (597, 675), True, 'import numpy as np\n'), ((698, 739), 'numpy.empty', 'np.empty', (['self.buffer_size'], {'dtype': 'np.bool'}), '(self.buffer_size, dtype=np.bool)\n', (706, 739), True, 'import numpy as np\n'), ((975, 1083), 'numpy.empty', 'np.empty', (['(self.batch_size, args.frame_height, args.frame_width, self.frames_per_state)'], {'dtype': 'np.uint8'}), '((self.batch_size, args.frame_height, args.frame_width, self.\n frames_per_state), dtype=np.uint8)\n', (983, 1083), True, 'import numpy as np\n'), ((1108, 1216), 'numpy.empty', 'np.empty', (['(self.batch_size, args.frame_height, args.frame_width, self.frames_per_state)'], {'dtype': 'np.uint8'}), '((self.batch_size, args.frame_height, args.frame_width, self.\n frames_per_state), dtype=np.uint8)\n', (1116, 1216), True, 'import numpy as np\n'), ((4609, 4675), 'numpy.random.randint', 'np.random.randint', (['(255)'], {'size': '(args.frame_height, args.frame_width)'}), '(255, size=(args.frame_height, args.frame_width))\n', (4626, 4675), True, 'import numpy as np\n'), ((4693, 4713), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (4710, 4713), True, 'import numpy as np\n'), ((4731, 4751), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (4748, 4751), True, 'import numpy as np\n'), ((4771, 4857), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[False, False, False, False, False, False, False, False, True]'}), '(a=[False, False, False, False, False, False, False, False,\n True])\n', (4787, 4857), True, 'import numpy as np\n'), ((2716, 2769), 'random.randint', 'random.randint', (['self.frames_per_state', '(self.count - 1)'], {}), '(self.frames_per_state, self.count - 1)\n', (2730, 2769), False, 'import random\n')] |
import numpy as np
# Local Modules
from object import *
import utils
rng = np.random.default_rng()
def reflect_ray(n, eye, ph, roughness, diffuse=False):
    if diffuse:
        # sample a random direction on the upper unit hemisphere (z >= 0) using
        # spherical coordinates with uniform phi; note that the surface normal n
        # is not used in this branch as written
        phi = rng.random() * 2 * np.pi
        z = rng.random()
        theta = np.arccos(z)
        x = np.sin(theta) * np.cos(phi)
        y = np.sin(theta) * np.sin(phi)
        z = np.cos(theta)
        nr = np.array([x, y, z])
        reflected_ray = Ray(ph, nr)
reflected_ray = Ray(ph, nr)
else:
c = np.dot(n, eye)
r = -1 * eye + 2 * c * n
# Adding roughness
if roughness > 0:
# Random vector with 3 values between [-1, 1]
random_vector = 2 * np.random.random_sample(3) - 1
r = utils.normalize(r + roughness ** 2 * random_vector)
reflected_ray = Ray(ph, utils.normalize(r))
return reflected_ray
class Ray:
"""
Ray object that is used for raytracing. It has an intersect function to get
the intersection of the ray and a given object.
Attr:
pr: Origin point of the Ray
nr: Director vector for the ray
"""
def __init__(self, pr, nr):
self.pr = pr
self.nr = nr
def at(self, t):
"""
Get the point in the ray at position t.
Args:
t(float): The scalar that multiplies the director vector in the ray
Returns:
np.array: The point in 3D that represent the ray at position t
"""
return self.pr + t * self.nr
def intersect_plane(self, plane):
# Dot product of ray director and plane normal,
# if zero no intersection
dot_normals = np.dot(self.nr, plane.n)
if dot_normals == 0:
if np.dot((plane.position - self.pr), plane.n) == 0:
return 0
else:
return -1
t = np.dot((plane.position - self.pr), plane.n) / dot_normals
if t < 0:
return -1
return t
def intersect_sphere(self, sphere):
"""
Find t of intersection to a sphere, -1 means no intersection
"""
# Sphere center point
pc = sphere.position
dif = self.pr - pc
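        # Added explanatory note: the quadratic |pr + t*nr - pc|^2 = r^2 (assuming
        # |nr| = 1) reduces to t^2 + 2*b*t + c = 0 with b = nr . dif and
        # c = |dif|^2 - r^2; the nearer intersection is t = -b - sqrt(b^2 - c).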
b = np.dot(self.nr, dif)
c = np.dot(dif, dif) - sphere.radius ** 2
discriminant = b ** 2 - c
if b > 0 or discriminant < 0:
return -1
t = -1 * b - np.sqrt(discriminant)
return t
def intersect_hollow_sphere(self, hollow_sphere):
"""
Find t of intersection to a sphere, -1 means no intersection
"""
# Sphere center point
pc = hollow_sphere.position
dif = self.pr - pc
b = np.dot(self.nr, dif)
c = np.dot(dif, dif) - hollow_sphere.radius ** 2
discriminant = b ** 2 - c
if discriminant < 0:
return -1
t = -1 * b + np.sqrt(discriminant)
return t
def intersect_triangle(self, triangle):
ray_t = self.intersect_plane(triangle)
p_in_plane = self.at(ray_t)
s, t = triangle.get_barycentric_coord(p_in_plane)
if 0 <= s <= 1 and 0 <= t <= 1 and 0 <= s + t <= 1:
return ray_t
return -1
def intersect_triangular_mesh(self, mesh):
triangles = mesh.get_triangles()
min_t = np.inf
for tr in triangles:
t = self.intersect_triangle(tr)
if 0 < t < min_t:
min_t = t
if min_t == np.inf:
return -1
return min_t
def intersect(self, obj):
"""
Find t of intersection, -1 value means no intersection.
"""
if isinstance(obj, HollowSphere):
return self.intersect_hollow_sphere(obj)
elif isinstance(obj, Sphere):
return self.intersect_sphere(obj)
elif isinstance(obj, Plane):
return self.intersect_plane(obj)
elif isinstance(obj, Tetrahedron) or isinstance(obj, Cube):
return self.intersect_triangular_mesh(obj)
elif isinstance(obj, Triangle):
return self.intersect_triangle(obj)
else:
return -1
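# Illustrative example (added, not original code): for a ray from the origin
# along +z and a sphere of radius 1 centered at (0, 0, 5),
# b = nr . (pr - pc) = -5 and c = |pr - pc|^2 - r^2 = 24, so
# t = -b - sqrt(b^2 - c) = 5 - 1 = 4, i.e. the ray first hits the sphere at t = 4.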
| [
"numpy.arccos",
"numpy.random.default_rng",
"utils.normalize",
"numpy.sqrt",
"numpy.random.random_sample",
"numpy.array",
"numpy.dot",
"numpy.cos",
"numpy.sin"
] | [((77, 100), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (98, 100), True, 'import numpy as np\n'), ((254, 266), 'numpy.arccos', 'np.arccos', (['z'], {}), '(z)\n', (263, 266), True, 'import numpy as np\n'), ((359, 372), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (365, 372), True, 'import numpy as np\n'), ((386, 405), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (394, 405), True, 'import numpy as np\n'), ((464, 478), 'numpy.dot', 'np.dot', (['n', 'eye'], {}), '(n, eye)\n', (470, 478), True, 'import numpy as np\n'), ((1624, 1648), 'numpy.dot', 'np.dot', (['self.nr', 'plane.n'], {}), '(self.nr, plane.n)\n', (1630, 1648), True, 'import numpy as np\n'), ((2171, 2191), 'numpy.dot', 'np.dot', (['self.nr', 'dif'], {}), '(self.nr, dif)\n', (2177, 2191), True, 'import numpy as np\n'), ((2649, 2669), 'numpy.dot', 'np.dot', (['self.nr', 'dif'], {}), '(self.nr, dif)\n', (2655, 2669), True, 'import numpy as np\n'), ((279, 292), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (285, 292), True, 'import numpy as np\n'), ((295, 306), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (301, 306), True, 'import numpy as np\n'), ((319, 332), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (325, 332), True, 'import numpy as np\n'), ((335, 346), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (341, 346), True, 'import numpy as np\n'), ((702, 753), 'utils.normalize', 'utils.normalize', (['(r + roughness ** 2 * random_vector)'], {}), '(r + roughness ** 2 * random_vector)\n', (717, 753), False, 'import utils\n'), ((786, 804), 'utils.normalize', 'utils.normalize', (['r'], {}), '(r)\n', (801, 804), False, 'import utils\n'), ((1824, 1865), 'numpy.dot', 'np.dot', (['(plane.position - self.pr)', 'plane.n'], {}), '(plane.position - self.pr, plane.n)\n', (1830, 1865), True, 'import numpy as np\n'), ((2204, 2220), 'numpy.dot', 'np.dot', (['dif', 'dif'], {}), '(dif, dif)\n', (2210, 2220), True, 'import numpy as np\n'), ((2357, 2378), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (2364, 2378), True, 'import numpy as np\n'), ((2682, 2698), 'numpy.dot', 'np.dot', (['dif', 'dif'], {}), '(dif, dif)\n', (2688, 2698), True, 'import numpy as np\n'), ((2833, 2854), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (2840, 2854), True, 'import numpy as np\n'), ((1693, 1734), 'numpy.dot', 'np.dot', (['(plane.position - self.pr)', 'plane.n'], {}), '(plane.position - self.pr, plane.n)\n', (1699, 1734), True, 'import numpy as np\n'), ((655, 681), 'numpy.random.random_sample', 'np.random.random_sample', (['(3)'], {}), '(3)\n', (678, 681), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various metric functions to evaluate locally interpretable models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
def fidelity_metrics(test_y_hat, test_y_fit, metric):
"""Computes fidelity metrics.
  Fidelity is defined as the differences between black-box model
  predictions (test_y_hat) and locally interpretable model predictions
  (test_y_fit).
Different metrics can be used such as mae, mse, rmse, r2 score.
Args:
test_y_hat: black-box model predictions
test_y_fit: locally interpretable model predictions
metric: metric to estimate the fidelity (mae, mse, rmse, r2 score)
Returns:
fidelity: fidelity result
"""
# Mean Absolute Error
if metric == 'mae':
fidelity = metrics.mean_absolute_error(test_y_hat, test_y_fit)
# Mean Squared Error
elif metric == 'mse':
fidelity = metrics.mean_squared_error(test_y_hat, test_y_fit)
# Root Mean Squared Error
elif metric == 'rmse':
fidelity = np.sqrt(metrics.mean_squared_error(test_y_hat, test_y_fit))
# R2 Score
elif metric == 'r2':
fidelity = metrics.r2_score(test_y_hat, test_y_fit)
return fidelity
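# Worked example (values invented purely for illustration): RMSE fidelity
# between a black-box prediction vector and a locally interpretable fit.
_demo_y_hat = np.array([0.2, 0.8, 0.5])
_demo_y_fit = np.array([0.25, 0.7, 0.5])
assert np.isclose(fidelity_metrics(_demo_y_hat, _demo_y_fit, 'rmse'),
                  np.sqrt((0.05**2 + 0.1**2) / 3))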
def overall_performance_metrics(test_y, test_y_fit, metric):
"""Computes overall performance metrics.
Overall performance is defined as the differences between ground truth labels
(test_y) and locally interpretable model predictions (test_y_fit).
Different metrics can be used such as mae, mse, rmse, auc, accuracy.
Args:
test_y: ground truth labels
test_y_fit: locally interpretable model predictions
    metric: metric to estimate the overall performance (mae, mse, rmse, auc, accuracy)
Returns:
overall_perf: overall prediction performance result
"""
# Mean Absolute Error
if metric == 'mae':
overall_perf = metrics.mean_absolute_error(test_y, test_y_fit)
# Mean Squared Error
elif metric == 'mse':
overall_perf = metrics.mean_squared_error(test_y, test_y_fit)
# Root Mean Squared Error
elif metric == 'rmse':
overall_perf = np.sqrt(metrics.mean_squared_error(test_y, test_y_fit))
# Area Under ROC Curve
elif metric == 'auc':
overall_perf = metrics.roc_auc_score(test_y, test_y_fit)
# Accuracy
elif metric == 'accuracy':
overall_perf = metrics.accuracy_score(np.argmax(test_y, axis=1),
np.argmax(test_y_fit, axis=1))
return overall_perf
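# Worked example (labels invented purely for illustration): the 'accuracy'
# branch expects one-hot labels and class scores, hence the argmax above.
_demo_y = np.array([[1, 0], [0, 1], [1, 0]])
_demo_y_fit2 = np.array([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6]])
assert np.isclose(overall_performance_metrics(_demo_y, _demo_y_fit2, 'accuracy'),
                  2 / 3)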
def awd_metric(test_c, test_coef):
"""Computes absolute weight difference (AWD) metric.
Absolute weight difference (AWD) is defined as the differences between
ground truth local dynamics (test_c) and estimated local dynamics (test_coef).
Args:
test_c: ground truth local dynamics
test_coef: estimated local dynamics by locally interpretable model
Returns:
awd: absolute weight difference (AWD) performance result
"""
# Only for non-zero coefficients
test_c_nonzero = 1*(test_c > 0)
# Sum of absolute weight difference
awd_sum = np.sum(np.abs((test_c * test_c_nonzero) - \
(test_coef[:, 1:] * test_c_nonzero)))
# Mean of absolute weight difference
awd = awd_sum / np.sum(test_c_nonzero)
return awd
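# Worked example (numbers invented purely for illustration): only features
# with positive ground-truth dynamics contribute, and test_coef carries an
# extra leading intercept column, hence the [:, 1:] slice above.
_demo_c = np.array([[1.0, 0.0]])
_demo_coef = np.array([[0.3, 0.8, 0.5]])  # [intercept, w1, w2]
assert np.isclose(awd_metric(_demo_c, _demo_coef), 0.2)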
def plot_result(x_test, data_name, test_y_hat, test_y_fit,
test_c, test_coef, metric, criteria):
"""Plots various fidelity performances.
  This function plots fidelity or AWD results with respect to
distance from the boundary where the local dynamics change (in percentile).
Args:
x_test: features in testing set
data_name: Syn1, Syn2 or Syn3
test_y_hat: black-box model predictions
test_y_fit: locally interpretable model predictions
test_c: ground truth local dynamics
test_coef: estimated local dynamics by locally interpretable model
metric: metrics for computing fidelity
    criteria: 'Fidelity' or 'AWD'
"""
# Order of testing set index based on the
# distance from the boundary where the local dynamics change
if data_name == 'Syn1':
test_idx = np.argsort(np.abs(x_test[:, 9]))
elif data_name == 'Syn2':
test_idx = np.argsort(np.abs(x_test[:, 9] + np.exp(x_test[:, 10]) - 1))
elif data_name == 'Syn3':
test_idx = np.argsort(np.abs(x_test[:, 9] + np.power(x_test[:, 10], 3)))
# Determines x in terms of percentile
division = 10
x = [(1.0/(2*division)) + (1.0/division)*i for i in range(division)]
# Initializes output
output = np.zeros([division,])
# Parameters
thresh = (1.0/division)
test_no = len(test_idx)
# For each division (distance from the decision boundary)
for i in range(division):
# Samples in each division
temp_idx = test_idx[int(test_no*thresh*i):int(test_no*thresh*(i+1))]
if criteria == 'Fidelity':
# Computes fidelity
output[i] = fidelity_metrics(test_y_hat[temp_idx],
test_y_fit[temp_idx],
metric)
elif criteria == 'AWD':
# Computes AWD
output[i] = awd_metric(test_c[temp_idx, :], test_coef[temp_idx, :])
# Plots
plt.figure(figsize=(6, 4))
plt.plot(x, output, 'o-')
plt.xlabel('Distance from the boundary (percentile)', size=16)
if criteria == 'Fidelity':
plt.ylabel(metric, size=16)
elif criteria == 'AWD':
plt.ylabel('AWD', size=16)
plt.grid()
plt.legend(['RL-LIM - ' + criteria], prop={'size': 16})
plt.title(data_name + ' Dataset', size=16)
plt.show()
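if __name__ == '__main__':
  # Smoke test with synthetic data (shapes and values invented purely for
  # illustration); 'Syn1' only requires feature column 9 to exist.
  demo_x = np.random.rand(100, 11)
  demo_y_hat = np.random.rand(100)
  demo_y_fit = demo_y_hat + 0.01 * np.random.rand(100)
  plot_result(demo_x, 'Syn1', demo_y_hat, demo_y_fit, test_c=None,
              test_coef=None, metric='mse', criteria='Fidelity')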
| [
"numpy.abs",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.argmax",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.roc_auc_score",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"sklearn.metr... | [((5113, 5133), 'numpy.zeros', 'np.zeros', (['[division]'], {}), '([division])\n', (5121, 5133), True, 'import numpy as np\n'), ((5747, 5773), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (5757, 5773), True, 'import matplotlib.pyplot as plt\n'), ((5776, 5801), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'output', '"""o-"""'], {}), "(x, output, 'o-')\n", (5784, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5804, 5866), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance from the boundary (percentile)"""'], {'size': '(16)'}), "('Distance from the boundary (percentile)', size=16)\n", (5814, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5987, 5997), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5995, 5997), True, 'import matplotlib.pyplot as plt\n'), ((6000, 6055), 'matplotlib.pyplot.legend', 'plt.legend', (["['RL-LIM - ' + criteria]"], {'prop': "{'size': 16}"}), "(['RL-LIM - ' + criteria], prop={'size': 16})\n", (6010, 6055), True, 'import matplotlib.pyplot as plt\n'), ((6058, 6100), 'matplotlib.pyplot.title', 'plt.title', (["(data_name + ' Dataset')"], {'size': '(16)'}), "(data_name + ' Dataset', size=16)\n", (6067, 6100), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6113), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6111, 6113), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1520), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['test_y_hat', 'test_y_fit'], {}), '(test_y_hat, test_y_fit)\n', (1496, 1520), False, 'from sklearn import metrics\n'), ((2513, 2560), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['test_y', 'test_y_fit'], {}), '(test_y, test_y_fit)\n', (2540, 2560), False, 'from sklearn import metrics\n'), ((3693, 3760), 'numpy.abs', 'np.abs', (['(test_c * test_c_nonzero - test_coef[:, 1:] * test_c_nonzero)'], {}), '(test_c * test_c_nonzero - test_coef[:, 1:] * test_c_nonzero)\n', (3699, 3760), True, 'import numpy as np\n'), ((3852, 3874), 'numpy.sum', 'np.sum', (['test_c_nonzero'], {}), '(test_c_nonzero)\n', (3858, 3874), True, 'import numpy as np\n'), ((5900, 5927), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['metric'], {'size': '(16)'}), '(metric, size=16)\n', (5910, 5927), True, 'import matplotlib.pyplot as plt\n'), ((1583, 1633), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_y_hat', 'test_y_fit'], {}), '(test_y_hat, test_y_fit)\n', (1609, 1633), False, 'from sklearn import metrics\n'), ((2627, 2673), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_y', 'test_y_fit'], {}), '(test_y, test_y_fit)\n', (2653, 2673), False, 'from sklearn import metrics\n'), ((4719, 4739), 'numpy.abs', 'np.abs', (['x_test[:, 9]'], {}), '(x_test[:, 9])\n', (4725, 4739), True, 'import numpy as np\n'), ((5958, 5984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AWD"""'], {'size': '(16)'}), "('AWD', size=16)\n", (5968, 5984), True, 'import matplotlib.pyplot as plt\n'), ((1710, 1760), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_y_hat', 'test_y_fit'], {}), '(test_y_hat, test_y_fit)\n', (1736, 1760), False, 'from sklearn import metrics\n'), ((1813, 1853), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['test_y_hat', 'test_y_fit'], {}), '(test_y_hat, test_y_fit)\n', (1829, 1853), False, 'from sklearn import metrics\n'), ((2754, 2800), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_y', 'test_y_fit'], {}), '(test_y, 
test_y_fit)\n', (2780, 2800), False, 'from sklearn import metrics\n'), ((2870, 2911), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['test_y', 'test_y_fit'], {}), '(test_y, test_y_fit)\n', (2891, 2911), False, 'from sklearn import metrics\n'), ((2996, 3021), 'numpy.argmax', 'np.argmax', (['test_y'], {'axis': '(1)'}), '(test_y, axis=1)\n', (3005, 3021), True, 'import numpy as np\n'), ((3065, 3094), 'numpy.argmax', 'np.argmax', (['test_y_fit'], {'axis': '(1)'}), '(test_y_fit, axis=1)\n', (3074, 3094), True, 'import numpy as np\n'), ((4817, 4838), 'numpy.exp', 'np.exp', (['x_test[:, 10]'], {}), '(x_test[:, 10])\n', (4823, 4838), True, 'import numpy as np\n'), ((4921, 4947), 'numpy.power', 'np.power', (['x_test[:, 10]', '(3)'], {}), '(x_test[:, 10], 3)\n', (4929, 4947), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.executor import Executor
from mars.tensor.datasource import tensor, arange
from mars.tensor.indexing import take, compress, extract, choose, \
unravel_index, nonzero, flatnonzero
from mars.tensor import mod, stack, hstack
from mars.config import options
class Test(unittest.TestCase):
def setUp(self):
self.executor = Executor('numpy')
self.old_chunk = options.tensor.chunk_size
options.tensor.chunk_size = 10
def tearDown(self):
options.tensor.chunk_size = self.old_chunk
def testBoolIndexingExecution(self):
raw = np.random.random((11, 8, 12, 14))
arr = tensor(raw, chunk_size=3)
index = arr < .5
arr2 = arr[index]
size_res = self.executor.execute_tensor(arr2, mock=True)
res = self.executor.execute_tensor(arr2)
self.assertEqual(sum(s[0] for s in size_res), arr.nbytes)
np.testing.assert_array_equal(np.sort(np.concatenate(res)), np.sort(raw[raw < .5]))
index2 = tensor(raw[:, :, 0, 0], chunk_size=3) < .5
arr3 = arr[index2]
res = self.executor.execute_tensor(arr3)
self.assertEqual(sum(it.size for it in res), raw[raw[:, :, 0, 0] < .5].size)
def testFancyIndexingNumpyExecution(self):
# test fancy index of type numpy ndarray
raw = np.random.random((11, 8, 12, 14))
arr = tensor(raw, chunk_size=(2, 3, 2, 3))
index = [8, 10, 3, 1, 9, 10]
arr2 = arr[index]
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw[index])
index = np.random.permutation(8)
arr3 = arr[:2, ..., index]
res = self.executor.execute_tensor(arr3, concat=True)
np.testing.assert_array_equal(res[0], raw[:2, ..., index])
index = [1, 3, 9, 10]
arr4 = arr[..., index, :5]
res = self.executor.execute_tensor(arr4, concat=True)
np.testing.assert_array_equal(res[0], raw[..., index, :5])
index1 = [8, 10, 3, 1, 9, 10]
index2 = [1, 3, 9, 10, 2, 7]
arr5 = arr[index1, :, index2]
res = self.executor.execute_tensor(arr5, concat=True)
np.testing.assert_array_equal(res[0], raw[index1, :, index2])
index1 = [1, 3, 5, 7, 9, 10]
index2 = [1, 9, 9, 10, 2, 7]
arr6 = arr[index1, :, index2]
res = self.executor.execute_tensor(arr6, concat=True)
np.testing.assert_array_equal(res[0], raw[index1, :, index2])
# fancy index is ordered, no concat required
self.assertGreater(len(arr6.nsplits[0]), 1)
index1 = [[8, 10, 3], [1, 9, 10]]
index2 = [[1, 3, 9], [10, 2, 7]]
arr7 = arr[index1, :, index2]
res = self.executor.execute_tensor(arr7, concat=True)
np.testing.assert_array_equal(res[0], raw[index1, :, index2])
index1 = [[1, 3], [3, 7], [7, 7]]
index2 = [1, 9]
arr8 = arr[0, index1, :, index2]
res = self.executor.execute_tensor(arr8, concat=True)
np.testing.assert_array_equal(res[0], raw[0, index1, :, index2])
def testFancyIndexingTensorExecution(self):
# test fancy index of type tensor
raw = np.random.random((11, 8, 12, 14))
arr = tensor(raw, chunk_size=(2, 3, 2, 3))
raw_index = [8, 10, 3, 1, 9, 10]
index = tensor(raw_index, chunk_size=4)
arr2 = arr[index]
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw[raw_index])
raw_index = np.random.permutation(8)
index = tensor(raw_index, chunk_size=3)
arr3 = arr[:2, ..., index]
res = self.executor.execute_tensor(arr3, concat=True)
np.testing.assert_array_equal(res[0], raw[:2, ..., raw_index])
raw_index = [1, 3, 9, 10]
index = tensor(raw_index)
arr4 = arr[..., index, :5]
res = self.executor.execute_tensor(arr4, concat=True)
np.testing.assert_array_equal(res[0], raw[..., raw_index, :5])
raw_index1 = [8, 10, 3, 1, 9, 10]
raw_index2 = [1, 3, 9, 10, 2, 7]
index1 = tensor(raw_index1, chunk_size=4)
index2 = tensor(raw_index2, chunk_size=3)
arr5 = arr[index1, :, index2]
res = self.executor.execute_tensor(arr5, concat=True)
np.testing.assert_array_equal(res[0], raw[raw_index1, :, raw_index2])
raw_index1 = [1, 3, 5, 7, 9, 10]
raw_index2 = [1, 9, 9, 10, 2, 7]
index1 = tensor(raw_index1, chunk_size=3)
index2 = tensor(raw_index2, chunk_size=4)
arr6 = arr[index1, :, index2]
res = self.executor.execute_tensor(arr6, concat=True)
np.testing.assert_array_equal(res[0], raw[raw_index1, :, raw_index2])
raw_index1 = [[8, 10, 3], [1, 9, 10]]
raw_index2 = [[1, 3, 9], [10, 2, 7]]
index1 = tensor(raw_index1)
index2 = tensor(raw_index2, chunk_size=2)
arr7 = arr[index1, :, index2]
res = self.executor.execute_tensor(arr7, concat=True)
np.testing.assert_array_equal(res[0], raw[raw_index1, :, raw_index2])
raw_index1 = [[1, 3], [3, 7], [7, 7]]
raw_index2 = [1, 9]
index1 = tensor(raw_index1, chunk_size=(2, 1))
index2 = tensor(raw_index2)
arr8 = arr[0, index1, :, index2]
res = self.executor.execute_tensor(arr8, concat=True)
np.testing.assert_array_equal(res[0], raw[0, raw_index1, :, raw_index2])
raw_a = np.random.rand(30, 30)
a = tensor(raw_a, chunk_size=(13, 17))
b = a.argmax(axis=0)
c = a[b, arange(30)]
res = self.executor.execute_tensor(c, concat=True)
np.testing.assert_array_equal(res[0], raw_a[raw_a.argmax(axis=0), np.arange(30)])
def testSliceExecution(self):
raw = np.random.random((11, 8, 12, 14))
arr = tensor(raw, chunk_size=3)
arr2 = arr[2:9:2, 3:7, -1:-9:-2, 12:-11:-4]
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw[2:9:2, 3:7, -1:-9:-2, 12:-11:-4])
arr3 = arr[-4, 2:]
res = self.executor.execute_tensor(arr3, concat=True)
np.testing.assert_equal(res[0], raw[-4, 2:])
raw = sps.random(12, 14, density=.1)
arr = tensor(raw, chunk_size=3)
arr2 = arr[-1:-9:-2, 12:-11:-4]
res = self.executor.execute_tensor(arr2, concat=True)[0]
np.testing.assert_equal(res.toarray(), raw.toarray()[-1:-9:-2, 12:-11:-4])
def testMixedIndexingExecution(self):
raw = np.random.random((11, 8, 12, 13))
arr = tensor(raw, chunk_size=3)
raw_cond = raw[0, :, 0, 0] < .5
cond = tensor(raw[0, :, 0, 0], chunk_size=3) < .5
arr2 = arr[10::-2, cond, None, ..., :5]
size_res = self.executor.execute_tensor(arr2, mock=True)
res = self.executor.execute_tensor(arr2, concat=True)
new_shape = list(arr2.shape)
new_shape[1] = cond.shape[0]
self.assertEqual(sum(s[0] for s in size_res), int(np.prod(new_shape) * arr2.dtype.itemsize))
np.testing.assert_array_equal(res[0], raw[10::-2, raw_cond, None, ..., :5])
b_raw = np.random.random(8)
cond = tensor(b_raw, chunk_size=2) < .5
arr3 = arr[-2::-3, cond, ...]
res = self.executor.execute_tensor(arr3, concat=True)
np.testing.assert_array_equal(res[0], raw[-2::-3, b_raw < .5, ...])
def testSetItemExecution(self):
raw = data = np.random.randint(0, 10, size=(11, 8, 12, 13))
arr = tensor(raw.copy(), chunk_size=3)
raw = raw.copy()
idx = slice(2, 9, 2), slice(3, 7), slice(-1, -9, -2), 2
arr[idx] = 20
res = self.executor.execute_tensor(arr, concat=True)
raw[idx] = 20
np.testing.assert_array_equal(res[0], raw)
raw = data
shape = raw[idx].shape
arr2 = tensor(raw.copy(), chunk_size=3)
raw = raw.copy()
replace = np.random.randint(10, 20, size=shape[:-1] + (1,)).astype('f4')
arr2[idx] = tensor(replace, chunk_size=4)
res = self.executor.execute_tensor(arr2, concat=True)
raw[idx] = replace
np.testing.assert_array_equal(res[0], raw)
def testSetItemStructuredExecution(self):
rec_type = np.dtype([('a', np.int32), ('b', np.double), ('c', np.dtype([('a', np.int16), ('b', np.int64)]))])
raw = np.zeros((4, 5), dtype=rec_type)
arr = tensor(raw.copy(), chunk_size=3)
arr[1:4, 1] = (3, 4., (5, 6))
arr[1:4, 2] = 8
arr[1:3] = np.arange(5)
arr[2:4] = np.arange(10).reshape(2, 5)
arr[0] = np.arange(5)
raw[1:4, 1] = (3, 4., (5, 6))
raw[1:4, 2] = 8
raw[1:3] = np.arange(5)
raw[2:4] = np.arange(10).reshape(2, 5)
raw[0] = np.arange(5)
res = self.executor.execute_tensor(arr, concat=True)
self.assertEqual(arr.dtype, raw.dtype)
self.assertEqual(arr.shape, raw.shape)
np.testing.assert_array_equal(res[0], raw)
def testTakeExecution(self):
data = np.random.rand(10, 20, 30)
t = tensor(data, chunk_size=10)
a = t.take([4, 1, 2, 6, 200])
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.take(data, [4, 1, 2, 6, 200])
np.testing.assert_array_equal(res, expected)
a = take(t, [5, 19, 2, 13], axis=1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.take(data, [5, 19, 2, 13], axis=1)
np.testing.assert_array_equal(res, expected)
def testCompressExecution(self):
data = np.array([[1, 2], [3, 4], [5, 6]])
a = tensor(data, chunk_size=1)
t = compress([0, 1], a, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.compress([0, 1], data, axis=0)
np.testing.assert_array_equal(res, expected)
t = compress([0, 1], a, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.compress([0, 1], data, axis=1)
np.testing.assert_array_equal(res, expected)
t = a.compress([0, 1, 1])
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.compress([0, 1, 1], data)
np.testing.assert_array_equal(res, expected)
t = compress([False, True, True], a, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.compress([False, True, True], data, axis=0)
np.testing.assert_array_equal(res, expected)
t = compress([False, True], a, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.compress([False, True], data, axis=1)
np.testing.assert_array_equal(res, expected)
with self.assertRaises(np.AxisError):
compress([0, 1, 1], a, axis=1)
def testExtractExecution(self):
data = np.arange(12).reshape((3, 4))
a = tensor(data, chunk_size=2)
condition = mod(a, 3) == 0
t = extract(condition, a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.extract(np.mod(data, 3) == 0, data)
np.testing.assert_array_equal(res, expected)
def testChooseExecution(self):
options.tensor.chunk_size = 2
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
a = choose([2, 3, 1, 0], choices)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.choose([2, 3, 1, 0], choices)
np.testing.assert_array_equal(res, expected)
a = choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
expected = np.choose([2, 4, 1, 0], choices, mode='clip')
res = self.executor.execute_tensor(a, concat=True)[0]
np.testing.assert_array_equal(res, expected)
a = choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
expected = np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
res = self.executor.execute_tensor(a, concat=True)[0]
np.testing.assert_array_equal(res, expected)
a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
choices = [-10, 10]
b = choose(a, choices)
expected = np.choose(a, choices)
res = self.executor.execute_tensor(b, concat=True)[0]
np.testing.assert_array_equal(res, expected)
a = np.array([0, 1]).reshape((2, 1, 1))
c1 = np.array([1, 2, 3]).reshape((1, 3, 1))
c2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5))
b = choose(a, (c1, c2))
expected = np.choose(a, (c1, c2))
res = self.executor.execute_tensor(b, concat=True)[0]
np.testing.assert_array_equal(res, expected)
def testUnravelExecution(self):
a = tensor([22, 41, 37], chunk_size=1)
t = stack(unravel_index(a, (7, 6)))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.stack(np.unravel_index([22, 41, 37], (7, 6)))
np.testing.assert_array_equal(res, expected)
def testNonzeroExecution(self):
data = np.array([[1, 0, 0], [0, 2, 0], [1, 1, 0]])
x = tensor(data, chunk_size=2)
t = hstack(nonzero(x))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.hstack(np.nonzero(data))
np.testing.assert_array_equal(res, expected)
def testFlatnonzeroExecution(self):
x = arange(-2, 3, chunk_size=2)
t = flatnonzero(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flatnonzero(np.arange(-2, 3))
np.testing.assert_equal(res, expected)
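if __name__ == '__main__':
    # Standard unittest entry point (added so the module runs directly).
    unittest.main()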
| [
"numpy.prod",
"numpy.random.rand",
"numpy.testing.assert_equal",
"mars.tensor.indexing.compress",
"numpy.array",
"mars.tensor.indexing.nonzero",
"mars.tensor.indexing.unravel_index",
"numpy.arange",
"numpy.mod",
"mars.tensor.indexing.take",
"numpy.random.random",
"numpy.sort",
"numpy.take",
... | [((1053, 1070), 'mars.executor.Executor', 'Executor', (['"""numpy"""'], {}), "('numpy')\n", (1061, 1070), False, 'from mars.executor import Executor\n'), ((1293, 1326), 'numpy.random.random', 'np.random.random', (['(11, 8, 12, 14)'], {}), '((11, 8, 12, 14))\n', (1309, 1326), True, 'import numpy as np\n'), ((1341, 1366), 'mars.tensor.datasource.tensor', 'tensor', (['raw'], {'chunk_size': '(3)'}), '(raw, chunk_size=3)\n', (1347, 1366), False, 'from mars.tensor.datasource import tensor, arange\n'), ((2026, 2059), 'numpy.random.random', 'np.random.random', (['(11, 8, 12, 14)'], {}), '((11, 8, 12, 14))\n', (2042, 2059), True, 'import numpy as np\n'), ((2074, 2110), 'mars.tensor.datasource.tensor', 'tensor', (['raw'], {'chunk_size': '(2, 3, 2, 3)'}), '(raw, chunk_size=(2, 3, 2, 3))\n', (2080, 2110), False, 'from mars.tensor.datasource import tensor, arange\n'), ((2246, 2295), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[index]'], {}), '(res[0], raw[index])\n', (2275, 2295), True, 'import numpy as np\n'), ((2313, 2337), 'numpy.random.permutation', 'np.random.permutation', (['(8)'], {}), '(8)\n', (2334, 2337), True, 'import numpy as np\n'), ((2444, 2502), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[:2, ..., index]'], {}), '(res[0], raw[:2, ..., index])\n', (2473, 2502), True, 'import numpy as np\n'), ((2640, 2698), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[..., index, :5]'], {}), '(res[0], raw[..., index, :5])\n', (2669, 2698), True, 'import numpy as np\n'), ((2884, 2945), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[index1, :, index2]'], {}), '(res[0], raw[index1, :, index2])\n', (2913, 2945), True, 'import numpy as np\n'), ((3130, 3191), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[index1, :, index2]'], {}), '(res[0], raw[index1, :, index2])\n', (3159, 3191), True, 'import numpy as np\n'), ((3490, 3551), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[index1, :, index2]'], {}), '(res[0], raw[index1, :, index2])\n', (3519, 3551), True, 'import numpy as np\n'), ((3731, 3795), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[0, index1, :, index2]'], {}), '(res[0], raw[0, index1, :, index2])\n', (3760, 3795), True, 'import numpy as np\n'), ((3902, 3935), 'numpy.random.random', 'np.random.random', (['(11, 8, 12, 14)'], {}), '((11, 8, 12, 14))\n', (3918, 3935), True, 'import numpy as np\n'), ((3950, 3986), 'mars.tensor.datasource.tensor', 'tensor', (['raw'], {'chunk_size': '(2, 3, 2, 3)'}), '(raw, chunk_size=(2, 3, 2, 3))\n', (3956, 3986), False, 'from mars.tensor.datasource import tensor, arange\n'), ((4045, 4076), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index'], {'chunk_size': '(4)'}), '(raw_index, chunk_size=4)\n', (4051, 4076), False, 'from mars.tensor.datasource import tensor, arange\n'), ((4174, 4227), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[raw_index]'], {}), '(res[0], raw[raw_index])\n', (4203, 4227), True, 'import numpy as np\n'), ((4249, 4273), 'numpy.random.permutation', 'np.random.permutation', (['(8)'], {}), '(8)\n', (4270, 4273), True, 'import numpy as np\n'), ((4290, 4321), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index'], {'chunk_size': '(3)'}), '(raw_index, chunk_size=3)\n', (4296, 4321), False, 'from mars.tensor.datasource import 
tensor, arange\n'), ((4428, 4490), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[:2, ..., raw_index]'], {}), '(res[0], raw[:2, ..., raw_index])\n', (4457, 4490), True, 'import numpy as np\n'), ((4542, 4559), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index'], {}), '(raw_index)\n', (4548, 4559), False, 'from mars.tensor.datasource import tensor, arange\n'), ((4666, 4728), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[..., raw_index, :5]'], {}), '(res[0], raw[..., raw_index, :5])\n', (4695, 4728), True, 'import numpy as np\n'), ((4830, 4862), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index1'], {'chunk_size': '(4)'}), '(raw_index1, chunk_size=4)\n', (4836, 4862), False, 'from mars.tensor.datasource import tensor, arange\n'), ((4880, 4912), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index2'], {'chunk_size': '(3)'}), '(raw_index2, chunk_size=3)\n', (4886, 4912), False, 'from mars.tensor.datasource import tensor, arange\n'), ((5022, 5091), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[raw_index1, :, raw_index2]'], {}), '(res[0], raw[raw_index1, :, raw_index2])\n', (5051, 5091), True, 'import numpy as np\n'), ((5192, 5224), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index1'], {'chunk_size': '(3)'}), '(raw_index1, chunk_size=3)\n', (5198, 5224), False, 'from mars.tensor.datasource import tensor, arange\n'), ((5242, 5274), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index2'], {'chunk_size': '(4)'}), '(raw_index2, chunk_size=4)\n', (5248, 5274), False, 'from mars.tensor.datasource import tensor, arange\n'), ((5384, 5453), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[raw_index1, :, raw_index2]'], {}), '(res[0], raw[raw_index1, :, raw_index2])\n', (5413, 5453), True, 'import numpy as np\n'), ((5563, 5581), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index1'], {}), '(raw_index1)\n', (5569, 5581), False, 'from mars.tensor.datasource import tensor, arange\n'), ((5599, 5631), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index2'], {'chunk_size': '(2)'}), '(raw_index2, chunk_size=2)\n', (5605, 5631), False, 'from mars.tensor.datasource import tensor, arange\n'), ((5741, 5810), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[raw_index1, :, raw_index2]'], {}), '(res[0], raw[raw_index1, :, raw_index2])\n', (5770, 5810), True, 'import numpy as np\n'), ((5903, 5940), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index1'], {'chunk_size': '(2, 1)'}), '(raw_index1, chunk_size=(2, 1))\n', (5909, 5940), False, 'from mars.tensor.datasource import tensor, arange\n'), ((5958, 5976), 'mars.tensor.datasource.tensor', 'tensor', (['raw_index2'], {}), '(raw_index2)\n', (5964, 5976), False, 'from mars.tensor.datasource import tensor, arange\n'), ((6089, 6161), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[0, raw_index1, :, raw_index2]'], {}), '(res[0], raw[0, raw_index1, :, raw_index2])\n', (6118, 6161), True, 'import numpy as np\n'), ((6179, 6201), 'numpy.random.rand', 'np.random.rand', (['(30)', '(30)'], {}), '(30, 30)\n', (6193, 6201), True, 'import numpy as np\n'), ((6214, 6248), 'mars.tensor.datasource.tensor', 'tensor', (['raw_a'], {'chunk_size': '(13, 17)'}), '(raw_a, chunk_size=(13, 17))\n', (6220, 6248), False, 'from mars.tensor.datasource import tensor, arange\n'), ((6506, 6539), 'numpy.random.random', 'np.random.random', (['(11, 8, 
12, 14)'], {}), '((11, 8, 12, 14))\n', (6522, 6539), True, 'import numpy as np\n'), ((6554, 6579), 'mars.tensor.datasource.tensor', 'tensor', (['raw'], {'chunk_size': '(3)'}), '(raw, chunk_size=3)\n', (6560, 6579), False, 'from mars.tensor.datasource import tensor, arange\n'), ((6704, 6779), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[2:9:2, 3:7, -1:-9:-2, 12:-11:-4]'], {}), '(res[0], raw[2:9:2, 3:7, -1:-9:-2, 12:-11:-4])\n', (6733, 6779), True, 'import numpy as np\n'), ((6878, 6922), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[0]', 'raw[-4, 2:]'], {}), '(res[0], raw[-4, 2:])\n', (6901, 6922), True, 'import numpy as np\n'), ((6938, 6969), 'scipy.sparse.random', 'sps.random', (['(12)', '(14)'], {'density': '(0.1)'}), '(12, 14, density=0.1)\n', (6948, 6969), True, 'import scipy.sparse as sps\n'), ((6983, 7008), 'mars.tensor.datasource.tensor', 'tensor', (['raw'], {'chunk_size': '(3)'}), '(raw, chunk_size=3)\n', (6989, 7008), False, 'from mars.tensor.datasource import tensor, arange\n'), ((7256, 7289), 'numpy.random.random', 'np.random.random', (['(11, 8, 12, 13)'], {}), '((11, 8, 12, 13))\n', (7272, 7289), True, 'import numpy as np\n'), ((7304, 7329), 'mars.tensor.datasource.tensor', 'tensor', (['raw'], {'chunk_size': '(3)'}), '(raw, chunk_size=3)\n', (7310, 7329), False, 'from mars.tensor.datasource import tensor, arange\n'), ((7788, 7863), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[10::-2, raw_cond, None, ..., :5]'], {}), '(res[0], raw[10::-2, raw_cond, None, ..., :5])\n', (7817, 7863), True, 'import numpy as np\n'), ((7881, 7900), 'numpy.random.random', 'np.random.random', (['(8)'], {}), '(8)\n', (7897, 7900), True, 'import numpy as np\n'), ((8058, 8126), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw[-2::-3, b_raw < 0.5, ...]'], {}), '(res[0], raw[-2::-3, b_raw < 0.5, ...])\n', (8087, 8126), True, 'import numpy as np\n'), ((8184, 8230), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': '(11, 8, 12, 13)'}), '(0, 10, size=(11, 8, 12, 13))\n', (8201, 8230), True, 'import numpy as np\n'), ((8482, 8524), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw'], {}), '(res[0], raw)\n', (8511, 8524), True, 'import numpy as np\n'), ((8752, 8781), 'mars.tensor.datasource.tensor', 'tensor', (['replace'], {'chunk_size': '(4)'}), '(replace, chunk_size=4)\n', (8758, 8781), False, 'from mars.tensor.datasource import tensor, arange\n'), ((8880, 8922), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw'], {}), '(res[0], raw)\n', (8909, 8922), True, 'import numpy as np\n'), ((9103, 9135), 'numpy.zeros', 'np.zeros', (['(4, 5)'], {'dtype': 'rec_type'}), '((4, 5), dtype=rec_type)\n', (9111, 9135), True, 'import numpy as np\n'), ((9265, 9277), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (9274, 9277), True, 'import numpy as np\n'), ((9342, 9354), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (9351, 9354), True, 'import numpy as np\n'), ((9437, 9449), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (9446, 9449), True, 'import numpy as np\n'), ((9514, 9526), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (9523, 9526), True, 'import numpy as np\n'), ((9691, 9733), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res[0]', 'raw'], {}), '(res[0], raw)\n', (9720, 9733), True, 'import numpy as np\n'), ((9783, 9809), 'numpy.random.rand', 
'np.random.rand', (['(10)', '(20)', '(30)'], {}), '(10, 20, 30)\n', (9797, 9809), True, 'import numpy as np\n'), ((9822, 9849), 'mars.tensor.datasource.tensor', 'tensor', (['data'], {'chunk_size': '(10)'}), '(data, chunk_size=10)\n', (9828, 9849), False, 'from mars.tensor.datasource import tensor, arange\n'), ((9971, 10003), 'numpy.take', 'np.take', (['data', '[4, 1, 2, 6, 200]'], {}), '(data, [4, 1, 2, 6, 200])\n', (9978, 10003), True, 'import numpy as np\n'), ((10012, 10056), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (10041, 10056), True, 'import numpy as np\n'), ((10070, 10101), 'mars.tensor.indexing.take', 'take', (['t', '[5, 19, 2, 13]'], {'axis': '(1)'}), '(t, [5, 19, 2, 13], axis=1)\n', (10074, 10101), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((10184, 10221), 'numpy.take', 'np.take', (['data', '[5, 19, 2, 13]'], {'axis': '(1)'}), '(data, [5, 19, 2, 13], axis=1)\n', (10191, 10221), True, 'import numpy as np\n'), ((10230, 10274), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (10259, 10274), True, 'import numpy as np\n'), ((10328, 10362), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (10336, 10362), True, 'import numpy as np\n'), ((10375, 10401), 'mars.tensor.datasource.tensor', 'tensor', (['data'], {'chunk_size': '(1)'}), '(data, chunk_size=1)\n', (10381, 10401), False, 'from mars.tensor.datasource import tensor, arange\n'), ((10415, 10442), 'mars.tensor.indexing.compress', 'compress', (['[0, 1]', 'a'], {'axis': '(0)'}), '([0, 1], a, axis=0)\n', (10423, 10442), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((10525, 10558), 'numpy.compress', 'np.compress', (['[0, 1]', 'data'], {'axis': '(0)'}), '([0, 1], data, axis=0)\n', (10536, 10558), True, 'import numpy as np\n'), ((10567, 10611), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (10596, 10611), True, 'import numpy as np\n'), ((10625, 10652), 'mars.tensor.indexing.compress', 'compress', (['[0, 1]', 'a'], {'axis': '(1)'}), '([0, 1], a, axis=1)\n', (10633, 10652), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((10735, 10768), 'numpy.compress', 'np.compress', (['[0, 1]', 'data'], {'axis': '(1)'}), '([0, 1], data, axis=1)\n', (10746, 10768), True, 'import numpy as np\n'), ((10777, 10821), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (10806, 10821), True, 'import numpy as np\n'), ((10939, 10967), 'numpy.compress', 'np.compress', (['[0, 1, 1]', 'data'], {}), '([0, 1, 1], data)\n', (10950, 10967), True, 'import numpy as np\n'), ((10976, 11020), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (11005, 11020), True, 'import numpy as np\n'), ((11034, 11074), 'mars.tensor.indexing.compress', 'compress', (['[False, True, True]', 'a'], {'axis': '(0)'}), '([False, True, True], a, axis=0)\n', (11042, 11074), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((11157, 11203), 'numpy.compress', 'np.compress', (['[False, True, True]', 'data'], {'axis': '(0)'}), '([False, True, True], 
data, axis=0)\n', (11168, 11203), True, 'import numpy as np\n'), ((11212, 11256), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (11241, 11256), True, 'import numpy as np\n'), ((11270, 11304), 'mars.tensor.indexing.compress', 'compress', (['[False, True]', 'a'], {'axis': '(1)'}), '([False, True], a, axis=1)\n', (11278, 11304), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((11387, 11427), 'numpy.compress', 'np.compress', (['[False, True]', 'data'], {'axis': '(1)'}), '([False, True], data, axis=1)\n', (11398, 11427), True, 'import numpy as np\n'), ((11436, 11480), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (11465, 11480), True, 'import numpy as np\n'), ((11665, 11691), 'mars.tensor.datasource.tensor', 'tensor', (['data'], {'chunk_size': '(2)'}), '(data, chunk_size=2)\n', (11671, 11691), False, 'from mars.tensor.datasource import tensor, arange\n'), ((11740, 11761), 'mars.tensor.indexing.extract', 'extract', (['condition', 'a'], {}), '(condition, a)\n', (11747, 11761), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((11891, 11935), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (11920, 11935), True, 'import numpy as np\n'), ((12129, 12158), 'mars.tensor.indexing.choose', 'choose', (['[2, 3, 1, 0]', 'choices'], {}), '([2, 3, 1, 0], choices)\n', (12135, 12158), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((12241, 12273), 'numpy.choose', 'np.choose', (['[2, 3, 1, 0]', 'choices'], {}), '([2, 3, 1, 0], choices)\n', (12250, 12273), True, 'import numpy as np\n'), ((12283, 12327), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (12312, 12327), True, 'import numpy as np\n'), ((12341, 12383), 'mars.tensor.indexing.choose', 'choose', (['[2, 4, 1, 0]', 'choices'], {'mode': '"""clip"""'}), "([2, 4, 1, 0], choices, mode='clip')\n", (12347, 12383), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((12424, 12469), 'numpy.choose', 'np.choose', (['[2, 4, 1, 0]', 'choices'], {'mode': '"""clip"""'}), "([2, 4, 1, 0], choices, mode='clip')\n", (12433, 12469), True, 'import numpy as np\n'), ((12541, 12585), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (12570, 12585), True, 'import numpy as np\n'), ((12599, 12641), 'mars.tensor.indexing.choose', 'choose', (['[2, 4, 1, 0]', 'choices'], {'mode': '"""wrap"""'}), "([2, 4, 1, 0], choices, mode='wrap')\n", (12605, 12641), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((12684, 12729), 'numpy.choose', 'np.choose', (['[2, 4, 1, 0]', 'choices'], {'mode': '"""wrap"""'}), "([2, 4, 1, 0], choices, mode='wrap')\n", (12693, 12729), True, 'import numpy as np\n'), ((12824, 12868), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (12853, 12868), True, 'import numpy as np\n'), ((12957, 12975), 'mars.tensor.indexing.choose', 'choose', (['a', 'choices'], {}), '(a, choices)\n', (12963, 12975), False, 'from mars.tensor.indexing import 
take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((12995, 13016), 'numpy.choose', 'np.choose', (['a', 'choices'], {}), '(a, choices)\n', (13004, 13016), True, 'import numpy as np\n'), ((13088, 13132), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (13117, 13132), True, 'import numpy as np\n'), ((13310, 13329), 'mars.tensor.indexing.choose', 'choose', (['a', '(c1, c2)'], {}), '(a, (c1, c2))\n', (13316, 13329), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((13349, 13371), 'numpy.choose', 'np.choose', (['a', '(c1, c2)'], {}), '(a, (c1, c2))\n', (13358, 13371), True, 'import numpy as np\n'), ((13443, 13487), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (13472, 13487), True, 'import numpy as np\n'), ((13537, 13571), 'mars.tensor.datasource.tensor', 'tensor', (['[22, 41, 37]'], {'chunk_size': '(1)'}), '([22, 41, 37], chunk_size=1)\n', (13543, 13571), False, 'from mars.tensor.datasource import tensor, arange\n'), ((13756, 13800), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (13785, 13800), True, 'import numpy as np\n'), ((13853, 13896), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 2, 0], [1, 1, 0]]'], {}), '([[1, 0, 0], [0, 2, 0], [1, 1, 0]])\n', (13861, 13896), True, 'import numpy as np\n'), ((13909, 13935), 'mars.tensor.datasource.tensor', 'tensor', (['data'], {'chunk_size': '(2)'}), '(data, chunk_size=2)\n', (13915, 13935), False, 'from mars.tensor.datasource import tensor, arange\n'), ((14086, 14130), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['res', 'expected'], {}), '(res, expected)\n', (14115, 14130), True, 'import numpy as np\n'), ((14184, 14211), 'mars.tensor.datasource.arange', 'arange', (['(-2)', '(3)'], {'chunk_size': '(2)'}), '(-2, 3, chunk_size=2)\n', (14190, 14211), False, 'from mars.tensor.datasource import tensor, arange\n'), ((14225, 14239), 'mars.tensor.indexing.flatnonzero', 'flatnonzero', (['x'], {}), '(x)\n', (14236, 14239), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((14364, 14402), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (14387, 14402), True, 'import numpy as np\n'), ((1668, 1691), 'numpy.sort', 'np.sort', (['raw[raw < 0.5]'], {}), '(raw[raw < 0.5])\n', (1675, 1691), True, 'import numpy as np\n'), ((1710, 1747), 'mars.tensor.datasource.tensor', 'tensor', (['raw[:, :, 0, 0]'], {'chunk_size': '(3)'}), '(raw[:, :, 0, 0], chunk_size=3)\n', (1716, 1747), False, 'from mars.tensor.datasource import tensor, arange\n'), ((7386, 7423), 'mars.tensor.datasource.tensor', 'tensor', (['raw[0, :, 0, 0]'], {'chunk_size': '(3)'}), '(raw[0, :, 0, 0], chunk_size=3)\n', (7392, 7423), False, 'from mars.tensor.datasource import tensor, arange\n'), ((7916, 7943), 'mars.tensor.datasource.tensor', 'tensor', (['b_raw'], {'chunk_size': '(2)'}), '(b_raw, chunk_size=2)\n', (7922, 7943), False, 'from mars.tensor.datasource import tensor, arange\n'), ((11540, 11570), 'mars.tensor.indexing.compress', 'compress', (['[0, 1, 1]', 'a'], {'axis': '(1)'}), '([0, 1, 1], a, axis=1)\n', (11548, 11570), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((11712, 11721), 
'mars.tensor.mod', 'mod', (['a', '(3)'], {}), '(a, 3)\n', (11715, 11721), False, 'from mars.tensor import mod, stack, hstack\n'), ((13590, 13614), 'mars.tensor.indexing.unravel_index', 'unravel_index', (['a', '(7, 6)'], {}), '(a, (7, 6))\n', (13603, 13614), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((13707, 13745), 'numpy.unravel_index', 'np.unravel_index', (['[22, 41, 37]', '(7, 6)'], {}), '([22, 41, 37], (7, 6))\n', (13723, 13745), True, 'import numpy as np\n'), ((13955, 13965), 'mars.tensor.indexing.nonzero', 'nonzero', (['x'], {}), '(x)\n', (13962, 13965), False, 'from mars.tensor.indexing import take, compress, extract, choose, unravel_index, nonzero, flatnonzero\n'), ((14059, 14075), 'numpy.nonzero', 'np.nonzero', (['data'], {}), '(data)\n', (14069, 14075), True, 'import numpy as np\n'), ((14337, 14353), 'numpy.arange', 'np.arange', (['(-2)', '(3)'], {}), '(-2, 3)\n', (14346, 14353), True, 'import numpy as np\n'), ((1646, 1665), 'numpy.concatenate', 'np.concatenate', (['res'], {}), '(res)\n', (1660, 1665), True, 'import numpy as np\n'), ((6295, 6305), 'mars.tensor.datasource.arange', 'arange', (['(30)'], {}), '(30)\n', (6301, 6305), False, 'from mars.tensor.datasource import tensor, arange\n'), ((8669, 8718), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)'], {'size': '(shape[:-1] + (1,))'}), '(10, 20, size=shape[:-1] + (1,))\n', (8686, 8718), True, 'import numpy as np\n'), ((9297, 9310), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (9306, 9310), True, 'import numpy as np\n'), ((9469, 9482), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (9478, 9482), True, 'import numpy as np\n'), ((11623, 11636), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (11632, 11636), True, 'import numpy as np\n'), ((11855, 11870), 'numpy.mod', 'np.mod', (['data', '(3)'], {}), '(data, 3)\n', (11861, 11870), True, 'import numpy as np\n'), ((13146, 13162), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (13154, 13162), True, 'import numpy as np\n'), ((13195, 13214), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (13203, 13214), True, 'import numpy as np\n'), ((13247, 13277), 'numpy.array', 'np.array', (['[-1, -2, -3, -4, -5]'], {}), '([-1, -2, -3, -4, -5])\n', (13255, 13277), True, 'import numpy as np\n'), ((6441, 6454), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (6450, 6454), True, 'import numpy as np\n'), ((7737, 7755), 'numpy.prod', 'np.prod', (['new_shape'], {}), '(new_shape)\n', (7744, 7755), True, 'import numpy as np\n'), ((9040, 9084), 'numpy.dtype', 'np.dtype', (["[('a', np.int16), ('b', np.int64)]"], {}), "([('a', np.int16), ('b', np.int64)])\n", (9048, 9084), True, 'import numpy as np\n')] |
"""Shortest-Path graph kernel.
Python implementation based on: "Shortest-path kernels on graphs", by
<NAME>; <NAME>, in Proceedings of the Fifth IEEE International
Conference on Data Mining (ICDM'05), 8 pp., 27-30 Nov. 2005.
doi: 10.1109/ICDM.2005.132
Author : <NAME>, <NAME>
"""
import numpy as np
import networkx as nx
class GK_SP:
"""
    Shortest-path graph kernel.
"""
def compare(self, g_1, g_2, verbose=False):
"""Compute the kernel value (similarity) between two graphs.
Parameters
----------
        g_1 : networkx.Graph
            First graph.
        g_2 : networkx.Graph
            Second graph.
Returns
-------
k : The similarity value between g1 and g2.
"""
        # Upper-triangular part of the Floyd-Warshall shortest-path
        # matrix (unreachable pairs and NaNs are zeroed out first):
        fwm1 = np.array(nx.floyd_warshall_numpy(g_1))
        fwm1 = np.where(fwm1 == np.inf, 0, fwm1)
        # Note: `x == np.nan` is always False; np.isnan is needed here.
        fwm1 = np.where(np.isnan(fwm1), 0, fwm1)
        fwm1 = np.triu(fwm1, k=1)
        bc1 = np.bincount(fwm1.reshape(-1).astype(int))
        fwm2 = np.array(nx.floyd_warshall_numpy(g_2))
        fwm2 = np.where(fwm2 == np.inf, 0, fwm2)
        fwm2 = np.where(np.isnan(fwm2), 0, fwm2)
        fwm2 = np.triu(fwm2, k=1)
        bc2 = np.bincount(fwm2.reshape(-1).astype(int))
        # Copy the non-zero shortest-path counts into arrays of the
        # same length:
v1 = np.zeros(max(len(bc1), len(bc2)) - 1)
v1[range(0, len(bc1)-1)] = bc1[1:]
v2 = np.zeros(max(len(bc1), len(bc2)) - 1)
v2[range(0, len(bc2)-1)] = bc2[1:]
return np.sum(v1 * v2)
def compare_normalized(self, g_1, g_2, verbose=False):
"""Compute the normalized kernel value between two graphs.
A normalized version of the kernel is given by the equation:
k_norm(g1, g2) = k(g1, g2) / sqrt(k(g1,g1) * k(g2,g2))
Parameters
----------
        g_1 : networkx.Graph
            First graph.
        g_2 : networkx.Graph
            Second graph.
Returns
-------
k : The similarity value between g1 and g2.
"""
return self.compare(g_1, g_2) / (np.sqrt(self.compare(g_1, g_1) *
self.compare(g_2, g_2)))
def compare_list(self, graph_list, verbose=False):
"""Compute the all-pairs kernel values for a list of graphs.
This function can be used to directly compute the kernel
matrix for a list of graphs. The direct computation of the
kernel matrix is faster than the computation of all individual
pairwise kernel values.
Parameters
----------
graph_list: list
A list of graphs (list of networkx graphs)
        Returns
        -------
K: numpy.array, shape = (len(graph_list), len(graph_list))
The similarity matrix of all graphs in graph_list.
"""
n = len(graph_list)
k = np.zeros((n, n))
for i in range(n):
for j in range(i, n):
k[i, j] = self.compare(graph_list[i], graph_list[j])
k[j, i] = k[i, j]
k_norm = np.zeros(k.shape)
for i in range(k.shape[0]):
for j in range(k.shape[1]):
k_norm[i, j] = k[i, j] / np.sqrt(k[i, i] * k[j, j])
return k_norm
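if __name__ == '__main__':
    # Quick sanity check; the graphs are chosen arbitrarily for illustration.
    g_a, g_b = nx.path_graph(4), nx.cycle_graph(4)
    gk = GK_SP()
    print(gk.compare(g_a, g_b))             # raw kernel value
    print(gk.compare_normalized(g_a, g_a))  # 1.0 by construction
    print(gk.compare_list([g_a, g_b]))      # normalized kernel matrix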
| [
"numpy.sqrt",
"numpy.where",
"networkx.floyd_warshall_numpy",
"numpy.sum",
"numpy.zeros",
"numpy.triu"
] | [((889, 922), 'numpy.where', 'np.where', (['(fwm1 == np.inf)', '(0)', 'fwm1'], {}), '(fwm1 == np.inf, 0, fwm1)\n', (897, 922), True, 'import numpy as np\n'), ((938, 971), 'numpy.where', 'np.where', (['(fwm1 == np.nan)', '(0)', 'fwm1'], {}), '(fwm1 == np.nan, 0, fwm1)\n', (946, 971), True, 'import numpy as np\n'), ((987, 1005), 'numpy.triu', 'np.triu', (['fwm1'], {'k': '(1)'}), '(fwm1, k=1)\n', (994, 1005), True, 'import numpy as np\n'), ((1132, 1165), 'numpy.where', 'np.where', (['(fwm2 == np.inf)', '(0)', 'fwm2'], {}), '(fwm2 == np.inf, 0, fwm2)\n', (1140, 1165), True, 'import numpy as np\n'), ((1181, 1214), 'numpy.where', 'np.where', (['(fwm2 == np.nan)', '(0)', 'fwm2'], {}), '(fwm2 == np.nan, 0, fwm2)\n', (1189, 1214), True, 'import numpy as np\n'), ((1230, 1248), 'numpy.triu', 'np.triu', (['fwm2'], {'k': '(1)'}), '(fwm2, k=1)\n', (1237, 1248), True, 'import numpy as np\n'), ((1599, 1614), 'numpy.sum', 'np.sum', (['(v1 * v2)'], {}), '(v1 * v2)\n', (1605, 1614), True, 'import numpy as np\n'), ((2957, 2973), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (2965, 2973), True, 'import numpy as np\n'), ((3156, 3173), 'numpy.zeros', 'np.zeros', (['k.shape'], {}), '(k.shape)\n', (3164, 3173), True, 'import numpy as np\n'), ((844, 872), 'networkx.floyd_warshall_numpy', 'nx.floyd_warshall_numpy', (['g_1'], {}), '(g_1)\n', (867, 872), True, 'import networkx as nx\n'), ((1087, 1115), 'networkx.floyd_warshall_numpy', 'nx.floyd_warshall_numpy', (['g_2'], {}), '(g_2)\n', (1110, 1115), True, 'import networkx as nx\n'), ((3291, 3317), 'numpy.sqrt', 'np.sqrt', (['(k[i, i] * k[j, j])'], {}), '(k[i, i] * k[j, j])\n', (3298, 3317), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
CalcCohx
function: calculate the longitudinal coherence
------------------------------------------------------------------------------------
Usage
Cohx,ConfigParameters = CalcCohx(ConfigParameters)
-----------------------------------------------------------------------------------
Inputs
ConfigParameters: -dict, configuration parameters
---------------------------------------------------------------------------------
Outputs
ConfigParameters: configuration parameters, -dict
Cohx: longitudinal coherence
- 3D array, with size of (Nplanes,Nplanes,number of freq)
--------------------------------------------------------------------------------
References
1. 'Exp-UserDefined' uses the wind evolution model (Eq.4), and
'Exp-Simley' uses the wind evolution model (Eq.7) in
# Simley, E., & Pao, L. Y. (2015).
# A longitudinal spatial coherence model for wind evolution based on large-eddy simulation.
# In 2015 American Control Conference (ACC) (pp. 3708–3714). IEEE.
# https://doi.org/10.1109/ACC.2015.7171906
This model is acquired from LES simulations.
2.'Kristensen' uses the wind evolution model (Eq.20) and G-function (Eq.29) in
# Kristensen, L. (1979).
# On longitudinal spectral coherence.
# Boundary-Layer Meteorology, 16(2), 145–153.
# https://doi.org/10.1007/BF02350508
This model is based on physical deduction.
3.'Exp-GPR' uses the wind evolution model (Eq.6) and
the GPR models case 15 for a and case 17 for b (Table5) in
# Chen, Y., Schlipf, D., & Cheng, P. W. (2021).
# Parameterization of wind evolution using lidar.
# Wind Energy Science, 6(1), 61–91.
# https://doi.org/10.5194/wes-6-61-2021
The GPR models are trained with measurement data from an onshore flat site.
Due to the limitation of the training data, it is not recommended to
use the GPR models for the cases where the separations between the unfrozen planes exceed 109 m.
----------------------------------------------------------------------------------------------------
Created on 20.06.2021
<NAME> (c) University of Stuttgart
<NAME> (c) Flensburg University of Applied Sciences
----------------------------------------------------------------------------------------------------
Modified
"""
# import libraries
from scipy.spatial.distance import cdist
import numpy as np
import math
# import scipy.io as sio
def CalcCohx(ConfigParameters):
# spatial distance x
X = np.reshape(ConfigParameters['Xpos'],(ConfigParameters['Nplanes'],1),order="F")
X = np.concatenate((X,0*X),axis=1)
r_x = np.reshape(cdist(X, X),(ConfigParameters['Nplanes']**2,1),order="F")
if ConfigParameters['EvoModel']!='Exp-UserDefined':
# calculate wind statistics to determine wind evolution model parameters
if ConfigParameters['TurbModel']=='Kaimal':
# Check Turbulence Class
# Iref: expected value of the turbulence intensity at 15 m/s. (IEC61400-1:2005 p.22)
# Note that IRef is defined as the mean value in this edition of the standard rather than as a representative value.
if ConfigParameters['TurbClass']=='A+':
Iref=0.18
elif ConfigParameters['TurbClass']=='A':
Iref=0.16
elif ConfigParameters['TurbClass']=='B':
Iref=0.14
elif ConfigParameters['TurbClass']=='C':
Iref=0.12
else:
raise ValueError('Wrong turbulence class. Please define IEC turbulence Class as A+, A, B, or C.')
# sigma_u: the representative value of the turbulence standard deviation,
            # shall be given by the 90% quantile for the given hub height wind speed (IEC61400-1:2005 p.24)
sigma_u = Iref*(0.75*ConfigParameters['Uref']+5.6)
sigma_v = sigma_u*0.8
sigma_w = sigma_u*0.5
sigma_total = math.sqrt(sigma_u**2+sigma_v**2+sigma_w**2)
# Lambda = longitudinal turbulence scale parameter
if ConfigParameters['Href'] > 60:
Lambda = 42
else:
Lambda = 0.7*ConfigParameters['Href']
# Integral length scale
Lu = 8.1*Lambda
# save the parameters
ConfigParameters['sigma_u'] = sigma_u
ConfigParameters['sigma_v'] = sigma_v
ConfigParameters['sigma_w'] = sigma_w
ConfigParameters['L_u'] = Lu
elif ConfigParameters['TurbModel']=='Mann':
sigma_total = math.sqrt(ConfigParameters['sigma_u']**2+ConfigParameters['sigma_v']**2+ConfigParameters['sigma_w']**2)
Lu = ConfigParameters['L_u']
# coherence x
if ConfigParameters['EvoModel'] == 'Exp-UserDefined':
Cohx_squared = np.exp(-ConfigParameters['evo_a']*np.sqrt((ConfigParameters['f']*r_x/ConfigParameters['Uref'])**2+\
(ConfigParameters['evo_b']*r_x)**2))
elif ConfigParameters['EvoModel'] == 'Exp-Simley':
ConfigParameters['evo_a'] = 8.4*sigma_total/ConfigParameters['Uref']+0.05
ConfigParameters['evo_b'] = 0.25*Lu**(-1.24)
Cohx_squared = np.exp(-ConfigParameters['evo_a']*np.sqrt((ConfigParameters['f']*r_x/ConfigParameters['Uref'])**2+\
(ConfigParameters['evo_b']*r_x)**2))
elif ConfigParameters['EvoModel'] == 'Kristensen':
xi = ConfigParameters['f']*Lu/ConfigParameters['Uref']
alpha = sigma_total/ConfigParameters['Uref']*r_x/Lu
G = 33**(-2/3)*(33*xi)**2*(33*xi+3/11)**0.5/(33*xi+1)**(11/6)
m = 2*(alpha<=1)+1*(alpha>1)
Cohx_squared = np.exp(-2*alpha*G)*(1-np.exp(-1/(2*alpha**m*xi**2)))**2
# =============================================================================
# elif ConfigParameters['EvoModel'] == 'Exp-GPR':
# GPRmdl = sio.loadmat('ExpGPR.mat')
# predictor_a = struct2table(struct('V_long_mean',ConfigParameters.Uref,...
# 'V_vert_std',ConfigParameters.sigma_w,'DirError',0))
# predictor_b = struct2table(struct('V_long_mean',ConfigParameters.Uref*ones(size(r_x)),...
# 'V_long_TI_U',ConfigParameters.sigma_u/ConfigParameters.Uref*ones(size(r_x)),...
# 'V_long_skew',zeros(size(r_x)),'V_long_kurt',zeros(size(r_x)),...
# 'V_lat_skew',zeros(size(r_x)),'V_vert_skew',zeros(size(r_x)),...
# 'vlos_d',r_x))
# ConfigParameters.evo_a = predict(cgprMdl_a,predictor_a)
# ConfigParameters.evo_b = predict(cgprMdl_b,predictor_b)
# ConfigParameters.evo_b(predictor_b.vlos_d==0)=0
# Cohx_squared = exp(-sqrt(ConfigParameters.evo_a.^2.*(ConfigParameters.f.*r_x./ConfigParameters.Uref).^2+...
# ConfigParameters.evo_b.^2))
# =============================================================================
Cohx = np.reshape(np.sqrt(Cohx_squared),(ConfigParameters['Nplanes'],ConfigParameters['Nplanes'],len(ConfigParameters['f'])),order="F")
return Cohx,ConfigParameters
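# A minimal usage sketch (editor's addition, not part of the original module).
# The parameter values below are illustrative assumptions, chosen so that the
# 'Exp-UserDefined' branch runs without needing the extra turbulence statistics.
if __name__ == '__main__':
    cfg = {
        'Xpos': np.array([0.0, 50.0, 100.0]),  # longitudinal plane positions [m] (assumed)
        'Nplanes': 3,
        'EvoModel': 'Exp-UserDefined',
        'evo_a': 0.5,                         # assumed decay parameters
        'evo_b': 0.01,
        'f': np.linspace(0.01, 1.0, 10),      # frequency vector [Hz] (assumed)
        'Uref': 10.0,                         # reference wind speed [m/s] (assumed)
    }
    Cohx, cfg = CalcCohx(cfg)
    print(Cohx.shape)  # -> (3, 3, 10): plane-to-plane coherence per frequency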
| [
"numpy.sqrt",
"numpy.reshape",
"scipy.spatial.distance.cdist",
"math.sqrt",
"numpy.exp",
"numpy.concatenate"
] | [((2602, 2687), 'numpy.reshape', 'np.reshape', (["ConfigParameters['Xpos']", "(ConfigParameters['Nplanes'], 1)"], {'order': '"""F"""'}), "(ConfigParameters['Xpos'], (ConfigParameters['Nplanes'], 1),\n order='F')\n", (2612, 2687), True, 'import numpy as np\n'), ((2690, 2724), 'numpy.concatenate', 'np.concatenate', (['(X, 0 * X)'], {'axis': '(1)'}), '((X, 0 * X), axis=1)\n', (2704, 2724), True, 'import numpy as np\n'), ((2743, 2754), 'scipy.spatial.distance.cdist', 'cdist', (['X', 'X'], {}), '(X, X)\n', (2748, 2754), False, 'from scipy.spatial.distance import cdist\n'), ((7299, 7320), 'numpy.sqrt', 'np.sqrt', (['Cohx_squared'], {}), '(Cohx_squared)\n', (7306, 7320), True, 'import numpy as np\n'), ((4143, 4196), 'math.sqrt', 'math.sqrt', (['(sigma_u ** 2 + sigma_v ** 2 + sigma_w ** 2)'], {}), '(sigma_u ** 2 + sigma_v ** 2 + sigma_w ** 2)\n', (4152, 4196), False, 'import math\n'), ((4827, 4944), 'math.sqrt', 'math.sqrt', (["(ConfigParameters['sigma_u'] ** 2 + ConfigParameters['sigma_v'] ** 2 + \n ConfigParameters['sigma_w'] ** 2)"], {}), "(ConfigParameters['sigma_u'] ** 2 + ConfigParameters['sigma_v'] **\n 2 + ConfigParameters['sigma_w'] ** 2)\n", (4836, 4944), False, 'import math\n'), ((5119, 5235), 'numpy.sqrt', 'np.sqrt', (["((ConfigParameters['f'] * r_x / ConfigParameters['Uref']) ** 2 + (\n ConfigParameters['evo_b'] * r_x) ** 2)"], {}), "((ConfigParameters['f'] * r_x / ConfigParameters['Uref']) ** 2 + (\n ConfigParameters['evo_b'] * r_x) ** 2)\n", (5126, 5235), True, 'import numpy as np\n'), ((5512, 5628), 'numpy.sqrt', 'np.sqrt', (["((ConfigParameters['f'] * r_x / ConfigParameters['Uref']) ** 2 + (\n ConfigParameters['evo_b'] * r_x) ** 2)"], {}), "((ConfigParameters['f'] * r_x / ConfigParameters['Uref']) ** 2 + (\n ConfigParameters['evo_b'] * r_x) ** 2)\n", (5519, 5628), True, 'import numpy as np\n'), ((5976, 5998), 'numpy.exp', 'np.exp', (['(-2 * alpha * G)'], {}), '(-2 * alpha * G)\n', (5982, 5998), True, 'import numpy as np\n'), ((5998, 6037), 'numpy.exp', 'np.exp', (['(-1 / (2 * alpha ** m * xi ** 2))'], {}), '(-1 / (2 * alpha ** m * xi ** 2))\n', (6004, 6037), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Functions to simulate background noise.
"""
import numpy as np
import bigfish.stack as stack
# TODO add illumination bias
def add_white_noise(image, noise_level, random_noise=0.05):
"""Generate and add white noise to an image.
Parameters
----------
image : np.ndarray, np.uint
Image with shape (z, y, x) or (y, x).
noise_level : int or float
Reference level of noise background to add in the image.
random_noise : int or float
Background noise follows a normal distribution around the provided
noise values. The scale used is scale = noise_level * random_noise
Returns
-------
noised_image : np.ndarray, np.uint
Noised image with shape (z, y, x) or (y, x).
"""
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16])
stack.check_parameter(noise_level=(int, float),
random_noise=(int, float))
# compute scale
scale = noise_level * random_noise
# generate noise
noise = np.random.normal(loc=noise_level, scale=scale, size=image.size)
noise = np.reshape(noise, image.shape)
# add noise
noised_image = image.astype(np.float64) + noise
noised_image = np.clip(noised_image, 0, np.iinfo(image.dtype).max)
noised_image = noised_image.astype(image.dtype)
return noised_image
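# Example usage (editor's addition): a quick smoke test on a synthetic image.
# Assumes bigfish is installed, since add_white_noise validates its inputs
# through bigfish.stack; the image values are arbitrary.
if __name__ == '__main__':
    img = np.full((16, 16), 100, dtype=np.uint16)
    noised = add_white_noise(img, noise_level=20)
    print(noised.dtype, noised.shape)  # stays np.uint16, shape (16, 16)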
| [
"numpy.random.normal",
"bigfish.stack.check_array",
"numpy.reshape",
"numpy.iinfo",
"bigfish.stack.check_parameter"
] | [((855, 921), 'bigfish.stack.check_array', 'stack.check_array', (['image'], {'ndim': '[2, 3]', 'dtype': '[np.uint8, np.uint16]'}), '(image, ndim=[2, 3], dtype=[np.uint8, np.uint16])\n', (872, 921), True, 'import bigfish.stack as stack\n'), ((970, 1044), 'bigfish.stack.check_parameter', 'stack.check_parameter', ([], {'noise_level': '(int, float)', 'random_noise': '(int, float)'}), '(noise_level=(int, float), random_noise=(int, float))\n', (991, 1044), True, 'import bigfish.stack as stack\n'), ((1165, 1228), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'noise_level', 'scale': 'scale', 'size': 'image.size'}), '(loc=noise_level, scale=scale, size=image.size)\n', (1181, 1228), True, 'import numpy as np\n'), ((1241, 1271), 'numpy.reshape', 'np.reshape', (['noise', 'image.shape'], {}), '(noise, image.shape)\n', (1251, 1271), True, 'import numpy as np\n'), ((1385, 1406), 'numpy.iinfo', 'np.iinfo', (['image.dtype'], {}), '(image.dtype)\n', (1393, 1406), True, 'import numpy as np\n')] |
import numpy as np
def get_fft_harmonics(samples_per_window, sample_rate, one_sided=True):
"""
    Works for odd and even numbers of points.
    Does not return the Nyquist bin, but does return the DC component.
    Could be modified with kwargs to support one_sided, two_sided, ignore_dc,
    ignore_nyquist, etc. (note: the one_sided flag is currently unused). Could
    actually take FrequencyBands as an argument if we wanted as well.
Parameters
----------
samples_per_window
sample_rate
Returns
-------
"""
    n_fft_harmonics = int(samples_per_window / 2)  # no bin at Nyquist
harmonic_frequencies = np.fft.fftfreq(samples_per_window, d=1.0 / sample_rate)
harmonic_frequencies = harmonic_frequencies[0:n_fft_harmonics]
return harmonic_frequencies
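# Example usage (editor's addition): with 8 samples at 100 Hz the returned
# harmonics are [0, 12.5, 25, 37.5] -- DC included, Nyquist (50 Hz) excluded.
if __name__ == '__main__':
    print(get_fft_harmonics(8, 100.0))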
| [
"numpy.fft.fftfreq"
] | [((585, 640), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['samples_per_window'], {'d': '(1.0 / sample_rate)'}), '(samples_per_window, d=1.0 / sample_rate)\n', (599, 640), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 20:01:02 2020
@author: Isaac
"""
import timeit
import numba
import numpy as np
from numba import njit
import time
@njit
def question_1(x):
"""
Solution to question 1 goes here
"""
A = np.array([[1.0, 3.0, 4.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]])
return np.linalg.matrix_power(A, x)
def question_1a(x):
"""
Solution to question 1 goes here
"""
A = np.array([[1.0, 3.0, 4.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]])
return np.linalg.matrix_power(A, x)
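# Editor's note: the first call to an @njit-decorated function includes JIT
# compilation time, so warm both functions up once before timing to keep the
# comparison fair.
question_1(10)
question_1a(10)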
timeit.timeit("question_1(1000)", number = 100, globals = globals())
timeit.timeit("question_1a(1000)", number = 100, globals = globals()) | [
"numpy.array",
"numpy.linalg.matrix_power"
] | [((277, 338), 'numpy.array', 'np.array', (['[[1.0, 3.0, 4.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]]'], {}), '([[1.0, 3.0, 4.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]])\n', (285, 338), True, 'import numpy as np\n'), ((351, 379), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['A', 'x'], {}), '(A, x)\n', (373, 379), True, 'import numpy as np\n'), ((472, 533), 'numpy.array', 'np.array', (['[[1.0, 3.0, 4.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]]'], {}), '([[1.0, 3.0, 4.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]])\n', (480, 533), True, 'import numpy as np\n'), ((546, 574), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['A', 'x'], {}), '(A, x)\n', (568, 574), True, 'import numpy as np\n')] |
import argparse
import os
import pdb
import shutil
from timeit import default_timer as timer
import numpy as np
import pandas as pd
from tqdm import tqdm
from evaluation import write_submission
def iters_ensemble(args):
'''
    Ensemble across different iterations and generate ensembled files in the fusioned folder
'''
## directories
if args.task_type == 'sed_only':
# iterations ensemble directory
fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_mask_fusioned')
os.makedirs(fusioned_dir, exist_ok=True)
fusion_fn = '_fusion_sed_epoch_{}'
iterator = range(38, 42, 2)
elif args.task_type == 'two_staged_eval':
# iterations ensemble directory
fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'doa_fusioned')
os.makedirs(fusioned_dir, exist_ok=True)
fusion_fn = '_fusion_doa_epoch_{}'
iterator = range(78, 82, 2)
## average ensemble
print('\n===> Average ensemble')
ensemble_start_time = timer()
predicts_fusioned = []
for epoch_num in iterator:
fusion_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), fusion_fn.format(epoch_num))
for fn in sorted(os.listdir(fusion_dir)):
if fn.endswith('.csv') and not fn.startswith('.'):
fn_path = os.path.join(fusion_dir, fn)
predicts_fusioned.append(pd.read_csv(fn_path, header=0, index_col=0).values)
if len(predicts_fusioned) > file_num:
for n in range(file_num):
min_len = min(predicts_fusioned[n].shape[0], predicts_fusioned[n+file_num].shape[0])
predicts_fusioned[n] = (predicts_fusioned[n][:min_len,:] + predicts_fusioned[n+file_num][:min_len,:]) / 2
predicts_fusioned = predicts_fusioned[:file_num]
print('\nAverage ensemble time: {:.3f} s.'.format(timer()-ensemble_start_time))
## write the fusioned sed probabilities or doa predictions to fusioned files
print('\n===> Write the fusioned sed probabilities or doa predictions to fusioned files')
# this folder here is only used for supplying fn
iterator = tqdm(sorted(os.listdir(fusion_dir)), total=len(os.listdir(fusion_dir)), unit='iters')
n = 0
for fn in iterator:
if fn.endswith('.csv') and not fn.startswith('.'):
# write to sed_mask_fusioned folder
fn_path = os.path.join(fusioned_dir, fn)
df_output = pd.DataFrame(predicts_fusioned[n])
df_output.to_csv(fn_path)
n += 1
iterator.close()
print('\n' + fusioned_dir)
print('\n===> Iterations ensemble finished!')
def threshold_iters_ensemble(args):
'''
Threshold the ensembled iterations and write to submissions
'''
# directories
sed_mask_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_mask_fusioned')
doa_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'doa_fusioned')
if args.task_type == 'sed_only':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_test_fusioned')
elif args.task_type == 'two_staged_eval':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'all_test_fusioned')
os.makedirs(test_fusioned_dir, exist_ok=True)
if args.task_type == 'sed_only':
iterator = tqdm(sorted(os.listdir(sed_mask_fusioned_dir)), total=len(os.listdir(sed_mask_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_prob.csv') and not fn.startswith('.'):
fn_path = os.path.join(sed_mask_fusioned_dir, fn)
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to sed_test_fusioned
fn_noextension = fn.split('_prob')[0]
output_doas = np.zeros((prob_fusioned.shape[0],22))
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': output_doas
}
write_submission(submit_dict, test_fusioned_dir)
if args.task_type == 'two_staged_eval':
iterator = tqdm(sorted(os.listdir(doa_fusioned_dir)), total=len(os.listdir(doa_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_doa.csv') and not fn.startswith('.'):
fn_noextension = fn.split('_doa')[0]
# read sed predictions from sed_mask_fusioned directory
fn_path = os.path.join(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# read doa predictions from doa_fusioned directory
fn_path = os.path.join(doa_fusioned_dir, fn)
doa_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to all_test_fusioned
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': doa_fusioned
}
write_submission(submit_dict, test_fusioned_dir)
iterator.close()
print('\n' + test_fusioned_dir)
print('\n===> Threshold iterations ensemble finished!')
def models_ensemble(args):
'''
    Ensemble across different models and generate ensembled files in the fusioned folder
'''
# directories
if args.task_type == 'sed_only':
fusion_folder = 'sed_mask_fusioned'
fusioned_folder = 'sed_mask_models_fusioned'
elif args.task_type == 'two_staged_eval':
fusion_folder = 'doa_fusioned'
fusioned_folder = 'doa_models_fusioned'
print('\n===> Model average ensemble')
ensemble_start_time = timer()
predicts_fusioned = []
for model_folder in sorted(os.listdir(submissions_dir)):
if not model_folder.startswith('.') and model_folder != 'models_ensemble':
print('\n' + model_folder)
fusion_dir = os.path.join(submissions_dir, model_folder, fusion_folder)
for fn in sorted(os.listdir(fusion_dir)):
if fn.endswith('.csv') and not fn.startswith('.'):
fn_path = os.path.join(fusion_dir, fn)
predicts_fusioned.append(pd.read_csv(fn_path, header=0, index_col=0).values)
if len(predicts_fusioned) > file_num:
for n in range(file_num):
min_len = min(predicts_fusioned[n].shape[0], predicts_fusioned[n+file_num].shape[0])
predicts_fusioned[n] = (predicts_fusioned[n][:min_len,:] + predicts_fusioned[n+file_num][:min_len,:]) / 2
predicts_fusioned = predicts_fusioned[:file_num]
print('\nAverage ensemble time: {:.3f} s.'.format(timer()-ensemble_start_time))
## write the fusioned sed probabilities or doa predictions to fusioned files
print('\n===> Write the fusioned sed probabilities or doa predictions to fusioned files')
# this folder here is only used for supplying fn
iterator = tqdm(sorted(os.listdir(fusion_dir)), total=len(os.listdir(fusion_dir)), unit='iters')
models_ensemble_dir = os.path.join(submissions_dir, 'models_ensemble', fusioned_folder)
os.makedirs(models_ensemble_dir, exist_ok=True)
n = 0
for fn in iterator:
if fn.endswith('.csv') and not fn.startswith('.'):
# write to sed_mask_fusioned folder
fn_path = os.path.join(models_ensemble_dir, fn)
df_output = pd.DataFrame(predicts_fusioned[n])
df_output.to_csv(fn_path)
n += 1
iterator.close()
print('\n' + models_ensemble_dir)
print('\n===> Models ensemble finished!')
def threshold_models_ensemble(args):
'''
Threshold the ensembled models and write to submissions
'''
# directories
sed_mask_fusioned_dir = os.path.join(submissions_dir, 'models_ensemble', 'sed_mask_models_fusioned')
doa_fusioned_dir = os.path.join(submissions_dir, 'models_ensemble', 'doa_models_fusioned')
if args.task_type == 'sed_only':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_test_fusioned')
elif args.task_type == 'two_staged_eval':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'all_test_fusioned')
os.makedirs(test_fusioned_dir, exist_ok=True)
if args.task_type == 'sed_only':
iterator = tqdm(sorted(os.listdir(sed_mask_fusioned_dir)), total=len(os.listdir(sed_mask_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_prob.csv') and not fn.startswith('.'):
fn_path = os.path.join(sed_mask_fusioned_dir, fn)
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to sed_test_fusioned
fn_noextension = fn.split('_prob')[0]
output_doas = np.zeros((prob_fusioned.shape[0],22))
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': output_doas
}
write_submission(submit_dict, test_fusioned_dir)
if args.task_type == 'two_staged_eval':
iterator = tqdm(sorted(os.listdir(doa_fusioned_dir)), total=len(os.listdir(doa_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_doa.csv') and not fn.startswith('.'):
fn_noextension = fn.split('_doa')[0]
# read sed predictions from sed_mask_fusioned directory
fn_path = os.path.join(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# read doa predictions from doa_fusioned directory
fn_path = os.path.join(doa_fusioned_dir, fn)
doa_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to all_test_fusioned
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': doa_fusioned
}
write_submission(submit_dict, test_fusioned_dir)
iterator.close()
print('\n' + test_fusioned_dir)
print('\n===> Threshold models ensemble finished!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ensemble on different iterations or different models')
subparsers = parser.add_subparsers(dest='mode')
parser_iters_ensemble = subparsers.add_parser('iters_ensemble')
parser_iters_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_iters_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_iters_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_iters_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_iters_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_iters_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_iters_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_iters_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_iters_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_iters_ensemble = subparsers.add_parser('threshold_iters_ensemble')
parser_threshold_iters_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_threshold_iters_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_threshold_iters_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_threshold_iters_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_threshold_iters_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_threshold_iters_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_threshold_iters_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_threshold_iters_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_threshold_iters_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_iters_ensemble.add_argument('--threshold', default=0.3, type=float)
parser_models_ensemble = subparsers.add_parser('models_ensemble')
parser_models_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_models_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_models_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_models_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_models_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_models_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_models_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_models_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_models_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_models_ensemble = subparsers.add_parser('threshold_models_ensemble')
parser_threshold_models_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_threshold_models_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_threshold_models_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_threshold_models_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_threshold_models_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_threshold_models_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_threshold_models_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_threshold_models_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_threshold_models_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_models_ensemble.add_argument('--threshold', default=0.3, type=float)
args = parser.parse_args()
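    # Example invocations (editor's addition); the script name and argument
    # values below are placeholders:
    #   python ensemble.py iters_ensemble --workspace ./workspace \
    #       --feature_type logmel --audio_type foa --task_type sed_only
    #   python ensemble.py threshold_iters_ensemble --workspace ./workspace \
    #       --feature_type logmel --audio_type foa --task_type two_staged_eval --threshold 0.3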
# submissions directory
global submissions_dir
submissions_dir = os.path.join(args.workspace, 'appendixes', 'submissions_eval')
global file_num
file_num = 100
# ensemble different iterations or models
if args.mode == 'iters_ensemble':
iters_ensemble(args)
elif args.mode == 'threshold_iters_ensemble':
threshold_iters_ensemble(args)
elif args.mode == 'models_ensemble':
models_ensemble(args)
elif args.mode == 'threshold_models_ensemble':
threshold_models_ensemble(args) | [
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"pandas.read_csv",
"timeit.default_timer",
"os.path.join",
"numpy.zeros",
"pandas.DataFrame",
"evaluation.write_submission"
] | [((1360, 1367), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1365, 1367), True, 'from timeit import default_timer as timer\n'), ((4391, 4436), 'os.makedirs', 'os.makedirs', (['test_fusioned_dir'], {'exist_ok': '(True)'}), '(test_fusioned_dir, exist_ok=True)\n', (4402, 4436), False, 'import os\n'), ((7067, 7074), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7072, 7074), True, 'from timeit import default_timer as timer\n'), ((8481, 8546), 'os.path.join', 'os.path.join', (['submissions_dir', '"""models_ensemble"""', 'fusioned_folder'], {}), "(submissions_dir, 'models_ensemble', fusioned_folder)\n", (8493, 8546), False, 'import os\n'), ((8551, 8598), 'os.makedirs', 'os.makedirs', (['models_ensemble_dir'], {'exist_ok': '(True)'}), '(models_ensemble_dir, exist_ok=True)\n', (8562, 8598), False, 'import os\n'), ((9195, 9271), 'os.path.join', 'os.path.join', (['submissions_dir', '"""models_ensemble"""', '"""sed_mask_models_fusioned"""'], {}), "(submissions_dir, 'models_ensemble', 'sed_mask_models_fusioned')\n", (9207, 9271), False, 'import os\n'), ((9295, 9366), 'os.path.join', 'os.path.join', (['submissions_dir', '"""models_ensemble"""', '"""doa_models_fusioned"""'], {}), "(submissions_dir, 'models_ensemble', 'doa_models_fusioned')\n", (9307, 9366), False, 'import os\n'), ((9967, 10012), 'os.makedirs', 'os.makedirs', (['test_fusioned_dir'], {'exist_ok': '(True)'}), '(test_fusioned_dir, exist_ok=True)\n', (9978, 10012), False, 'import os\n'), ((12183, 12279), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Ensemble on different iterations or different models"""'}), "(description=\n 'Ensemble on different iterations or different models')\n", (12206, 12279), False, 'import argparse\n'), ((17955, 18017), 'os.path.join', 'os.path.join', (['args.workspace', '"""appendixes"""', '"""submissions_eval"""'], {}), "(args.workspace, 'appendixes', 'submissions_eval')\n", (17967, 18017), False, 'import os\n'), ((685, 725), 'os.makedirs', 'os.makedirs', (['fusioned_dir'], {'exist_ok': '(True)'}), '(fusioned_dir, exist_ok=True)\n', (696, 725), False, 'import os\n'), ((7133, 7160), 'os.listdir', 'os.listdir', (['submissions_dir'], {}), '(submissions_dir)\n', (7143, 7160), False, 'import os\n'), ((1151, 1191), 'os.makedirs', 'os.makedirs', (['fusioned_dir'], {'exist_ok': '(True)'}), '(fusioned_dir, exist_ok=True)\n', (1162, 1191), False, 'import os\n'), ((1709, 1731), 'os.listdir', 'os.listdir', (['fusion_dir'], {}), '(fusion_dir)\n', (1719, 1731), False, 'import os\n'), ((2662, 2684), 'os.listdir', 'os.listdir', (['fusion_dir'], {}), '(fusion_dir)\n', (2672, 2684), False, 'import os\n'), ((2900, 2930), 'os.path.join', 'os.path.join', (['fusioned_dir', 'fn'], {}), '(fusioned_dir, fn)\n', (2912, 2930), False, 'import os\n'), ((2955, 2989), 'pandas.DataFrame', 'pd.DataFrame', (['predicts_fusioned[n]'], {}), '(predicts_fusioned[n])\n', (2967, 2989), True, 'import pandas as pd\n'), ((7310, 7368), 'os.path.join', 'os.path.join', (['submissions_dir', 'model_folder', 'fusion_folder'], {}), '(submissions_dir, model_folder, fusion_folder)\n', (7322, 7368), False, 'import os\n'), ((8380, 8402), 'os.listdir', 'os.listdir', (['fusion_dir'], {}), '(fusion_dir)\n', (8390, 8402), False, 'import os\n'), ((8763, 8800), 'os.path.join', 'os.path.join', (['models_ensemble_dir', 'fn'], {}), '(models_ensemble_dir, fn)\n', (8775, 8800), False, 'import os\n'), ((8825, 8859), 'pandas.DataFrame', 'pd.DataFrame', (['predicts_fusioned[n]'], {}), '(predicts_fusioned[n])\n', (8837, 8859), True, 
'import pandas as pd\n'), ((1823, 1851), 'os.path.join', 'os.path.join', (['fusion_dir', 'fn'], {}), '(fusion_dir, fn)\n', (1835, 1851), False, 'import os\n'), ((2376, 2383), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2381, 2383), True, 'from timeit import default_timer as timer\n'), ((2697, 2719), 'os.listdir', 'os.listdir', (['fusion_dir'], {}), '(fusion_dir)\n', (2707, 2719), False, 'import os\n'), ((4506, 4539), 'os.listdir', 'os.listdir', (['sed_mask_fusioned_dir'], {}), '(sed_mask_fusioned_dir)\n', (4516, 4539), False, 'import os\n'), ((4724, 4763), 'os.path.join', 'os.path.join', (['sed_mask_fusioned_dir', 'fn'], {}), '(sed_mask_fusioned_dir, fn)\n', (4736, 4763), False, 'import os\n'), ((4977, 5015), 'numpy.zeros', 'np.zeros', (['(prob_fusioned.shape[0], 22)'], {}), '((prob_fusioned.shape[0], 22))\n', (4985, 5015), True, 'import numpy as np\n'), ((5250, 5298), 'evaluation.write_submission', 'write_submission', (['submit_dict', 'test_fusioned_dir'], {}), '(submit_dict, test_fusioned_dir)\n', (5266, 5298), False, 'from evaluation import write_submission\n'), ((5391, 5419), 'os.listdir', 'os.listdir', (['doa_fusioned_dir'], {}), '(doa_fusioned_dir)\n', (5401, 5419), False, 'import os\n'), ((5724, 5789), 'os.path.join', 'os.path.join', (['sed_mask_fusioned_dir', "(fn_noextension + '_prob.csv')"], {}), "(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')\n", (5736, 5789), False, 'import os\n'), ((5982, 6016), 'os.path.join', 'os.path.join', (['doa_fusioned_dir', 'fn'], {}), '(doa_fusioned_dir, fn)\n', (5994, 6016), False, 'import os\n'), ((6400, 6448), 'evaluation.write_submission', 'write_submission', (['submit_dict', 'test_fusioned_dir'], {}), '(submit_dict, test_fusioned_dir)\n', (6416, 6448), False, 'from evaluation import write_submission\n'), ((7399, 7421), 'os.listdir', 'os.listdir', (['fusion_dir'], {}), '(fusion_dir)\n', (7409, 7421), False, 'import os\n'), ((8090, 8097), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8095, 8097), True, 'from timeit import default_timer as timer\n'), ((8415, 8437), 'os.listdir', 'os.listdir', (['fusion_dir'], {}), '(fusion_dir)\n', (8425, 8437), False, 'import os\n'), ((10082, 10115), 'os.listdir', 'os.listdir', (['sed_mask_fusioned_dir'], {}), '(sed_mask_fusioned_dir)\n', (10092, 10115), False, 'import os\n'), ((10300, 10339), 'os.path.join', 'os.path.join', (['sed_mask_fusioned_dir', 'fn'], {}), '(sed_mask_fusioned_dir, fn)\n', (10312, 10339), False, 'import os\n'), ((10553, 10591), 'numpy.zeros', 'np.zeros', (['(prob_fusioned.shape[0], 22)'], {}), '((prob_fusioned.shape[0], 22))\n', (10561, 10591), True, 'import numpy as np\n'), ((10826, 10874), 'evaluation.write_submission', 'write_submission', (['submit_dict', 'test_fusioned_dir'], {}), '(submit_dict, test_fusioned_dir)\n', (10842, 10874), False, 'from evaluation import write_submission\n'), ((10967, 10995), 'os.listdir', 'os.listdir', (['doa_fusioned_dir'], {}), '(doa_fusioned_dir)\n', (10977, 10995), False, 'import os\n'), ((11300, 11365), 'os.path.join', 'os.path.join', (['sed_mask_fusioned_dir', "(fn_noextension + '_prob.csv')"], {}), "(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')\n", (11312, 11365), False, 'import os\n'), ((11558, 11592), 'os.path.join', 'os.path.join', (['doa_fusioned_dir', 'fn'], {}), '(doa_fusioned_dir, fn)\n', (11570, 11592), False, 'import os\n'), ((11976, 12024), 'evaluation.write_submission', 'write_submission', (['submit_dict', 'test_fusioned_dir'], {}), '(submit_dict, test_fusioned_dir)\n', (11992, 12024), False, 'from evaluation 
import write_submission\n'), ((4552, 4585), 'os.listdir', 'os.listdir', (['sed_mask_fusioned_dir'], {}), '(sed_mask_fusioned_dir)\n', (4562, 4585), False, 'import os\n'), ((4796, 4839), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (4807, 4839), True, 'import pandas as pd\n'), ((5432, 5460), 'os.listdir', 'os.listdir', (['doa_fusioned_dir'], {}), '(doa_fusioned_dir)\n', (5442, 5460), False, 'import os\n'), ((5822, 5865), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (5833, 5865), True, 'import pandas as pd\n'), ((6048, 6091), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (6059, 6091), True, 'import pandas as pd\n'), ((7521, 7549), 'os.path.join', 'os.path.join', (['fusion_dir', 'fn'], {}), '(fusion_dir, fn)\n', (7533, 7549), False, 'import os\n'), ((10128, 10161), 'os.listdir', 'os.listdir', (['sed_mask_fusioned_dir'], {}), '(sed_mask_fusioned_dir)\n', (10138, 10161), False, 'import os\n'), ((10372, 10415), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (10383, 10415), True, 'import pandas as pd\n'), ((11008, 11036), 'os.listdir', 'os.listdir', (['doa_fusioned_dir'], {}), '(doa_fusioned_dir)\n', (11018, 11036), False, 'import os\n'), ((11398, 11441), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (11409, 11441), True, 'import pandas as pd\n'), ((11624, 11667), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (11635, 11667), True, 'import pandas as pd\n'), ((1893, 1936), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (1904, 1936), True, 'import pandas as pd\n'), ((7595, 7638), 'pandas.read_csv', 'pd.read_csv', (['fn_path'], {'header': '(0)', 'index_col': '(0)'}), '(fn_path, header=0, index_col=0)\n', (7606, 7638), True, 'import pandas as pd\n')] |
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import numpy as np
from rbf import RbfGaussian
class RbfNetwork(object):
""" A RBF network is an advanced machine learning algorithm that uses a series of RBF functions to perform
regression. It can also perform classification by means of one-of-n encoding.
The long term memory of a RBF network is made up of the widths and centers of the RBF functions, as well as
input and output weighting.
http://en.wikipedia.org/wiki/RBF_network
"""
def __init__(self, input_count, rbf_count, output_count):
""" Create an RBF network with the specified shape.
@param input_count: The input count.
@param rbf_count: The RBF function count.
@param output_count: The output count.
"""
self.input_count = input_count
self.output_count = output_count
# calculate input and output weight counts
# add 1 to output to account for an extra bias node
input_weight_count = input_count * rbf_count
output_weight_count = (rbf_count + 1) * output_count
rbf_params = (input_count + 1) * rbf_count
self.long_term_memory = np.zeros((input_weight_count + output_weight_count + rbf_params), dtype=float)
self.index_input_weights = 0
self.index_output_weights = input_weight_count + rbf_params
self.rbf = {}
# default the Rbf's to gaussian
        for i in range(0, rbf_count):
rbf_index = input_weight_count + ((input_count + 1) * i)
self.rbf[i] = RbfGaussian(input_count, self.long_term_memory, rbf_index)
def compute_regression(self, input):
""" Compute the output for the network.
@param input: The input pattern.
@return: The output pattern.
"""
# first, compute the output values of each of the RBFs
# Add in one additional RBF output for bias (always set to one).
rbf_output = [0] * (len(self.rbf) + 1)
# bias
rbf_output[len(rbf_output) - 1] = 1.0
        for rbfIndex in range(0, len(self.rbf)):
# weight the input
weighted_input = [0] * len(input)
            for inputIndex in range(0, len(input)):
memory_index = self.index_input_weights + (rbfIndex * self.input_count) + inputIndex
weighted_input[inputIndex] = input[inputIndex] * self.long_term_memory[memory_index]
# calculate the rbf
rbf_output[rbfIndex] = self.rbf[rbfIndex].evaluate(weighted_input)
# Second, calculate the output, which is the result of the weighted result of the RBF's.
result = [0] * self.output_count
        for outputIndex in range(0, len(result)):
sum_value = 0
            for rbfIndex in range(0, len(rbf_output)):
# add 1 to rbf length for bias
memory_index = self.index_output_weights + (outputIndex * (len(self.rbf) + 1)) + rbfIndex
sum_value += rbf_output[rbfIndex] * self.long_term_memory[memory_index]
result[outputIndex] = sum_value
# finally, return the result.
return result
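    # Example usage (editor's addition, shown as comments only so it does not
    # execute at class-definition time); assumes the companion rbf module
    # providing RbfGaussian is importable:
    #   network = RbfNetwork(input_count=2, rbf_count=4, output_count=1)
    #   network.reset()
    #   print(network.compute_regression([0.5, 0.5]))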
def reset(self):
"""
Reset the network to a random state.
"""
        for i in range(0, len(self.long_term_memory)):
self.long_term_memory[i] = np.random.uniform(0, 1)
    def compute_classification(self, input):
""" Compute the output and return the index of the output with the largest value. This is the class that
the network recognized.
@param input: The input pattern.
@return:
"""
output = self.compute_regression(input)
return output.index(max(output))
def copy_memory(self, source):
""" Copy the specified vector into the long term memory of the network.
@param source: The source vector.
"""
        for i in range(0, len(source)):
self.long_term_memory[i] = source[i] | [
"numpy.zeros",
"rbf.RbfGaussian",
"numpy.random.uniform"
] | [((2117, 2193), 'numpy.zeros', 'np.zeros', (['(input_weight_count + output_weight_count + rbf_params)'], {'dtype': 'float'}), '(input_weight_count + output_weight_count + rbf_params, dtype=float)\n', (2125, 2193), True, 'import numpy as np\n'), ((2500, 2558), 'rbf.RbfGaussian', 'RbfGaussian', (['input_count', 'self.long_term_memory', 'rbf_index'], {}), '(input_count, self.long_term_memory, rbf_index)\n', (2511, 2558), False, 'from rbf import RbfGaussian\n'), ((4285, 4308), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4302, 4308), True, 'import numpy as np\n')] |
# Used for inference
from config import MaskRcnnConfig
import modelibe
import tensorflow as tf
import skimage.io as io
import scipy.misc
import os
import numpy as np
import keras.backend.tensorflow_backend as KTF
from tqdm import tqdm
import cv2
import colorsys
from skimage.measure import find_contours
import argparse
class OurConfig(MaskRcnnConfig):
    NUM_CLASSES = 23  # Set according to your own training set; includes the background class, so actual classes + 1
DETECTION_MIN_CONFIDENCE = 0.5
RPN_NMS_THRESHOLD = 0.5
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
# random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
class MaskRcnn(object):
def init_app(self, model_path, class_names):
self.g = tf.Graph()
with self.g.as_default():
config = OurConfig()
self.model = modelibe.MaskRcnn(mode="inference", model_dir="log", config=config)
self.model.load_weights(model_path, by_name=True)
self.class_names = class_names
def predict(self, path, save_path=None):
with self.g.as_default():
images_batch = []
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
images_batch.append(img)
results = self.model.detect(images_batch, verbose=0)[0]
self.save_show_v2(save_path, img, results['rois'], results['masks'], results['class_ids'],
results['scores'])
return results
def save_show_v2(self, show_path, image, boxes, masks, class_ids,
scores=None, allow=None):
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
colors = random_colors(N)
for i in range(N):
class_id = class_ids[i]
if allow:
if class_id not in allow:
continue
color = np.array(list(colors[i]))[..., ::-1].tolist()
new_color = tuple(color)
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
font = cv2.FONT_HERSHEY_SIMPLEX
# Label
score = scores[i] if scores is not None else None
# label = class_names[class_id]
label = self.class_names[class_id]
caption = "{} {:.3f}".format(label, score) if score else label
cv2.putText(image, caption, (x1, y1 + 8), font, 0.5, (255, 255, 255), 1)
# Mask
mask = masks[:, :, i]
image = apply_mask(image, mask, new_color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
verts = np.array([verts.astype(np.int32)])
image = cv2.polylines(image, verts, True, (0, 255, 0))
masked_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow("influence", masked_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
if show_path:
cv2.imwrite(show_path, masked_image)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Mask R-CNN influence')
parser.add_argument('--model_path', required=True,
help='.h5 file ')
parser.add_argument('--img_path', required=True,
help='path to img')
parser.add_argument('--show_path', required=False, default=None)
args = parser.parse_args()
print("model: ", args.model_path)
print("img_path", args.img_path)
class_names = {}
maskrcnn = MaskRcnn()
maskrcnn.init_app(args.model_path,class_names)
maskrcnn.predict(args.img_path, save_path=args.show_path)
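    # Example invocation (editor's addition); file names are placeholders, and
    # class_names above must first be populated as {class_id: class_name} for
    # your dataset, otherwise the label lookup will raise a KeyError:
    #   python inference.py --model_path mask_rcnn.h5 --img_path test.jpg --show_path out.jpg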
| [
"cv2.rectangle",
"tensorflow.Graph",
"cv2.imwrite",
"argparse.ArgumentParser",
"numpy.where",
"cv2.polylines",
"numpy.fliplr",
"colorsys.hsv_to_rgb",
"cv2.imshow",
"modelibe.MaskRcnn",
"cv2.putText",
"numpy.zeros",
"numpy.any",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"skimage.measure.... | [((4275, 4334), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mask R-CNN influence"""'}), "(description='Mask R-CNN influence')\n", (4298, 4334), False, 'import argparse\n'), ((975, 1069), 'numpy.where', 'np.where', (['(mask == 1)', '(image[:, :, c] * (1 - alpha) + alpha * color[c] * 255)', 'image[:, :, c]'], {}), '(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n', (983, 1069), True, 'import numpy as np\n'), ((1278, 1288), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1286, 1288), True, 'import tensorflow as tf\n'), ((4022, 4060), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (4034, 4060), False, 'import cv2\n'), ((4069, 4106), 'cv2.imshow', 'cv2.imshow', (['"""influence"""', 'masked_image'], {}), "('influence', masked_image)\n", (4079, 4106), False, 'import cv2\n'), ((4115, 4129), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4126, 4129), False, 'import cv2\n'), ((4138, 4161), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4159, 4161), False, 'import cv2\n'), ((1381, 1448), 'modelibe.MaskRcnn', 'modelibe.MaskRcnn', ([], {'mode': '"""inference"""', 'model_dir': '"""log"""', 'config': 'config'}), "(mode='inference', model_dir='log', config=config)\n", (1398, 1448), False, 'import modelibe\n'), ((1678, 1694), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1688, 1694), False, 'import cv2\n'), ((1713, 1749), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1725, 1749), False, 'import cv2\n'), ((2866, 2922), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(1)'], {}), '(image, (x1, y1), (x2, y2), (0, 255, 0), 1)\n', (2879, 2922), False, 'import cv2\n'), ((3230, 3302), 'cv2.putText', 'cv2.putText', (['image', 'caption', '(x1, y1 + 8)', 'font', '(0.5)', '(255, 255, 255)', '(1)'], {}), '(image, caption, (x1, y1 + 8), font, 0.5, (255, 255, 255), 1)\n', (3241, 3302), False, 'import cv2\n'), ((3544, 3608), 'numpy.zeros', 'np.zeros', (['(mask.shape[0] + 2, mask.shape[1] + 2)'], {'dtype': 'np.uint8'}), '((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n', (3552, 3608), True, 'import numpy as np\n'), ((3692, 3723), 'skimage.measure.find_contours', 'find_contours', (['padded_mask', '(0.5)'], {}), '(padded_mask, 0.5)\n', (3705, 3723), False, 'from skimage.measure import find_contours\n'), ((4196, 4232), 'cv2.imwrite', 'cv2.imwrite', (['show_path', 'masked_image'], {}), '(show_path, masked_image)\n', (4207, 4232), False, 'import cv2\n'), ((750, 773), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*c'], {}), '(*c)\n', (769, 773), False, 'import colorsys\n'), ((2690, 2706), 'numpy.any', 'np.any', (['boxes[i]'], {}), '(boxes[i])\n', (2696, 2706), True, 'import numpy as np\n'), ((3952, 3998), 'cv2.polylines', 'cv2.polylines', (['image', 'verts', '(True)', '(0, 255, 0)'], {}), '(image, verts, True, (0, 255, 0))\n', (3965, 3998), False, 'import cv2\n'), ((3848, 3864), 'numpy.fliplr', 'np.fliplr', (['verts'], {}), '(verts)\n', (3857, 3864), True, 'import numpy as np\n')] |
from __future__ import annotations
import logging
from pathlib import Path
from typing import Generator, List, Set, Union
import numpy as np
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from card_live_dashboard.model.CardLiveData import CardLiveData
from card_live_dashboard.model.data_modifiers.AddGeographicNamesModifier import AddGeographicNamesModifier
from card_live_dashboard.model.data_modifiers.AddTaxonomyModifier import AddTaxonomyModifier
from card_live_dashboard.model.data_modifiers.AntarcticaNAModifier import AntarcticaNAModifier
from card_live_dashboard.service import region_codes
from card_live_dashboard.service.CardLiveDataLoader import CardLiveDataLoader
logger = logging.getLogger(__name__)
class CardLiveDataManager:
INSTANCE = None
def __init__(self, cardlive_home: Path):
ncbi_db_path = cardlive_home / 'db' / 'taxa.sqlite'
card_live_data_dir = cardlive_home / 'data' / 'card_live'
self._data_loader = CardLiveDataLoader(card_live_data_dir)
self._data_loader.add_data_modifiers([
AntarcticaNAModifier(np.datetime64('2020-07-20')),
AddGeographicNamesModifier(region_codes),
AddTaxonomyModifier(ncbi_db_path),
])
self._card_live_data = self._data_loader.read_or_update_data()
self._scheduler = BackgroundScheduler(
jobstores={
'default': MemoryJobStore()
},
executors={
'default': ThreadPoolExecutor(1)
},
job_defaults={
'max_instances': 1
}
)
self._scheduler.add_job(self.update_job, 'interval', minutes=10)
self._scheduler.start()
def update_job(self):
logger.debug('Updating CARD:Live data.')
try:
new_data = self._data_loader.read_or_update_data(self._card_live_data)
if new_data is not self._card_live_data:
logger.debug(f'Old data has {len(self._card_live_data)} samples, new data has {len(new_data)} samples')
self._card_live_data = new_data
except Exception as e:
            logger.info('An exception occurred when attempting to load new data. Skipping new data.')
logger.exception(e)
logger.debug('Finished updating CARD:Live data.')
def data_archive_generator(self, file_names: Union[List[str], Set[str]] = None) -> Generator[bytes, None, None]:
"""
Get the CARD:Live JSON files as a zipstream generator.
:param file_names: The file names to load into the archive.
:return: A generator which allows streaming of the zip file contents.
"""
if file_names is None:
file_names = self.card_data.files()
return self._data_loader.data_archive_generator(file_names)
@property
def card_data(self) -> CardLiveData:
return self._card_live_data
@classmethod
def create_instance(cls, cardlive_home: Path) -> None:
cls.INSTANCE = CardLiveDataManager(cardlive_home)
@classmethod
def get_instance(cls) -> CardLiveDataManager:
if cls.INSTANCE is not None:
return cls.INSTANCE
else:
raise Exception(f'{cls} does not yet have an instance.')
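# A minimal usage sketch (editor's addition); the home path is a placeholder
# and must contain the db/taxa.sqlite and data/card_live/ layout expected by
# the constructor:
#   CardLiveDataManager.create_instance(Path('/path/to/cardlive_home'))
#   data = CardLiveDataManager.get_instance().card_data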
| [
"logging.getLogger",
"card_live_dashboard.model.data_modifiers.AddGeographicNamesModifier.AddGeographicNamesModifier",
"apscheduler.executors.pool.ThreadPoolExecutor",
"apscheduler.jobstores.memory.MemoryJobStore",
"numpy.datetime64",
"card_live_dashboard.service.CardLiveDataLoader.CardLiveDataLoader",
... | [((824, 851), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (841, 851), False, 'import logging\n'), ((1102, 1140), 'card_live_dashboard.service.CardLiveDataLoader.CardLiveDataLoader', 'CardLiveDataLoader', (['card_live_data_dir'], {}), '(card_live_data_dir)\n', (1120, 1140), False, 'from card_live_dashboard.service.CardLiveDataLoader import CardLiveDataLoader\n'), ((1263, 1303), 'card_live_dashboard.model.data_modifiers.AddGeographicNamesModifier.AddGeographicNamesModifier', 'AddGeographicNamesModifier', (['region_codes'], {}), '(region_codes)\n', (1289, 1303), False, 'from card_live_dashboard.model.data_modifiers.AddGeographicNamesModifier import AddGeographicNamesModifier\n'), ((1317, 1350), 'card_live_dashboard.model.data_modifiers.AddTaxonomyModifier.AddTaxonomyModifier', 'AddTaxonomyModifier', (['ncbi_db_path'], {}), '(ncbi_db_path)\n', (1336, 1350), False, 'from card_live_dashboard.model.data_modifiers.AddTaxonomyModifier import AddTaxonomyModifier\n'), ((1221, 1248), 'numpy.datetime64', 'np.datetime64', (['"""2020-07-20"""'], {}), "('2020-07-20')\n", (1234, 1248), True, 'import numpy as np\n'), ((1534, 1550), 'apscheduler.jobstores.memory.MemoryJobStore', 'MemoryJobStore', ([], {}), '()\n', (1548, 1550), False, 'from apscheduler.jobstores.memory import MemoryJobStore\n'), ((1617, 1638), 'apscheduler.executors.pool.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(1)'], {}), '(1)\n', (1635, 1638), False, 'from apscheduler.executors.pool import ThreadPoolExecutor\n')] |
from copy import deepcopy
import random
from typing import Optional
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import trange
from dataset_helpers import get_dataloaders
from experiment_config import (
Config,
DatasetSubsetType,
HParams,
State,
EvaluationMetrics,
OptimizerType,
)
from logs import BaseLogger, Printer
from measures import get_all_measures
from models import NiN
from experiment_config import ComplexityType as CT
class Experiment:
def __init__(
self,
state: State,
device: torch.device,
hparams: HParams,
config: Config,
logger: BaseLogger,
result_save_callback: Optional[object] = None
):
self.state = state
self.device = device
self.hparams = hparams
self.config = config
# Random Seeds
random.seed(self.hparams.seed)
np.random.seed(self.hparams.seed)
torch.manual_seed(self.hparams.seed)
torch.cuda.manual_seed_all(self.hparams.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Logging
self.logger = logger
# Printing
self.printer = Printer(self.config.id, self.config.verbosity)
self.result_save_callback = result_save_callback
# Model
self.model = NiN(self.hparams.model_depth, self.hparams.model_width, self.hparams.base_width,
self.hparams.batch_norm, self.hparams.dropout_prob, self.hparams.dataset_type)
print(self.model)
print("Number of parameters", sum(p.numel() for p in self.model.parameters() if p.requires_grad))
self.model.to(device)
self.init_model = deepcopy(self.model)
# Optimizer
if self.hparams.optimizer_type == OptimizerType.SGD:
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.hparams.lr,
weight_decay=hparams.weight_decay)
elif self.hparams.optimizer_type == OptimizerType.SGD_MOMENTUM:
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.hparams.lr, momentum=0.9,
weight_decay=hparams.weight_decay)
elif self.hparams.optimizer_type == OptimizerType.ADAM:
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.hparams.lr,
weight_decay=hparams.weight_decay)
else:
raise KeyError
# Load data
self.train_loader, self.train_eval_loader, self.test_loader = get_dataloaders(self.hparams, self.config,
self.device)
self.train_history = []
def save_state(self, file_name: str = '') -> None:
checkpoint_file = self.config.checkpoint_dir / (file_name + '.pt')
torch.save({
'config': self.hparams,
'state': self.state,
'model': self.model.state_dict(),
'init_model': self.init_model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'np_rng': np.random.get_state(),
'torch_rng': torch.get_rng_state(),
}, checkpoint_file)
def _train_epoch(self) -> None:
self.model.train()
ce_check_batches = [(len(self.train_loader) // (2 ** (self.state.ce_check_freq))) * (i + 1) for i in
range(2 ** (self.state.ce_check_freq) - 1)]
ce_check_batches.append(len(self.train_loader) - 1)
batch_losses = []
for batch_idx, (data, target) in enumerate(self.train_loader):
data, target = data.to(self.device, non_blocking=True), target.to(self.device, non_blocking=True)
self.state.batch = batch_idx
self.state.global_batch = (self.state.epoch - 1) * len(self.train_loader) + self.state.batch
if self.state.global_batch in self.config.log_steps:
train_eval = self.evaluate_batch(DatasetSubsetType.TRAIN, True, batch_losses)
val_eval = self.evaluate_batch(DatasetSubsetType.TEST)
train_eval.all_complexities[CT.VALIDATION_ACC] = val_eval.acc
self.logger.log_generalization_gap(self.state, train_eval.acc, val_eval.acc, train_eval.avg_loss,
val_eval.avg_loss, train_eval.all_complexities)
self.model.train()
self.optimizer.zero_grad()
logits = self.model(data)
cross_entropy = F.cross_entropy(logits, target)
cross_entropy.backward()
loss = cross_entropy.clone()
batch_losses.append(loss)
self.model.train()
self.optimizer.step()
# Log everything
self.printer.batch_end(self.config, self.state, data, self.train_loader, loss)
self.logger.log_batch_end(self.config, self.state, cross_entropy, loss)
# Cross-entropy stopping check
if batch_idx == ce_check_batches[0]:
ce_check_batches.pop(0)
dataset_ce = self.evaluate_cross_entropy(DatasetSubsetType.TRAIN)[0]
if dataset_ce < self.hparams.ce_target:
self.state.converged = True
else:
while len(self.state.ce_check_milestones) > 0 and dataset_ce <= self.state.ce_check_milestones[0]:
passed_milestone = self.state.ce_check_milestones[0]
print(f'passed ce milestone {passed_milestone}')
self.state.ce_check_milestones.pop(0)
self.state.ce_check_freq += 1
if self.config.save_state_epochs is not None:
self.save_state(f'_ce_{passed_milestone}')
if self.state.converged:
break
self.train_history.append(sum(batch_losses)/len(batch_losses))
self.evaluate_cross_entropy(DatasetSubsetType.TRAIN, True)
self.evaluate_cross_entropy(DatasetSubsetType.TEST, True)
def train(self) -> None:
self.printer.train_start(self.device)
train_eval, val_eval = None, None
self.state.global_batch = 0
for epoch in trange(self.state.epoch, self.hparams.epochs + 1, disable=(not self.config.use_tqdm)):
self.state.epoch = epoch
self._train_epoch()
is_evaluation_epoch = (
epoch == 1 or epoch == self.hparams.epochs or epoch % self.config.log_epoch_freq == 0)
if is_evaluation_epoch or self.state.converged:
train_eval = self.evaluate(DatasetSubsetType.TRAIN, True)
val_eval = self.evaluate(DatasetSubsetType.TEST)
train_eval.all_complexities[CT.VALIDATION_ACC] = val_eval.acc
self.logger.log_generalization_gap(self.state, train_eval.acc, val_eval.acc, train_eval.avg_loss,
val_eval.avg_loss, train_eval.all_complexities)
self.printer.epoch_metrics(epoch, train_eval, val_eval)
if self.state.epoch > 300 and val_eval.acc > 0.99:
self.state.converged = True
if epoch == self.hparams.epochs or self.state.converged:
self.result_save_callback(epoch, val_eval, train_eval)
# Save state
is_save_epoch = self.config.save_state_epochs is not None and (
epoch in self.config.save_state_epochs or epoch == self.hparams.epochs or self.state.converged)
if is_save_epoch:
self.save_state(f"epoch_{epoch}")
if self.state.converged:
print('Converged')
break
self.printer.train_end()
if train_eval is None or val_eval is None:
raise RuntimeError
@torch.no_grad()
def evaluate_batch(self, dataset_subset_type: DatasetSubsetType, compute_all_measures: bool = False,
batches=None) -> EvaluationMetrics:
self.model.eval()
data_loader = [self.train_eval_loader, self.test_loader][dataset_subset_type]
cross_entropy_loss, acc, num_correct = self.evaluate_cross_entropy(dataset_subset_type)
all_complexities = {}
if dataset_subset_type == DatasetSubsetType.TRAIN and compute_all_measures:
all_complexities = get_all_measures(self.model, self.init_model, data_loader, acc, self.train_history,
self.hparams.seed)
all_complexities[CT.SOTL] = sum(batches)
all_complexities[CT.SOTL_10] = sum(batches[-10::])
self.logger.log_epoch_end(self.hparams, self.state, dataset_subset_type, cross_entropy_loss, acc, 0)
return EvaluationMetrics(acc, cross_entropy_loss, num_correct, len(data_loader.dataset), all_complexities)
@torch.no_grad()
def evaluate(self, dataset_subset_type: DatasetSubsetType, compute_all_measures: bool = False) -> EvaluationMetrics:
self.model.eval()
data_loader = [self.train_eval_loader, self.test_loader][dataset_subset_type]
cross_entropy_loss, acc, num_correct = self.evaluate_cross_entropy(dataset_subset_type)
all_complexities = {}
if dataset_subset_type == DatasetSubsetType.TRAIN and compute_all_measures:
all_complexities = get_all_measures(self.model, self.init_model, data_loader, acc, self.train_history,
self.hparams.seed)
self.logger.log_epoch_end(self.hparams, self.state, dataset_subset_type, cross_entropy_loss, acc,
self.train_history[-1] if dataset_subset_type == DatasetSubsetType.TRAIN else None)
return EvaluationMetrics(acc, cross_entropy_loss, num_correct, len(data_loader.dataset), all_complexities)
@torch.no_grad()
def evaluate_cross_entropy(self, dataset_subset_type: DatasetSubsetType, log: bool = False):
self.model.eval()
cross_entropy_loss = 0
num_correct = 0
data_loader = [self.train_eval_loader, self.test_loader][dataset_subset_type]
num_to_evaluate_on = len(data_loader.dataset)
for data, target in data_loader:
data, target = data.to(self.device, non_blocking=True), target.to(self.device, non_blocking=True)
logits = self.model(data)
cross_entropy = F.cross_entropy(logits, target, reduction='sum')
cross_entropy_loss += cross_entropy.item() # sum up batch loss
pred = logits.data.max(1, keepdim=True)[1] # get the index of the max logits
batch_correct = pred.eq(target.data.view_as(pred)).type(torch.FloatTensor).cpu()
num_correct += batch_correct.sum()
cross_entropy_loss /= num_to_evaluate_on
acc = num_correct.item() / num_to_evaluate_on
if log:
self.logger.log_epoch_end(self.hparams, self.state, dataset_subset_type, cross_entropy_loss, acc,
None if (dataset_subset_type != DatasetSubsetType.TRAIN or len(self.train_history) == 0)
else self.train_history[-1])
print("Cross entropy loss: {}".format(cross_entropy_loss))
print("Accuracy: {}".format(acc))
print("Num correct: {}".format(num_correct))
return cross_entropy_loss, acc, num_correct
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"numpy.random.get_state",
"measures.get_all_measures",
"models.NiN",
"random.seed",
"dataset_helpers.get_dataloaders",
"torch.get_rng_state",
"numpy.random.seed",
"logs.Printer",
"copy.deepcopy",
"torch.nn.functional.cross_entropy",
"torch.n... | [((8040, 8055), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8053, 8055), False, 'import torch\n'), ((9074, 9089), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9087, 9089), False, 'import torch\n'), ((10064, 10079), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10077, 10079), False, 'import torch\n'), ((905, 935), 'random.seed', 'random.seed', (['self.hparams.seed'], {}), '(self.hparams.seed)\n', (916, 935), False, 'import random\n'), ((944, 977), 'numpy.random.seed', 'np.random.seed', (['self.hparams.seed'], {}), '(self.hparams.seed)\n', (958, 977), True, 'import numpy as np\n'), ((986, 1022), 'torch.manual_seed', 'torch.manual_seed', (['self.hparams.seed'], {}), '(self.hparams.seed)\n', (1003, 1022), False, 'import torch\n'), ((1031, 1076), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['self.hparams.seed'], {}), '(self.hparams.seed)\n', (1057, 1076), False, 'import torch\n'), ((1264, 1310), 'logs.Printer', 'Printer', (['self.config.id', 'self.config.verbosity'], {}), '(self.config.id, self.config.verbosity)\n', (1271, 1310), False, 'from logs import BaseLogger, Printer\n'), ((1406, 1575), 'models.NiN', 'NiN', (['self.hparams.model_depth', 'self.hparams.model_width', 'self.hparams.base_width', 'self.hparams.batch_norm', 'self.hparams.dropout_prob', 'self.hparams.dataset_type'], {}), '(self.hparams.model_depth, self.hparams.model_width, self.hparams.\n base_width, self.hparams.batch_norm, self.hparams.dropout_prob, self.\n hparams.dataset_type)\n', (1409, 1575), False, 'from models import NiN\n'), ((1779, 1799), 'copy.deepcopy', 'deepcopy', (['self.model'], {}), '(self.model)\n', (1787, 1799), False, 'from copy import deepcopy\n'), ((2676, 2731), 'dataset_helpers.get_dataloaders', 'get_dataloaders', (['self.hparams', 'self.config', 'self.device'], {}), '(self.hparams, self.config, self.device)\n', (2691, 2731), False, 'from dataset_helpers import get_dataloaders\n'), ((6400, 6488), 'tqdm.trange', 'trange', (['self.state.epoch', '(self.hparams.epochs + 1)'], {'disable': '(not self.config.use_tqdm)'}), '(self.state.epoch, self.hparams.epochs + 1, disable=not self.config.\n use_tqdm)\n', (6406, 6488), False, 'from tqdm import trange\n'), ((4668, 4699), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'target'], {}), '(logits, target)\n', (4683, 4699), True, 'import torch.nn.functional as F\n'), ((8575, 8682), 'measures.get_all_measures', 'get_all_measures', (['self.model', 'self.init_model', 'data_loader', 'acc', 'self.train_history', 'self.hparams.seed'], {}), '(self.model, self.init_model, data_loader, acc, self.\n train_history, self.hparams.seed)\n', (8591, 8682), False, 'from measures import get_all_measures\n'), ((9566, 9673), 'measures.get_all_measures', 'get_all_measures', (['self.model', 'self.init_model', 'data_loader', 'acc', 'self.train_history', 'self.hparams.seed'], {}), '(self.model, self.init_model, data_loader, acc, self.\n train_history, self.hparams.seed)\n', (9582, 9673), False, 'from measures import get_all_measures\n'), ((10617, 10665), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'target'], {'reduction': '"""sum"""'}), "(logits, target, reduction='sum')\n", (10632, 10665), True, 'import torch.nn.functional as F\n'), ((3249, 3270), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (3268, 3270), True, 'import numpy as np\n'), ((3297, 3318), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (3316, 3318), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
import numpy as np
import utils
def plot_glimpse(config, images, locations, preds, labels, step, animate):
"""
For each image in images, draws bounding boxes
corresponding to glimpse locations.
First glimpse is colored green, intermediate
glimpses are orange, and terminal glimpse is red.
    Optionally constructs a .gif animation showing the extracted glimpses
    (requires ImageMagick); toggled via the `animate` boolean parameter.
Saves image with overlaid bounding boxes to a local dir.
"""
batch_size, img_h, img_w, channels = images.shape
num_glimpses = config.num_glimpses
img_idx_range = config.verbose
object_labels = config.object_labels
# animation writer
writer = animation.ImageMagickWriter(fps=15, bitrate=1800)
# factors used to correct location and bounding box center
hw = img_h / 2
g_size = config.glimpse_size
for img_idx in range(img_idx_range):
utils.make_dir(config.image_dir_name + 'step{}/image{}'.format(step, img_idx))
fig, ax = plt.subplots(1, 2)
if animate:
glimpse_fig = plt.figure()
img = np.squeeze(images[img_idx])
glimpse_bboxes = []
# map locations from [-1, 1] to [0, 28] image space
locations[img_idx] = (locations[img_idx] * hw) + 14
ax[0].imshow(img, cmap='Greys', interpolation='none')
for glimpse in range(num_glimpses):
if animate:
glimpse_ax = glimpse_fig.add_subplot(111, label='glimpse{}'.format(glimpse))
glimpse_ax.imshow(img, cmap='Greys', interpolation='none')
location = locations[img_idx, glimpse]
if (glimpse == 0):
color = 'green'
elif (glimpse == num_glimpses - 1):
color = 'red'
else:
color = 'orange'
if animate:
glimpse_bbox = create_bbox((location[0] - g_size/2, location[1] - g_size/2), g_size, g_size, color=color)
glimpse_ax.add_patch(glimpse_bbox)
glimpse_bboxes.append([glimpse_ax])
bbox = create_bbox((location[0] - g_size/2, location[1] - g_size/2), g_size, g_size, color=color)
ax[0].add_patch(bbox)
if animate:
glimpse_anim_path = config.image_dir_name + 'step{}/image{}/glimpse_anim.gif'.format(step, img_idx)
anim = animation.ArtistAnimation(glimpse_fig, glimpse_bboxes, interval=1000, repeat_delay=2000)
anim.save(glimpse_anim_path, writer='imagemagick')
plt.close(glimpse_fig)
# Plot probability bar chart
label_pos = np.arange(len(object_labels))
preds_max_idx = np.argmax(preds[img_idx])
prediction = preds[img_idx, preds_max_idx]
prob = utils.truncate(prediction, 4)
label_max_idx = np.argmax(labels[img_idx])
if preds_max_idx == label_max_idx:
color = 'green'
else:
color = 'red'
ax[1].set(adjustable='box')
ax[1].bar(label_pos, preds[img_idx], align='center')
ax[1].set_xticks(label_pos)
ax[1].set_xticklabels(object_labels)
ax[1].set_ybound(lower=0., upper=1.)
ax[1].set_title('Prediction {} with prob {}, label {}'.format(preds_max_idx, prob, label_max_idx), color=color)
png_name = config.image_dir_name + 'step{}/image{}/full_plot.png'.format(step, img_idx)
fig.savefig(png_name, bbox_inches='tight')
plt.close(fig)
def create_bbox(xy, width, height, color='green', linewidth=1.5, alpha=1):
return patches.Rectangle(xy, width, height, fill=False, edgecolor=color, linewidth=linewidth, alpha=alpha)
| [
"matplotlib.patches.Rectangle",
"numpy.argmax",
"numpy.squeeze",
"matplotlib.pyplot.close",
"matplotlib.animation.ArtistAnimation",
"matplotlib.pyplot.figure",
"matplotlib.animation.ImageMagickWriter",
"utils.truncate",
"matplotlib.pyplot.subplots"
] | [((832, 881), 'matplotlib.animation.ImageMagickWriter', 'animation.ImageMagickWriter', ([], {'fps': '(15)', 'bitrate': '(1800)'}), '(fps=15, bitrate=1800)\n', (859, 881), True, 'import matplotlib.animation as animation\n'), ((3713, 3817), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['xy', 'width', 'height'], {'fill': '(False)', 'edgecolor': 'color', 'linewidth': 'linewidth', 'alpha': 'alpha'}), '(xy, width, height, fill=False, edgecolor=color, linewidth\n =linewidth, alpha=alpha)\n', (3730, 3817), True, 'import matplotlib.patches as patches\n'), ((1146, 1164), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1158, 1164), True, 'import matplotlib.pyplot as plt\n'), ((1238, 1265), 'numpy.squeeze', 'np.squeeze', (['images[img_idx]'], {}), '(images[img_idx])\n', (1248, 1265), True, 'import numpy as np\n'), ((2824, 2849), 'numpy.argmax', 'np.argmax', (['preds[img_idx]'], {}), '(preds[img_idx])\n', (2833, 2849), True, 'import numpy as np\n'), ((2916, 2945), 'utils.truncate', 'utils.truncate', (['prediction', '(4)'], {}), '(prediction, 4)\n', (2930, 2945), False, 'import utils\n'), ((2972, 2998), 'numpy.argmax', 'np.argmax', (['labels[img_idx]'], {}), '(labels[img_idx])\n', (2981, 2998), True, 'import numpy as np\n'), ((3611, 3625), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3620, 3625), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1223), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1221, 1223), True, 'import matplotlib.pyplot as plt\n'), ((2517, 2609), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['glimpse_fig', 'glimpse_bboxes'], {'interval': '(1000)', 'repeat_delay': '(2000)'}), '(glimpse_fig, glimpse_bboxes, interval=1000,\n repeat_delay=2000)\n', (2542, 2609), True, 'import matplotlib.animation as animation\n'), ((2681, 2703), 'matplotlib.pyplot.close', 'plt.close', (['glimpse_fig'], {}), '(glimpse_fig)\n', (2690, 2703), True, 'import matplotlib.pyplot as plt\n')] |
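A minimal sketch of the [-1, 1] -> pixel mapping and bounding-box drawing used by plot_glimpse above, assuming a 28x28 image (so hw == 14); the image and location values are illustrative only:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches

img = np.zeros((28, 28))            # placeholder image
loc = np.array([0.5, -0.25])         # glimpse center in [-1, 1] space
loc_px = loc * 14 + 14             # map [-1, 1] -> [0, 28] pixel space
g_size = 8                         # glimpse (bounding box) size

fig, ax = plt.subplots()
ax.imshow(img, cmap='Greys', interpolation='none')
ax.add_patch(patches.Rectangle((loc_px[0] - g_size / 2, loc_px[1] - g_size / 2),
                           g_size, g_size, fill=False, edgecolor='green'))
plt.show()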
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
# This file is part of CBI Toolbox.
#
# CBI Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the 3-Clause BSD License.
#
# CBI Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# 3-Clause BSD License for more details.
#
# You should have received a copy of the 3-Clause BSD License along
# with CBI Toolbox. If not, see https://opensource.org/licenses/BSD-3-Clause.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import numpy as np
import json
from cbi_toolbox.reconstruct import psnr
nas = (30, 50, 80)
dnas = np.arange(10, 101, 5)
norm = 'mse'
path = os.environ['OVC_PATH']
npath = os.path.join(path, 'noise')
gpath = os.path.join(path, 'graph')
ref = np.load(os.path.join(path, 'arrays', 'phantom.npy'))
radon = np.load(os.path.join(npath, 'iradon.npy'))
results = {
'fss': {},
'fps': {},
'dc': {},
'fdc': {},
}
fss_snr = results['fss']
fps_snr = results['fps']
dc_snr = results['dc']
fdc_snr = results['fdc']
results['radon'] = psnr(ref, radon, norm)
del radon
for na in nas:
fss = np.load(os.path.join(npath, 'fssopt_{:03d}.npy'.format(na)))
fss_snr[na] = psnr(ref, fss, norm)
del fss
fps = np.load(os.path.join(npath, 'fpsopt_{:03d}.npy'.format(na)))
fps_snr[na] = psnr(ref, fps, norm)
del fps
dc_snr[na] = []
fdc_snr[na] = []
for dna in dnas:
dc = np.load(os.path.join(npath, '{:03d}_{:03d}.npy'.format(na, dna)))
dc_snr[na].append(psnr(ref, dc, norm))
fdc = np.load(os.path.join(
npath, '{:03d}_{:03d}f.npy'.format(na, dna)))
fdc_snr[na].append(psnr(ref, fdc, norm))
with open(os.path.join(gpath, 'noise.json'), 'w') as fp:
json.dump(results, fp)
| [
"cbi_toolbox.reconstruct.psnr",
"json.dump",
"os.path.join",
"numpy.arange"
] | [((795, 816), 'numpy.arange', 'np.arange', (['(10)', '(101)', '(5)'], {}), '(10, 101, 5)\n', (804, 816), True, 'import numpy as np\n'), ((870, 897), 'os.path.join', 'os.path.join', (['path', '"""noise"""'], {}), "(path, 'noise')\n", (882, 897), False, 'import os\n'), ((906, 933), 'os.path.join', 'os.path.join', (['path', '"""graph"""'], {}), "(path, 'graph')\n", (918, 933), False, 'import os\n'), ((1238, 1260), 'cbi_toolbox.reconstruct.psnr', 'psnr', (['ref', 'radon', 'norm'], {}), '(ref, radon, norm)\n', (1242, 1260), False, 'from cbi_toolbox.reconstruct import psnr\n'), ((949, 992), 'os.path.join', 'os.path.join', (['path', '"""arrays"""', '"""phantom.npy"""'], {}), "(path, 'arrays', 'phantom.npy')\n", (961, 992), False, 'import os\n'), ((1010, 1043), 'os.path.join', 'os.path.join', (['npath', '"""iradon.npy"""'], {}), "(npath, 'iradon.npy')\n", (1022, 1043), False, 'import os\n'), ((1377, 1397), 'cbi_toolbox.reconstruct.psnr', 'psnr', (['ref', 'fss', 'norm'], {}), '(ref, fss, norm)\n', (1381, 1397), False, 'from cbi_toolbox.reconstruct import psnr\n'), ((1500, 1520), 'cbi_toolbox.reconstruct.psnr', 'psnr', (['ref', 'fps', 'norm'], {}), '(ref, fps, norm)\n', (1504, 1520), False, 'from cbi_toolbox.reconstruct import psnr\n'), ((1930, 1952), 'json.dump', 'json.dump', (['results', 'fp'], {}), '(results, fp)\n', (1939, 1952), False, 'import json\n'), ((1879, 1912), 'os.path.join', 'os.path.join', (['gpath', '"""noise.json"""'], {}), "(gpath, 'noise.json')\n", (1891, 1912), False, 'import os\n'), ((1702, 1721), 'cbi_toolbox.reconstruct.psnr', 'psnr', (['ref', 'dc', 'norm'], {}), '(ref, dc, norm)\n', (1706, 1721), False, 'from cbi_toolbox.reconstruct import psnr\n'), ((1845, 1865), 'cbi_toolbox.reconstruct.psnr', 'psnr', (['ref', 'fdc', 'norm'], {}), '(ref, fdc, norm)\n', (1849, 1865), False, 'from cbi_toolbox.reconstruct import psnr\n')] |
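The psnr function above comes from cbi_toolbox.reconstruct and is called here with norm='mse'; its exact implementation is not shown. A generic MSE-based stand-in for readers without the package (hypothetical, not the library code):

import numpy as np

def psnr_db(ref, img):
    # Peak signal-to-noise ratio in dB, computed from the mean squared error.
    mse = np.mean((ref - img) ** 2)
    return 10 * np.log10(ref.max() ** 2 / mse)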
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A Python implementation of the method described in [#a]_ and [#b]_ for
calculating Fourier coefficients for characterizing
closed contours.
References
----------
.. [#a] <NAME> and <NAME>, “Elliptic Fourier Features of a
    Closed Contour,” Computer Vision, Graphics and Image Processing,
Vol. 18, pp. 236-258, 1982.
.. [#b] <NAME>, <NAME> and <NAME>, “Feature Extraction
Methods for Character Recognition - A Survey”, Pattern Recognition
Vol. 29, No.4, pp. 641-662, 1996
Created by hbldh <<EMAIL>> on 2016-01-30.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
try:
_range = xrange
except NameError:
_range = range
def elliptic_fourier_descriptors(contour, order=10, normalize=False):
"""Calculate elliptical Fourier descriptors for a contour.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:param int order: The order of Fourier coefficients to calculate.
:param bool normalize: If the coefficients should be normalized;
see references for details.
:return: A ``[order x 4]`` array of Fourier coefficients.
:rtype: :py:class:`numpy.ndarray`
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0.]), np.cumsum(dt)])
T = t[-1]
phi = (2 * np.pi * t) / T
orders = np.arange(1, order + 1)
consts = T / (2 * orders * orders * np.pi * np.pi)
phi = phi * orders.reshape((order, -1))
d_cos_phi = np.cos(phi[:, 1:]) - np.cos(phi[:, :-1])
d_sin_phi = np.sin(phi[:, 1:]) - np.sin(phi[:, :-1])
cos_phi = (dxy[:, 0] / dt) * d_cos_phi
a = consts * np.sum(cos_phi, axis=1)
b = consts * np.sum((dxy[:, 0] / dt) * d_sin_phi, axis=1)
c = consts * np.sum((dxy[:, 1] / dt) * d_cos_phi, axis=1)
d = consts * np.sum((dxy[:, 1] / dt) * d_sin_phi, axis=1)
coeffs = np.concatenate(
[
a.reshape((order, 1)),
b.reshape((order, 1)),
c.reshape((order, 1)),
d.reshape((order, 1)),
],
axis=1,
)
if normalize:
coeffs = normalize_efd(coeffs)
return coeffs
def normalize_efd(coeffs, size_invariant=True, include_features=False):
"""Normalizes an array of Fourier coefficients.
See [#a]_ and [#b]_ for details.
:param numpy.ndarray coeffs: A ``[n x 4]`` Fourier coefficient array.
    :param bool size_invariant: If size invariance normalizing should be done as well.
        Default is ``True``.
    :param bool include_features: If the rotation angle ``theta_1`` should also be
        returned along with the coefficients. Default is ``False``.
    :return: The normalized ``[n x 4]`` Fourier coefficient array.
:rtype: :py:class:`numpy.ndarray`
"""
# Make the coefficients have a zero phase shift from
# the first major axis. Theta_1 is that shift angle.
theta_1 = 0.5 * np.arctan2(
2 * ((coeffs[0, 0] * coeffs[0, 1]) + (coeffs[0, 2] * coeffs[0, 3])),
(
(coeffs[0, 0] ** 2)
- (coeffs[0, 1] ** 2)
+ (coeffs[0, 2] ** 2)
- (coeffs[0, 3] ** 2)
),
)
# Rotate all coefficients by theta_1.
for n in _range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = np.dot(
np.array(
[
[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]],
]
),
np.array(
[
[np.cos(n * theta_1), -np.sin(n * theta_1)],
[np.sin(n * theta_1), np.cos(n * theta_1)],
]
),
).flatten()
# Make the coefficients rotation invariant by rotating so that
# the semi-major axis is parallel to the x-axis.
psi_1 = np.arctan2(coeffs[0, 2], coeffs[0, 0])
psi_rotation_matrix = np.array(
[[np.cos(psi_1), np.sin(psi_1)], [-np.sin(psi_1), np.cos(psi_1)]]
)
# Rotate all coefficients by -psi_1.
for n in _range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = psi_rotation_matrix.dot(
np.array(
[
[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]],
]
)
).flatten()
if size_invariant:
# Obtain size-invariance by normalizing.
coeffs /= np.abs(coeffs[0, 0])
if include_features:
return coeffs, theta_1
return coeffs
def calculate_dc_coefficients(contour):
"""Calculate the :math:`A_0` and :math:`C_0` coefficients of the elliptic Fourier series.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:return: The :math:`A_0` and :math:`C_0` coefficients.
:rtype: tuple
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0.]), np.cumsum(dt)])
T = t[-1]
xi = np.cumsum(dxy[:, 0]) - (dxy[:, 0] / dt) * t[1:]
A0 = (1 / T) * np.sum(((dxy[:, 0] / (2 * dt)) * np.diff(t ** 2)) + xi * dt)
delta = np.cumsum(dxy[:, 1]) - (dxy[:, 1] / dt) * t[1:]
C0 = (1 / T) * np.sum(((dxy[:, 1] / (2 * dt))
* np.diff(t ** 2)) + delta * dt)
    # A0 and C0 relate to the first point of the contour array as origin.
# Adding those values to the coefficients to make them relate to true origin.
return contour[0, 0] + A0, contour[0, 1] + C0
def elliptic_fourier_features(contour, order=10, include_rotation=True, include_size=True, include_location=True):
"""Generate features from a contour using EFD. Features are normailsed as specified.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:param bool include_rotation: If information about the rotation of the contour should be included in the features.
Default is ``True``.
:param bool size_invariant: If information about the size of the contour should be included in the features.
Default is ``True``.
:param bool include_location: If information about the location of the contour should be included in the features.
Default is ``True``.
"""
coeffs = elliptic_fourier_descriptors(
contour, order=order, normalize=False)
normalized, theta = normalize_efd(
coeffs, size_invariant=(not include_size), include_features=True)
features = normalized.flatten()
    # Remove entries no longer required after removing rotation from features
if include_size:
features = np.delete(features, [1, 2])
else:
features = np.delete(features, [0, 1, 2])
if include_rotation:
features = np.append(features, theta)
if include_location:
A0, C0 = calculate_dc_coefficients(contour)
features = np.append(features, [A0, C0])
return features
def reconstruct_contour_from_features(features, order=10, include_rotation=True, include_size=True, include_location=True):
"""Returns the contour specified by the features. If rotation, location are specified, these paramters are additionally returned
"""
coeffs = features
if include_size:
coeffs = np.insert(coeffs, [1, 1], [0, 0])
else:
coeffs = np.insert(coeffs, [0, 0, 0], [1, 0, 0])
if include_location:
[A0, C0] = coeffs[-2:]
coeffs = coeffs[:-2]
if include_rotation:
theta = coeffs[-1:]
coeffs = coeffs[:-1]
coeffs = coeffs.reshape(order, 4)
if include_location and include_rotation:
return coeffs, theta, [A0, C0]
if include_rotation:
return coeffs, theta
if include_location:
return coeffs, [A0, C0]
return coeffs
def reconstruct_contour(coeffs, locus=(0, 0), num_points=300):
"""Returns the contour specified by the coefficients.
:param coeffs: A ``[n x 4]`` Fourier coefficient array.
:type coeffs: numpy.ndarray
:param locus: The :math:`A_0` and :math:`C_0` elliptic locus in [#a]_ and [#b]_.
:type locus: list, tuple or numpy.ndarray
:param num_points: The number of sample points used for reconstructing the contour from the EFD.
:type num_points: int
:return: A list of x,y coordinates for the reconstructed contour.
:rtype: numpy.ndarray
"""
t = np.linspace(0, 1.0, num_points)
# Append extra dimension to enable element-wise broadcasted multiplication
coeffs = coeffs.reshape(coeffs.shape[0], coeffs.shape[1], 1)
orders = coeffs.shape[0]
orders = np.arange(1, orders + 1).reshape(-1, 1)
order_phases = 2 * orders * np.pi * t.reshape(1, -1)
xt_all = coeffs[:, 0] * np.cos(order_phases) + \
coeffs[:, 1] * np.sin(order_phases)
yt_all = coeffs[:, 2] * np.cos(order_phases) + \
coeffs[:, 3] * np.sin(order_phases)
xt_all = xt_all.sum(axis=0)
yt_all = yt_all.sum(axis=0)
xt_all = xt_all + np.ones((num_points,)) * locus[0]
yt_all = yt_all + np.ones((num_points,)) * locus[1]
reconstruction = np.stack([xt_all, yt_all], axis=1)
return reconstruction
def plot_efd(coeffs, locus=(0., 0.), image=None, contour=None, n=300):
"""Plot a ``[2 x (N / 2)]`` grid of successive truncations of the series.
.. note::
Requires `matplotlib <http://matplotlib.org/>`_!
:param numpy.ndarray coeffs: ``[N x 4]`` Fourier coefficient array.
:param list, tuple or numpy.ndarray locus:
The :math:`A_0` and :math:`C_0` elliptic locus in [#a]_ and [#b]_.
:param int n: Number of points to use for plotting of Fourier series.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print("Cannot plot: matplotlib was not installed.")
return
N = coeffs.shape[0]
N_half = int(np.ceil(N / 2))
n_rows = 2
t = np.linspace(0, 1.0, n)
xt = np.ones((n,)) * locus[0]
yt = np.ones((n,)) * locus[1]
for n in _range(coeffs.shape[0]):
xt += (coeffs[n, 0] * np.cos(2 * (n + 1) * np.pi * t)) + (
coeffs[n, 1] * np.sin(2 * (n + 1) * np.pi * t)
)
yt += (coeffs[n, 2] * np.cos(2 * (n + 1) * np.pi * t)) + (
coeffs[n, 3] * np.sin(2 * (n + 1) * np.pi * t)
)
ax = plt.subplot2grid((n_rows, N_half), (n // N_half, n % N_half))
ax.set_title(str(n + 1))
if contour is not None:
ax.plot(contour[:, 1], contour[:, 0], "c--", linewidth=2)
ax.plot(yt, xt, "r", linewidth=2)
if image is not None:
ax.imshow(image, plt.cm.gray)
plt.show()
| [
"numpy.insert",
"numpy.abs",
"numpy.ceil",
"numpy.ones",
"numpy.delete",
"numpy.diff",
"numpy.append",
"numpy.stack",
"numpy.linspace",
"numpy.sum",
"numpy.arctan2",
"numpy.cos",
"numpy.array",
"numpy.sin",
"numpy.cumsum",
"matplotlib.pyplot.subplot2grid",
"numpy.arange",
"matplotl... | [((1317, 1341), 'numpy.diff', 'np.diff', (['contour'], {'axis': '(0)'}), '(contour, axis=0)\n', (1324, 1341), True, 'import numpy as np\n'), ((1490, 1513), 'numpy.arange', 'np.arange', (['(1)', '(order + 1)'], {}), '(1, order + 1)\n', (1499, 1513), True, 'import numpy as np\n'), ((3806, 3844), 'numpy.arctan2', 'np.arctan2', (['coeffs[0, 2]', 'coeffs[0, 0]'], {}), '(coeffs[0, 2], coeffs[0, 0])\n', (3816, 3844), True, 'import numpy as np\n'), ((4799, 4823), 'numpy.diff', 'np.diff', (['contour'], {'axis': '(0)'}), '(contour, axis=0)\n', (4806, 4823), True, 'import numpy as np\n'), ((8278, 8309), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'num_points'], {}), '(0, 1.0, num_points)\n', (8289, 8309), True, 'import numpy as np\n'), ((8988, 9022), 'numpy.stack', 'np.stack', (['[xt_all, yt_all]'], {'axis': '(1)'}), '([xt_all, yt_all], axis=1)\n', (8996, 9022), True, 'import numpy as np\n'), ((9781, 9803), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'n'], {}), '(0, 1.0, n)\n', (9792, 9803), True, 'import numpy as np\n'), ((10512, 10522), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10520, 10522), True, 'import matplotlib.pyplot as plt\n'), ((1629, 1647), 'numpy.cos', 'np.cos', (['phi[:, 1:]'], {}), '(phi[:, 1:])\n', (1635, 1647), True, 'import numpy as np\n'), ((1650, 1669), 'numpy.cos', 'np.cos', (['phi[:, :-1]'], {}), '(phi[:, :-1])\n', (1656, 1669), True, 'import numpy as np\n'), ((1686, 1704), 'numpy.sin', 'np.sin', (['phi[:, 1:]'], {}), '(phi[:, 1:])\n', (1692, 1704), True, 'import numpy as np\n'), ((1707, 1726), 'numpy.sin', 'np.sin', (['phi[:, :-1]'], {}), '(phi[:, :-1])\n', (1713, 1726), True, 'import numpy as np\n'), ((1787, 1810), 'numpy.sum', 'np.sum', (['cos_phi'], {'axis': '(1)'}), '(cos_phi, axis=1)\n', (1793, 1810), True, 'import numpy as np\n'), ((1828, 1870), 'numpy.sum', 'np.sum', (['(dxy[:, 0] / dt * d_sin_phi)'], {'axis': '(1)'}), '(dxy[:, 0] / dt * d_sin_phi, axis=1)\n', (1834, 1870), True, 'import numpy as np\n'), ((1890, 1932), 'numpy.sum', 'np.sum', (['(dxy[:, 1] / dt * d_cos_phi)'], {'axis': '(1)'}), '(dxy[:, 1] / dt * d_cos_phi, axis=1)\n', (1896, 1932), True, 'import numpy as np\n'), ((1952, 1994), 'numpy.sum', 'np.sum', (['(dxy[:, 1] / dt * d_sin_phi)'], {'axis': '(1)'}), '(dxy[:, 1] / dt * d_sin_phi, axis=1)\n', (1958, 1994), True, 'import numpy as np\n'), ((2890, 3053), 'numpy.arctan2', 'np.arctan2', (['(2 * (coeffs[0, 0] * coeffs[0, 1] + coeffs[0, 2] * coeffs[0, 3]))', '(coeffs[0, 0] ** 2 - coeffs[0, 1] ** 2 + coeffs[0, 2] ** 2 - coeffs[0, 3] ** 2)'], {}), '(2 * (coeffs[0, 0] * coeffs[0, 1] + coeffs[0, 2] * coeffs[0, 3]),\n coeffs[0, 0] ** 2 - coeffs[0, 1] ** 2 + coeffs[0, 2] ** 2 - coeffs[0, 3\n ] ** 2)\n', (2900, 3053), True, 'import numpy as np\n'), ((4398, 4418), 'numpy.abs', 'np.abs', (['coeffs[0, 0]'], {}), '(coeffs[0, 0])\n', (4404, 4418), True, 'import numpy as np\n'), ((4937, 4957), 'numpy.cumsum', 'np.cumsum', (['dxy[:, 0]'], {}), '(dxy[:, 0])\n', (4946, 4957), True, 'import numpy as np\n'), ((5077, 5097), 'numpy.cumsum', 'np.cumsum', (['dxy[:, 1]'], {}), '(dxy[:, 1])\n', (5086, 5097), True, 'import numpy as np\n'), ((6528, 6555), 'numpy.delete', 'np.delete', (['features', '[1, 2]'], {}), '(features, [1, 2])\n', (6537, 6555), True, 'import numpy as np\n'), ((6585, 6615), 'numpy.delete', 'np.delete', (['features', '[0, 1, 2]'], {}), '(features, [0, 1, 2])\n', (6594, 6615), True, 'import numpy as np\n'), ((6661, 6687), 'numpy.append', 'np.append', (['features', 'theta'], {}), '(features, theta)\n', (6670, 6687), True, 
'import numpy as np\n'), ((6785, 6814), 'numpy.append', 'np.append', (['features', '[A0, C0]'], {}), '(features, [A0, C0])\n', (6794, 6814), True, 'import numpy as np\n'), ((7165, 7198), 'numpy.insert', 'np.insert', (['coeffs', '[1, 1]', '[0, 0]'], {}), '(coeffs, [1, 1], [0, 0])\n', (7174, 7198), True, 'import numpy as np\n'), ((7226, 7265), 'numpy.insert', 'np.insert', (['coeffs', '[0, 0, 0]', '[1, 0, 0]'], {}), '(coeffs, [0, 0, 0], [1, 0, 0])\n', (7235, 7265), True, 'import numpy as np\n'), ((9741, 9755), 'numpy.ceil', 'np.ceil', (['(N / 2)'], {}), '(N / 2)\n', (9748, 9755), True, 'import numpy as np\n'), ((9813, 9826), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (9820, 9826), True, 'import numpy as np\n'), ((9847, 9860), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (9854, 9860), True, 'import numpy as np\n'), ((10196, 10257), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(n_rows, N_half)', '(n // N_half, n % N_half)'], {}), '((n_rows, N_half), (n // N_half, n % N_half))\n', (10212, 10257), True, 'import matplotlib.pyplot as plt\n'), ((1415, 1428), 'numpy.cumsum', 'np.cumsum', (['dt'], {}), '(dt)\n', (1424, 1428), True, 'import numpy as np\n'), ((4897, 4910), 'numpy.cumsum', 'np.cumsum', (['dt'], {}), '(dt)\n', (4906, 4910), True, 'import numpy as np\n'), ((8497, 8521), 'numpy.arange', 'np.arange', (['(1)', '(orders + 1)'], {}), '(1, orders + 1)\n', (8506, 8521), True, 'import numpy as np\n'), ((8623, 8643), 'numpy.cos', 'np.cos', (['order_phases'], {}), '(order_phases)\n', (8629, 8643), True, 'import numpy as np\n'), ((8671, 8691), 'numpy.sin', 'np.sin', (['order_phases'], {}), '(order_phases)\n', (8677, 8691), True, 'import numpy as np\n'), ((8720, 8740), 'numpy.cos', 'np.cos', (['order_phases'], {}), '(order_phases)\n', (8726, 8740), True, 'import numpy as np\n'), ((8768, 8788), 'numpy.sin', 'np.sin', (['order_phases'], {}), '(order_phases)\n', (8774, 8788), True, 'import numpy as np\n'), ((8876, 8898), 'numpy.ones', 'np.ones', (['(num_points,)'], {}), '((num_points,))\n', (8883, 8898), True, 'import numpy as np\n'), ((8932, 8954), 'numpy.ones', 'np.ones', (['(num_points,)'], {}), '((num_points,))\n', (8939, 8954), True, 'import numpy as np\n'), ((3891, 3904), 'numpy.cos', 'np.cos', (['psi_1'], {}), '(psi_1)\n', (3897, 3904), True, 'import numpy as np\n'), ((3906, 3919), 'numpy.sin', 'np.sin', (['psi_1'], {}), '(psi_1)\n', (3912, 3919), True, 'import numpy as np\n'), ((3939, 3952), 'numpy.cos', 'np.cos', (['psi_1'], {}), '(psi_1)\n', (3945, 3952), True, 'import numpy as np\n'), ((9941, 9972), 'numpy.cos', 'np.cos', (['(2 * (n + 1) * np.pi * t)'], {}), '(2 * (n + 1) * np.pi * t)\n', (9947, 9972), True, 'import numpy as np\n'), ((10005, 10036), 'numpy.sin', 'np.sin', (['(2 * (n + 1) * np.pi * t)'], {}), '(2 * (n + 1) * np.pi * t)\n', (10011, 10036), True, 'import numpy as np\n'), ((10077, 10108), 'numpy.cos', 'np.cos', (['(2 * (n + 1) * np.pi * t)'], {}), '(2 * (n + 1) * np.pi * t)\n', (10083, 10108), True, 'import numpy as np\n'), ((10141, 10172), 'numpy.sin', 'np.sin', (['(2 * (n + 1) * np.pi * t)'], {}), '(2 * (n + 1) * np.pi * t)\n', (10147, 10172), True, 'import numpy as np\n'), ((3274, 3364), 'numpy.array', 'np.array', (['[[coeffs[n - 1, 0], coeffs[n - 1, 1]], [coeffs[n - 1, 2], coeffs[n - 1, 3]]]'], {}), '([[coeffs[n - 1, 0], coeffs[n - 1, 1]], [coeffs[n - 1, 2], coeffs[n -\n 1, 3]]])\n', (3282, 3364), True, 'import numpy as np\n'), ((3924, 3937), 'numpy.sin', 'np.sin', (['psi_1'], {}), '(psi_1)\n', (3930, 3937), True, 'import numpy as np\n'), ((4111, 
4201), 'numpy.array', 'np.array', (['[[coeffs[n - 1, 0], coeffs[n - 1, 1]], [coeffs[n - 1, 2], coeffs[n - 1, 3]]]'], {}), '([[coeffs[n - 1, 0], coeffs[n - 1, 1]], [coeffs[n - 1, 2], coeffs[n -\n 1, 3]]])\n', (4119, 4201), True, 'import numpy as np\n'), ((5037, 5052), 'numpy.diff', 'np.diff', (['(t ** 2)'], {}), '(t ** 2)\n', (5044, 5052), True, 'import numpy as np\n'), ((5204, 5219), 'numpy.diff', 'np.diff', (['(t ** 2)'], {}), '(t ** 2)\n', (5211, 5219), True, 'import numpy as np\n'), ((3512, 3531), 'numpy.cos', 'np.cos', (['(n * theta_1)'], {}), '(n * theta_1)\n', (3518, 3531), True, 'import numpy as np\n'), ((3577, 3596), 'numpy.sin', 'np.sin', (['(n * theta_1)'], {}), '(n * theta_1)\n', (3583, 3596), True, 'import numpy as np\n'), ((3598, 3617), 'numpy.cos', 'np.cos', (['(n * theta_1)'], {}), '(n * theta_1)\n', (3604, 3617), True, 'import numpy as np\n'), ((3534, 3553), 'numpy.sin', 'np.sin', (['(n * theta_1)'], {}), '(n * theta_1)\n', (3540, 3553), True, 'import numpy as np\n')] |
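A short usage sketch for the module above; the circle contour is hypothetical test data, explicitly closed so that np.diff also covers the final segment:

import numpy as np

t = np.linspace(0, 2 * np.pi, 100, endpoint=False)
contour = np.stack([np.cos(t), np.sin(t)], axis=1)
contour = np.vstack([contour, contour[:1]])  # close the contour

coeffs = elliptic_fourier_descriptors(contour, order=10)
locus = calculate_dc_coefficients(contour)
recon = reconstruct_contour(coeffs, locus=locus, num_points=300)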
import sys
_str = sys.argv[1]
import coopihc
from coopihc.space import StateElement, Space, State
import numpy
x = StateElement(
values=1,
spaces=Space([numpy.array([-1.0]).reshape(1, 1), numpy.array([1.0]).reshape(1, 1)]),
)
y = StateElement(values=2, spaces=Space(numpy.array([1, 2, 3], dtype=numpy.int)))
z = StateElement(
values=5, spaces=Space(numpy.array([i for i in range(10)], dtype=numpy.int))
)
s1 = State(substate_x=x, substate_y=y, substate_z=z)
w = StateElement(
values=numpy.zeros((3, 3)),
spaces=Space([-3.5 * numpy.ones((3, 3)), 6 * numpy.ones((3, 3))]),
)
s1["substate_w"] = w
xx = StateElement(
values=numpy.ones((2, 2)),
spaces=Space([-0.5 * numpy.ones((2, 2)), 0.5 * numpy.ones((2, 2))]),
clipping_mode="clip",
)
yy = StateElement(
values=None, spaces=Space(numpy.array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]))
)
s2 = State(**{"substate_xx": xx, "substate_yy": yy})
S = State()
S["substate1"] = s1
S["substate2"] = s2
if _str == "reset" or _str == "all":
print(S.reset())
if _str == "flat" or _str == "all":
print(S.flat())
if _str == "repr" or _str == "all":
print(S)
if _str == "filter" or _str == "all":
from collections import OrderedDict
ordereddict = OrderedDict(
{"substate1": OrderedDict({"substate_x": 0, "substate_w": 0})}
)
ns1 = S.filter("values", filterdict=ordereddict)
ns2 = S.filter("spaces", filterdict=ordereddict)
ns5 = S.filter("values")
ns6 = S.filter("spaces")
if _str == "copy" or _str == "all":
import copy
import time
start = time.time()
for i in range(1000):
_copy = copy.copy(S)
mid = time.time()
for i in range(1000):
_deepcopy = copy.deepcopy(S)
end = time.time()
print(mid - start)
print(end - start)
if _str == "serialize" or _str == "all":
print(S.serialize())
| [
"collections.OrderedDict",
"numpy.ones",
"coopihc.space.State",
"numpy.array",
"numpy.zeros",
"copy.deepcopy",
"copy.copy",
"time.time"
] | [((430, 477), 'coopihc.space.State', 'State', ([], {'substate_x': 'x', 'substate_y': 'y', 'substate_z': 'z'}), '(substate_x=x, substate_y=y, substate_z=z)\n', (435, 477), False, 'from coopihc.space import StateElement, Space, State\n'), ((882, 929), 'coopihc.space.State', 'State', ([], {}), "(**{'substate_xx': xx, 'substate_yy': yy})\n", (887, 929), False, 'from coopihc.space import StateElement, Space, State\n'), ((935, 942), 'coopihc.space.State', 'State', ([], {}), '()\n', (940, 942), False, 'from coopihc.space import StateElement, Space, State\n'), ((1588, 1599), 'time.time', 'time.time', ([], {}), '()\n', (1597, 1599), False, 'import time\n'), ((1665, 1676), 'time.time', 'time.time', ([], {}), '()\n', (1674, 1676), False, 'import time\n'), ((1750, 1761), 'time.time', 'time.time', ([], {}), '()\n', (1759, 1761), False, 'import time\n'), ((508, 527), 'numpy.zeros', 'numpy.zeros', (['(3, 3)'], {}), '((3, 3))\n', (519, 527), False, 'import numpy\n'), ((654, 672), 'numpy.ones', 'numpy.ones', (['(2, 2)'], {}), '((2, 2))\n', (664, 672), False, 'import numpy\n'), ((1642, 1654), 'copy.copy', 'copy.copy', (['S'], {}), '(S)\n', (1651, 1654), False, 'import copy\n'), ((1723, 1739), 'copy.deepcopy', 'copy.deepcopy', (['S'], {}), '(S)\n', (1736, 1739), False, 'import copy\n'), ((279, 318), 'numpy.array', 'numpy.array', (['[1, 2, 3]'], {'dtype': 'numpy.int'}), '([1, 2, 3], dtype=numpy.int)\n', (290, 318), False, 'import numpy\n'), ((825, 871), 'numpy.array', 'numpy.array', (['[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]'], {}), '([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6])\n', (836, 871), False, 'import numpy\n'), ((1284, 1331), 'collections.OrderedDict', 'OrderedDict', (["{'substate_x': 0, 'substate_w': 0}"], {}), "({'substate_x': 0, 'substate_w': 0})\n", (1295, 1331), False, 'from collections import OrderedDict\n'), ((554, 572), 'numpy.ones', 'numpy.ones', (['(3, 3)'], {}), '((3, 3))\n', (564, 572), False, 'import numpy\n'), ((578, 596), 'numpy.ones', 'numpy.ones', (['(3, 3)'], {}), '((3, 3))\n', (588, 596), False, 'import numpy\n'), ((699, 717), 'numpy.ones', 'numpy.ones', (['(2, 2)'], {}), '((2, 2))\n', (709, 717), False, 'import numpy\n'), ((725, 743), 'numpy.ones', 'numpy.ones', (['(2, 2)'], {}), '((2, 2))\n', (735, 743), False, 'import numpy\n'), ((165, 184), 'numpy.array', 'numpy.array', (['[-1.0]'], {}), '([-1.0])\n', (176, 184), False, 'import numpy\n'), ((200, 218), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (211, 218), False, 'import numpy\n')] |
import numpy as np
from linear_models.logistic_regression import LogisticRegression
class Perceptron(LogisticRegression):
"""A simple (binary classification) perceptron. Uses binary cross-entropy loss for updating weights.
>>NOTE: it inherits most of the code from logistic regression for simplicity.<<
Parameters
----------
learning_rate : float, default = 0.2
The learning rate for gradient descent or SGD.
method : str, default = 'gradient'
Method of fitting the model.
'gradient' for gradient descent, 'sgd' for stochastic gradient descent.
reg : str, default = None
Regularization method.
For L1 or L2, use 'l1' or 'l2' respectively.
For elastic net method, use 'elastic'.
None for no regularization.
alpha : float, default = 0
Alpha parameter controlling the 'strength' of regularization.
l1_ratio : float, default = 0
Defines the ratio of L1 regularization. Only for elastic regularization option.
The penalty added to cost is l1_ratio * L1 + 0.5 * (1 - l1_ratio) * L2.
"""
def __init__(self, learning_rate=0.2, method='gradient', reg=None, alpha=0, l1_ratio=0):
super().__init__(learning_rate, method, reg, alpha, l1_ratio)
def predict(self, x):
"""Predict the class for given input.
Parameters
----------
x : array-like
Input array.
"""
return np.heaviside(np.dot(x, self.coef) + self.intercept, 1)
| [
"numpy.dot"
] | [((1468, 1488), 'numpy.dot', 'np.dot', (['x', 'self.coef'], {}), '(x, self.coef)\n', (1474, 1488), True, 'import numpy as np\n')] |
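A hypothetical usage sketch for the Perceptron above; it assumes the inherited LogisticRegression base class provides a fit() method, which is not shown in this snippet:

import numpy as np

X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = np.array([0, 0, 0, 1])          # AND gate
clf = Perceptron(learning_rate=0.5)
clf.fit(X, y)                       # assumed to be inherited
print(clf.predict(X))               # should approach [0, 0, 0, 1]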
if '__file__' in globals():
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import dezero as dz
import numpy as np
_NUM_ITER = 10
def f(x: dz.Variable) -> dz.Variable:
y = x ** 4 - 2 * x ** 2
return y
def gx2(x: np.ndarray) -> np.ndarray:
return 12 * x ** 2 - 4
if __name__ == '__main__':
x = dz.Variable(np.array(2.0))
for i in range(_NUM_ITER):
print(i, x)
y = f(x)
x.cleargrad()
y.backward()
x.data -= x.grad / gx2(x.data)
| [
"os.path.dirname",
"numpy.array"
] | [((377, 390), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (385, 390), True, 'import numpy as np\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n')] |
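For reference, the update x.data -= x.grad / gx2(x.data) above is Newton's method on f'(x) = 0:

    x_{k+1} = x_k - f'(x_k) / f''(x_k)

with f(x) = x^4 - 2x^2, where f'(x) = 4x^3 - 4x is obtained via backward() and f''(x) = 12x^2 - 4 is hard-coded in gx2.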
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_DiversityIndicator [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_DiversityIndicator&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerCorrDistDiv).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
import numpy as np
from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile
from numpy import sum as npsum
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, ylim, title
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import struct_to_dict, save_plot
from ConditionalFP import ConditionalFP
# -
# ## upload data
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_StocksS_P'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_StocksS_P'), squeeze_me=True)
Data = struct_to_dict(db['Data'])
# -
# ## compute the returns on the first 200 stocks in the database (conditioning variables)
# +
ret = diff(log(Data.Prices), 1, 1)
ret = ret[:200,:]
date = Data.Dates[1:]
q_ = ret.shape[0]
t_ = ret.shape[1]
# -
# ## Compute the Flexible probabilities conditioned via Entropy Pooling on each factor
# +
alpha = 0.2
# PRIOR
lam = 0.001
prior = exp(-lam*abs(arange(t_, 1 + -1, -1))).reshape(1,-1)
prior = prior / npsum(prior)
p = zeros((q_,t_))
rho2 = zeros((q_,q_))
distance = zeros((q_,q_))
diversity = zeros(q_)
for q in range(q_):
z = ret[q,:]
# conditioner
Conditioner = namedtuple('conditioner', ['Series', 'TargetValue', 'Leeway'])
Conditioner.Series = z.reshape(1,-1)
Conditioner.TargetValue = np.atleast_2d(z[-1])
Conditioner.Leeway = alpha
p[q,:] = ConditionalFP(Conditioner, prior)
# -
# ## Bhattacharyya coefficient and Hellinger distances
for q1 in range(q_):
for q2 in range(q_):
rho2[q1, q2] = npsum(sqrt(p[q1,:]*p[q2,:]))
distance[q1, q2] = sqrt(abs(1 - rho2[q1, q2]))
# ## Diversity indicator (UPGMA distance)
for q in range(q_):
diversity[q] = (1 / (q_-1))*(npsum(distance[q,:])-distance[q, q])
# ## Compute the historical correlation matrix
Hcorr = corrcoef(ret)
# ## Generate the figure
fig = figure()
# historical correlation
ax = plt.subplot2grid((3,9),(1,0),rowspan=2,colspan=4)
im = plt.imshow(Hcorr, aspect='equal')
plt.xticks(r_[array([1]), arange(50, 250, 50)])
plt.yticks(r_[array([1]), arange(50, 250, 50)])
yl = ylim()
plt.grid(False)
plt.title('Historical Correlation')
cax = plt.subplot2grid((3,9),(1,4),rowspan=2,colspan=1)
plt.colorbar(im, cax=cax)
# cb = plt.colorbar(ax1, cax = cax)
# diversity
ax = plt.subplot2grid((3,9),(0,5),rowspan=1,colspan=4)
plt.imshow(tile(diversity.reshape(1,-1),(40,1)))
plt.xticks(r_[array([1]), arange(50, 250, 50)])
plt.yticks([])
plt.title('Diversity')
# Hellinger distance
ax = plt.subplot2grid((3,9),(1,5),rowspan=2,colspan=4)
plt.imshow(distance, aspect='equal')
plt.xticks(r_[array([1]), arange(50, 250, 50)])
plt.yticks(r_[array([1]), arange(50, 250, 50)])
plt.title('Hellinger Distance')
plt.grid(False)
plt.tight_layout(w_pad=-0.1);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"numpy.log",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.atleast_2d",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.yticks",
"ConditionalFP.ConditionalFP",
"matplotlib.pyplot.ylim",
"numpy.abs",
"collections.namedtuple",
"nu... | [((1037, 1061), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (1050, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1414, 1440), 'ARPM_utils.struct_to_dict', 'struct_to_dict', (["db['Data']"], {}), "(db['Data'])\n", (1428, 1440), False, 'from ARPM_utils import struct_to_dict, save_plot\n'), ((1877, 1892), 'numpy.zeros', 'zeros', (['(q_, t_)'], {}), '((q_, t_))\n', (1882, 1892), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((1899, 1914), 'numpy.zeros', 'zeros', (['(q_, q_)'], {}), '((q_, q_))\n', (1904, 1914), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((1925, 1940), 'numpy.zeros', 'zeros', (['(q_, q_)'], {}), '((q_, q_))\n', (1930, 1940), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((1952, 1961), 'numpy.zeros', 'zeros', (['q_'], {}), '(q_)\n', (1957, 1961), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((2670, 2683), 'numpy.corrcoef', 'corrcoef', (['ret'], {}), '(ret)\n', (2678, 2683), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((2717, 2725), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (2723, 2725), False, 'from matplotlib.pyplot import figure, ylim, title\n'), ((2756, 2810), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 9)', '(1, 0)'], {'rowspan': '(2)', 'colspan': '(4)'}), '((3, 9), (1, 0), rowspan=2, colspan=4)\n', (2772, 2810), True, 'import matplotlib.pyplot as plt\n'), ((2811, 2844), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Hcorr'], {'aspect': '"""equal"""'}), "(Hcorr, aspect='equal')\n", (2821, 2844), True, 'import matplotlib.pyplot as plt\n'), ((2946, 2952), 'matplotlib.pyplot.ylim', 'ylim', ([], {}), '()\n', (2950, 2952), False, 'from matplotlib.pyplot import figure, ylim, title\n'), ((2953, 2968), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2961, 2968), True, 'import matplotlib.pyplot as plt\n'), ((2969, 3004), 'matplotlib.pyplot.title', 'plt.title', (['"""Historical Correlation"""'], {}), "('Historical Correlation')\n", (2978, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3065), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 9)', '(1, 4)'], {'rowspan': '(2)', 'colspan': '(1)'}), '((3, 9), (1, 4), rowspan=2, colspan=1)\n', (3027, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3061, 3086), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax'}), '(im, cax=cax)\n', (3073, 3086), True, 'import matplotlib.pyplot as plt\n'), ((3140, 3194), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 9)', '(0, 5)'], {'rowspan': '(1)', 'colspan': '(4)'}), '((3, 9), (0, 5), rowspan=1, colspan=4)\n', (3156, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3287, 3301), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3297, 3301), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3324), 'matplotlib.pyplot.title', 'plt.title', (['"""Diversity"""'], {}), "('Diversity')\n", (3311, 3324), True, 'import matplotlib.pyplot as plt\n'), ((3351, 3405), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 9)', '(1, 5)'], {'rowspan': '(2)', 'colspan': '(4)'}), '((3, 9), (1, 5), rowspan=2, colspan=4)\n', (3367, 3405), True, 'import matplotlib.pyplot as plt\n'), ((3401, 3437), 'matplotlib.pyplot.imshow', 'plt.imshow', (['distance'], 
{'aspect': '"""equal"""'}), "(distance, aspect='equal')\n", (3411, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3534, 3565), 'matplotlib.pyplot.title', 'plt.title', (['"""Hellinger Distance"""'], {}), "('Hellinger Distance')\n", (3543, 3565), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3581), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (3574, 3581), True, 'import matplotlib.pyplot as plt\n'), ((3582, 3610), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'w_pad': '(-0.1)'}), '(w_pad=-0.1)\n', (3598, 3610), True, 'import matplotlib.pyplot as plt\n'), ((711, 749), 'os.path.abspath', 'path.abspath', (['"""../../functions-legacy"""'], {}), "('../../functions-legacy')\n", (723, 749), True, 'import os.path as path\n'), ((1552, 1568), 'numpy.log', 'log', (['Data.Prices'], {}), '(Data.Prices)\n', (1555, 1568), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((1859, 1871), 'numpy.sum', 'npsum', (['prior'], {}), '(prior)\n', (1864, 1871), True, 'from numpy import sum as npsum\n'), ((2037, 2099), 'collections.namedtuple', 'namedtuple', (['"""conditioner"""', "['Series', 'TargetValue', 'Leeway']"], {}), "('conditioner', ['Series', 'TargetValue', 'Leeway'])\n", (2047, 2099), False, 'from collections import namedtuple\n'), ((2171, 2191), 'numpy.atleast_2d', 'np.atleast_2d', (['z[-1]'], {}), '(z[-1])\n', (2184, 2191), True, 'import numpy as np\n'), ((2237, 2270), 'ConditionalFP.ConditionalFP', 'ConditionalFP', (['Conditioner', 'prior'], {}), '(Conditioner, prior)\n', (2250, 2270), False, 'from ConditionalFP import ConditionalFP\n'), ((1244, 1283), 'os.path.join', 'os.path.join', (['GLOBAL_DB', '"""db_StocksS_P"""'], {}), "(GLOBAL_DB, 'db_StocksS_P')\n", (1256, 1283), False, 'import os\n'), ((1345, 1387), 'os.path.join', 'os.path.join', (['TEMPORARY_DB', '"""db_StocksS_P"""'], {}), "(TEMPORARY_DB, 'db_StocksS_P')\n", (1357, 1387), False, 'import os\n'), ((2401, 2426), 'numpy.sqrt', 'sqrt', (['(p[q1, :] * p[q2, :])'], {}), '(p[q1, :] * p[q2, :])\n', (2405, 2426), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((2456, 2477), 'numpy.abs', 'abs', (['(1 - rho2[q1, q2])'], {}), '(1 - rho2[q1, q2])\n', (2459, 2477), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((2576, 2597), 'numpy.sum', 'npsum', (['distance[q, :]'], {}), '(distance[q, :])\n', (2581, 2597), True, 'from numpy import sum as npsum\n'), ((2859, 2869), 'numpy.array', 'array', (['[1]'], {}), '([1])\n', (2864, 2869), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((2871, 2890), 'numpy.arange', 'arange', (['(50)', '(250)', '(50)'], {}), '(50, 250, 50)\n', (2877, 2890), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((2907, 2917), 'numpy.array', 'array', (['[1]'], {}), '([1])\n', (2912, 2917), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((2919, 2938), 'numpy.arange', 'arange', (['(50)', '(250)', '(50)'], {}), '(50, 250, 50)\n', (2925, 2938), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((3253, 3263), 'numpy.array', 'array', (['[1]'], {}), '([1])\n', (3258, 3263), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((3265, 3284), 'numpy.arange', 'arange', (['(50)', '(250)', '(50)'], {}), '(50, 
250, 50)\n', (3271, 3284), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((3452, 3462), 'numpy.array', 'array', (['[1]'], {}), '([1])\n', (3457, 3462), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((3464, 3483), 'numpy.arange', 'arange', (['(50)', '(250)', '(50)'], {}), '(50, 250, 50)\n', (3470, 3483), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((3500, 3510), 'numpy.array', 'array', (['[1]'], {}), '([1])\n', (3505, 3510), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((3512, 3531), 'numpy.arange', 'arange', (['(50)', '(250)', '(50)'], {}), '(50, 250, 50)\n', (3518, 3531), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n'), ((1804, 1826), 'numpy.arange', 'arange', (['t_', '(1 + -1)', '(-1)'], {}), '(t_, 1 + -1, -1)\n', (1810, 1826), False, 'from numpy import arange, zeros, diff, abs, log, exp, sqrt, array, r_, corrcoef, tile\n')] |
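For reference, the loops above compute the Bhattacharyya coefficient and the Hellinger distance between each pair of flexible-probability vectors,

    rho(p1, p2) = sum_t sqrt(p1_t * p2_t),    H(p1, p2) = sqrt(1 - rho(p1, p2)),

and the diversity of scenario q is its average Hellinger distance to all other scenarios.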
import numpy as np
from .utils import memo, validate_tuple
__all__ = ['binary_mask', 'r_squared_mask', 'cosmask', 'sinmask',
'theta_mask']
@memo
def binary_mask(radius, ndim):
"Elliptical mask in a rectangular array"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
return sum(r) <= 1
@memo
def N_binary_mask(radius, ndim):
return np.sum(binary_mask(radius, ndim))
@memo
def r_squared_mask(radius, ndim):
"Mask with values r^2 inside radius and 0 outside"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
r2 = np.sum(coords**2, 0).astype(int)
r2[sum(r) > 1] = 0
return r2
@memo
def x_squared_masks(radius, ndim):
"Returns ndim masks with values x^2 inside radius and 0 outside"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
masks = np.asarray(coords**2, dtype=int)
masks[:, sum(r) > 1] = 0
return masks
@memo
def theta_mask(radius):
"""Mask of values giving angular position relative to center. The angle is
defined according to ISO standards in which the angle is measured counter-
clockwise from the x axis, measured in a normal coordinate system with y-
axis pointing up and x axis pointing right.
In other words: for increasing angle, the coordinate moves counterclockwise
around the feature center starting on the right side.
However, in most images, the y-axis will point down so that the coordinate
will appear to move clockwise around the feature center.
"""
# 2D only
radius = validate_tuple(radius, 2)
tan_of_coord = lambda y, x: np.arctan2(y - radius[0], x - radius[1])
return np.fromfunction(tan_of_coord, [r * 2 + 1 for r in radius])
@memo
def sinmask(radius):
"Sin of theta_mask"
return np.sin(2*theta_mask(radius))
@memo
def cosmask(radius):
"Sin of theta_mask"
return np.cos(2*theta_mask(radius))
@memo
def gaussian_kernel(sigma, truncate=4.0):
"1D discretized gaussian"
lw = int(truncate * sigma + 0.5)
x = np.arange(-lw, lw+1)
result = np.exp(x**2/(-2*sigma**2))
return result / np.sum(result)
def get_slice(coords, shape, radius):
"""Returns the slice and origin that belong to ``slice_image``"""
# interpret parameters
ndim = len(shape)
radius = validate_tuple(radius, ndim)
coords = np.atleast_2d(np.round(coords).astype(int))
# drop features that have no pixels inside the image
in_bounds = np.array([(coords[:, i] >= -r) & (coords[:, i] < sh + r)
for i, sh, r in zip(range(ndim), shape, radius)])
coords = coords[np.all(in_bounds, axis=0)]
# return if no coordinates are left
if len(coords) == 0:
return tuple([slice(None, 0)] * ndim), None
# calculate the box
lower = coords.min(axis=0) - radius
upper = coords.max(axis=0) + radius + 1
# calculate the slices
origin = [None] * ndim
slices = [None] * ndim
for i, sh, low, up in zip(range(ndim), shape, lower, upper):
lower_bound_trunc = max(0, low)
upper_bound_trunc = min(sh, up)
slices[i] = slice(int(round(lower_bound_trunc)),
int(round(upper_bound_trunc)))
origin[i] = lower_bound_trunc
return tuple(slices), origin
def slice_image(pos, image, radius):
""" Slice a box around a group of features from an image.
The box is the smallest box that contains all coordinates up to `radius`
from any coordinate.
Parameters
----------
image : ndarray
The image that will be sliced
pos : iterable
An iterable (e.g. list or ndarray) that contains the feature positions
radius : number or tuple of numbers
Defines the size of the slice. Every pixel that has a distance lower or
equal to `radius` to a feature position is included.
Returns
-------
tuple of:
- the sliced image
- the coordinate of the slice origin (top-left pixel)
"""
slices, origin = get_slice(pos, image.shape, radius)
return image[slices], origin
def get_mask(pos, shape, radius, include_edge=True, return_masks=False):
""" Create a binary mask that masks pixels farther than radius to all
given feature positions.
Optionally returns the masks that recover the individual feature pixels from
a masked image, as follows: ``image[mask][masks_single[i]]``
Parameters
----------
pos : ndarray (N x 2 or N x 3)
Feature positions
shape : tuple
The shape of the image
radius : number or tuple
Radius of the individual feature masks
include_edge : boolean, optional
Determine whether pixels at exactly one radius from a position are
included. Default True.
return_masks : boolean, optional
Also return masks that recover the single features from a masked image.
Default False.
Returns
-------
ndarray containing a binary mask
if return_masks==True, returns a tuple of [masks, masks_singles]
"""
ndim = len(shape)
radius = validate_tuple(radius, ndim)
pos = np.atleast_2d(pos)
if include_edge:
in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) <= 1
for p in pos]
else:
in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) < 1
for p in pos]
mask_total = np.any(in_mask, axis=0).T
if return_masks:
masks_single = np.empty((len(pos), mask_total.sum()), dtype=bool)
for i, _in_mask in enumerate(in_mask):
masks_single[i] = _in_mask.T[mask_total]
return mask_total, masks_single
else:
return mask_total
def mask_image(pos, image, radius, origin=None, invert=False,
include_edge=None):
""" Masks an image so that pixels farther than radius to all given feature
positions become 0.
Parameters
----------
pos : ndarray
Feature positions (N x 2 or N x 3)
image : ndarray
radius : number or tuple
Radius of the individual feature masks
origin : tuple, optional
The topleft coordinate (origin) of the image.
invert : boolean, optional
If invert==True, the features instead of the background will become 0.
include_edge : boolean, optional
Determine whether pixels at exactly one radius from a position are
included in the feature mask.
Defaults to True if invert==False, and to False if invert==True.
"""
if origin is not None:
pos = np.atleast_2d(pos) - np.array(origin)[np.newaxis, :]
if include_edge is None:
include_edge = not invert
mask_cluster = get_mask(pos, image.shape, radius, include_edge=include_edge)
if invert:
mask_cluster = ~mask_cluster
return image * mask_cluster.astype(np.uint8)
| [
"numpy.atleast_2d",
"numpy.fromfunction",
"numpy.round",
"numpy.asarray",
"numpy.any",
"numpy.indices",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.arctan2",
"numpy.meshgrid",
"numpy.all",
"numpy.arange"
] | [((1551, 1585), 'numpy.asarray', 'np.asarray', (['(coords ** 2)'], {'dtype': 'int'}), '(coords ** 2, dtype=int)\n', (1561, 1585), True, 'import numpy as np\n'), ((2371, 2431), 'numpy.fromfunction', 'np.fromfunction', (['tan_of_coord', '[(r * 2 + 1) for r in radius]'], {}), '(tan_of_coord, [(r * 2 + 1) for r in radius])\n', (2386, 2431), True, 'import numpy as np\n'), ((2741, 2763), 'numpy.arange', 'np.arange', (['(-lw)', '(lw + 1)'], {}), '(-lw, lw + 1)\n', (2750, 2763), True, 'import numpy as np\n'), ((2775, 2809), 'numpy.exp', 'np.exp', (['(x ** 2 / (-2 * sigma ** 2))'], {}), '(x ** 2 / (-2 * sigma ** 2))\n', (2781, 2809), True, 'import numpy as np\n'), ((5813, 5831), 'numpy.atleast_2d', 'np.atleast_2d', (['pos'], {}), '(pos)\n', (5826, 5831), True, 'import numpy as np\n'), ((291, 315), 'numpy.arange', 'np.arange', (['(-rad)', '(rad + 1)'], {}), '(-rad, rad + 1)\n', (300, 315), True, 'import numpy as np\n'), ((449, 470), 'numpy.array', 'np.array', (['[points[0]]'], {}), '([points[0]])\n', (457, 470), True, 'import numpy as np\n'), ((798, 822), 'numpy.arange', 'np.arange', (['(-rad)', '(rad + 1)'], {}), '(-rad, rad + 1)\n', (807, 822), True, 'import numpy as np\n'), ((956, 977), 'numpy.array', 'np.array', (['[points[0]]'], {}), '([points[0]])\n', (964, 977), True, 'import numpy as np\n'), ((1294, 1318), 'numpy.arange', 'np.arange', (['(-rad)', '(rad + 1)'], {}), '(-rad, rad + 1)\n', (1303, 1318), True, 'import numpy as np\n'), ((1452, 1473), 'numpy.array', 'np.array', (['[points[0]]'], {}), '([points[0]])\n', (1460, 1473), True, 'import numpy as np\n'), ((2319, 2359), 'numpy.arctan2', 'np.arctan2', (['(y - radius[0])', '(x - radius[1])'], {}), '(y - radius[0], x - radius[1])\n', (2329, 2359), True, 'import numpy as np\n'), ((2822, 2836), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (2828, 2836), True, 'import numpy as np\n'), ((3320, 3345), 'numpy.all', 'np.all', (['in_bounds'], {'axis': '(0)'}), '(in_bounds, axis=0)\n', (3326, 3345), True, 'import numpy as np\n'), ((6098, 6121), 'numpy.any', 'np.any', (['in_mask'], {'axis': '(0)'}), '(in_mask, axis=0)\n', (6104, 6121), True, 'import numpy as np\n'), ((385, 420), 'numpy.meshgrid', 'np.meshgrid', (['*points'], {'indexing': '"""ij"""'}), "(*points, indexing='ij')\n", (396, 420), True, 'import numpy as np\n'), ((892, 927), 'numpy.meshgrid', 'np.meshgrid', (['*points'], {'indexing': '"""ij"""'}), "(*points, indexing='ij')\n", (903, 927), True, 'import numpy as np\n'), ((1052, 1074), 'numpy.sum', 'np.sum', (['(coords ** 2)', '(0)'], {}), '(coords ** 2, 0)\n', (1058, 1074), True, 'import numpy as np\n'), ((1388, 1423), 'numpy.meshgrid', 'np.meshgrid', (['*points'], {'indexing': '"""ij"""'}), "(*points, indexing='ij')\n", (1399, 1423), True, 'import numpy as np\n'), ((7250, 7268), 'numpy.atleast_2d', 'np.atleast_2d', (['pos'], {}), '(pos)\n', (7263, 7268), True, 'import numpy as np\n'), ((3065, 3081), 'numpy.round', 'np.round', (['coords'], {}), '(coords)\n', (3073, 3081), True, 'import numpy as np\n'), ((7271, 7287), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (7279, 7287), True, 'import numpy as np\n'), ((5882, 5899), 'numpy.indices', 'np.indices', (['shape'], {}), '(shape)\n', (5892, 5899), True, 'import numpy as np\n'), ((6001, 6018), 'numpy.indices', 'np.indices', (['shape'], {}), '(shape)\n', (6011, 6018), True, 'import numpy as np\n')] |
# Uses the encoder to search for input images matching the encoded features
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from imutils import build_montages
from imutils import paths
from sklearn.model_selection import train_test_split
import config.autoencoderconfig as config
import numpy as np
import argparse
import pickle
import cv2
import random
def euclidean(a, b):
return np.linalg.norm(a-b)
def perform_search(features, index, max_results=64):
results = []
for i in range(0, len(index["features"])):
d = euclidean(features, index["features"][i])
results.append((d, i))
results = sorted(results)[:max_results]
return results
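def _perform_search_demo():
    # A minimal, hypothetical sanity check (illustrative data only, not part
    # of the trained pipeline): query a tiny synthetic index and expect the
    # exactly matching entry to come back first.
    demo_index = {"features": [np.zeros(8), np.ones(8)]}
    results = perform_search(np.ones(8), demo_index, max_results=1)
    assert results[0] == (0.0, 1)  # the all-ones entry matches with distance 0
    return results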
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, type=str, help="path to trained autoencoder")
ap.add_argument("-i", "--index", required=True, type=str, help="path to index of features")
ap.add_argument("-s", "--sample", type=int, default=10, help="number of testing queries to perform")
args = vars(ap.parse_args())
print("[INFO] Loading dataset...")
images_paths = list(paths.list_images(config.IMAGES_PATH))
data = []
for img_path in images_paths:
img = load_img(img_path)
img = img_to_array(img)
data.append(img)
# Normalize dataset
data = np.asarray(data)
data = data.astype("float32") / 255.0
trainX = np.asarray(data)
_, testX = train_test_split(data, test_size=0.25, random_state=42)
print("[INFO] Loading encoder...")
autoencoder = load_model(args["model"])
encoder = Model(inputs=autoencoder.inputs, outputs=autoencoder.get_layer("encoder").output)
print("[INFO] Loading image features index...")
with open(args["index"], "rb") as f:
index = pickle.loads(f.read())
print("[INFO] Encoding testing images...")
features = encoder.predict(testX)
# Randomly sample from test set for query
query_idxs = list(range(0, testX.shape[0]))
query_idxs = np.random.choice(query_idxs, size=args["sample"], replace=False)
for i in query_idxs:
        # take the features of the current image, find similar images, and collect the result images
query_features = features[i]
results = perform_search(query_features, index, max_results=10)
images = []
for (d,j) in results:
# grab result image, convert to [0, 255]
image = (trainX[j] * 255).astype("uint8")
images.append(image)
query = (testX[i] * 255).astype("uint8")
cv2.imwrite("query/query_{}.jpg".format(i), query)
montage = build_montages(images, (256, 256), (2, 5))[0]
cv2.imwrite("query/results_{}.jpg".format(i), montage) | [
"tensorflow.keras.preprocessing.image.load_img",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"numpy.random.choice",
"numpy.asarray",
"tensorflow.keras.models.load_model",
"imutils.paths.list_images",
"numpy.linalg.norm",
"imutils.build_montages",
"tensorflow.keras.prepro... | [((549, 570), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (563, 570), True, 'import numpy as np\n'), ((874, 899), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (897, 899), False, 'import argparse\n'), ((1511, 1527), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1521, 1527), True, 'import numpy as np\n'), ((1583, 1599), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1593, 1599), True, 'import numpy as np\n'), ((1615, 1670), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(data, test_size=0.25, random_state=42)\n', (1631, 1670), False, 'from sklearn.model_selection import train_test_split\n'), ((1729, 1754), 'tensorflow.keras.models.load_model', 'load_model', (["args['model']"], {}), "(args['model'])\n", (1739, 1754), False, 'from tensorflow.keras.models import load_model\n'), ((2182, 2246), 'numpy.random.choice', 'np.random.choice', (['query_idxs'], {'size': "args['sample']", 'replace': '(False)'}), "(query_idxs, size=args['sample'], replace=False)\n", (2198, 2246), True, 'import numpy as np\n'), ((1297, 1334), 'imutils.paths.list_images', 'paths.list_images', (['config.IMAGES_PATH'], {}), '(config.IMAGES_PATH)\n', (1314, 1334), False, 'from imutils import paths\n'), ((1399, 1417), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img_path'], {}), '(img_path)\n', (1407, 1417), False, 'from tensorflow.keras.preprocessing.image import load_img\n'), ((1432, 1449), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1444, 1449), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((2792, 2834), 'imutils.build_montages', 'build_montages', (['images', '(256, 256)', '(2, 5)'], {}), '(images, (256, 256), (2, 5))\n', (2806, 2834), False, 'from imutils import build_montages\n')] |
# coding=UTF-8
"""
--------------------------------------------------------
Copyright (c) ****-2018 ESR, Inc. All rights reserved.
--------------------------------------------------------
Author: <NAME>
Date: 2018/10/29
Design Name: The user interface of the DDS software
Purpose: Design an interface software for customers to
set the dds hardware using Python 3.6.3
--------------------------------------------------------
"""
import ctypes
import time
# import csv
# import threading
import numpy as np
import os
def num_to_bytes(num, bytenum, high_head=True):
if high_head:
return np.array([num], dtype='>u8').tobytes()[-bytenum:] # big-endian
else:
return np.array([num], dtype='<u8').tobytes()[:bytenum] # little-endian
def bytes_to_num(bytes_, signed_=True, big_=True):
if not signed_:
if big_:
return int.from_bytes(bytes_, byteorder='big')
else:
return int.from_bytes(bytes_, byteorder='little')
else:
if big_:
return int.from_bytes(bytes_, byteorder='big', signed=True)
else:
return int.from_bytes(bytes_, byteorder='little', signed=True)
def bytes_to_hexstr(s, space=True):
    # ss = s_str.encode('hex')  # original solution in Python 2
    ss = s.hex()  # Python 3 equivalent
if space:
sl = [ss[i:i + 2] for i in range(0, len(ss), 2)]
return ' '.join(sl)
else:
return ss
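def _byte_helpers_demo():
    # A minimal round-trip sketch for the helpers above (an illustrative
    # addition; the values are easy to verify by hand).
    assert num_to_bytes(0x1234, 2) == b'\x12\x34'              # big-endian
    assert bytes_to_num(b'\x12\x34', signed_=False) == 0x1234
    assert bytes_to_hexstr(b'\x12\x34') == '12 34'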
# def dec2bin_mal(mal, dig):
# mal_1 = mal - int(mal)
# bins = ''
# while mal_1 >= 0:
# mal_1 *= 2
# if mal_1 >= 1 and len(bins) < dig:
# bins = bins + '1'
# mal_1 = mal_1 - int(mal_1)
# elif mal_1 < 1 and len(bins) < dig:
# bins = bins + '0'
# mal_1 = mal_1 - int(mal_1)
# else:
# break
# return bins
# def DEC_to_BIN(num_, byte_num, dem_dig):
# num = float(num_)
# integer = num // 1
# decimal = num - integer
# integer_BIN = str(bin(int(integer)))
# decimal_BIN = dec2bin_mal(decimal, dem_dig)
# out_put = integer_BIN + decimal_BIN
# out_put1 = int(out_put, 2)
# output = num_to_bytes(out_put1, byte_num)
# return output
# class ReadCSV(object):
# def __init__(self, PATH):
# self.path = PATH
# self.csvHand = open(self.path, "r")
# self.readcsv = csv.reader(self.csvHand)
# self.buffer = [row for row in self.readcsv]
# self.csvHand.close()
# # print self.buffer
#
# def demarcate(self):
# # print self.buffer
# fine_data1 = [row for row in self.buffer]
# return fine_data1
class GenWave(object):
"""A class used for the Waveform Generation, including DDS and TTL waveform
:param hp_channel: A flag used to distinguish the channel.
True/False means 2G5-DDS/1G-DDS
:type hp_channel: bool
"""
def __init__(self):
# scan list : ["no scan", "amp", "freq", "phase", "time"]
self.SCAN_BYTES_LIST = [b'\x00\x00',
b'\x00\x40', b'\x00\x50', b'\x00\x60', b'\x00\x70',
# b'\x00\x00',
b'\x00\x80', b'\x00\x90', b'\x00\xA0', b'\x00\xB0']
    # Level 1 -- basic conversion operations for DDS waveforms
    # Includes three items: amplitude_hex, frequency_hex, phase_hex
def amplitude_hex(self, amp, hp_channel):
# """
# :param amp: float, range: [0,1]
# :param hp_channel: bool, True/False means 2G5-DDS/1G-DDS
# :return: bytes, length = 2 (12/14bit valid)
# """
"""To get the bytes format of amplitude
:param amp: Amplitude of DDS
:type amp: float
:param hp_channel: A flag used to distinguish the channel. True/False means 2G5-DDS/1G-DDS
:type hp_channel: bool
:returns: Bytes representing the amplitude
:rtype: bytes, length = 2 (12/14bit valid)
"""
if hp_channel:
full_scale = 2**12-1
else:
full_scale = 2**14-1
amp_dec = int(amp*full_scale)
amp_byte = num_to_bytes(amp_dec, 2) # transfer the decimal int into hex str
return amp_byte
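    # Worked example (a sketch with illustrative numbers): on a 2G5 channel,
    # amplitude_hex(0.5, True) gives int(0.5 * (2**12 - 1)) = 2047, i.e. the
    # two bytes b'\x07\xff'.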
def frequency_hex(self, freq, hp_channel):
# """
# :param freq: int or float, unit: MHz
# :param hp_channel: bool, True/False means 2G5-DDS/1G-DDS
# :return: [bytes, float, float]; [length, unit, unit] = [4 (32bit valid), MHz, Hz]
# """
"""To get the bytes format of frequency
:param freq: Frequency of DDS
:type freq: float
:param hp_channel: A flag used to distinguish the channel. True/False means 2G5-DDS/1G-DDS
:type hp_channel: bool
:returns: A list of the results after digitizing
:rtype: [bytes, float, float]; [length, unit, unit] = [4 (32bit valid), MHz, Hz]
"""
byte_full_scale = 2**32
if hp_channel:
sam_rate = 2500.0
else:
sam_rate = 1000.0
freq_dec = int(round(freq*byte_full_scale/sam_rate))
        # round() picks the nearest 32-bit frequency tuning word
freq_byte = num_to_bytes(freq_dec, 4) # the freq unit is MHz
real_freq = freq_dec*sam_rate/byte_full_scale
diff_freq = (real_freq - freq)*10**6 # the diff_freq unit is Hz
"""
return specification:
1--bytes for the freq
2--the real digital freq
3--the difference of freq (real - set)
"""
return [freq_byte, real_freq, diff_freq]
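    # Worked example (a sketch with illustrative numbers): frequency_hex(100, True)
    # rounds 100 * 2**32 / 2500 = 171798691.84 to the tuning word 171798692
    # (0x0a3d70a4). One LSB is 2500e6 / 2**32 ~ 0.58 Hz, so rounding up by
    # 0.16 LSB gives diff_freq ~ +0.09 Hz.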
def phase_hex(self, phase, hp_channel):
# """
# :param phase: float, unit: pi, range: [0,2)
# :param hp_channel: bool, True/False means 2G5-DDS/1G-DDS
# :return: bytes, length = 2 (16bit valid)
# """
"""To get the bytes format of phase
:param phase: Frequency of DDS
:type phase: float
:param hp_channel: A flag used to distinguish the channel. True/False means 2G5-DDS/1G-DDS
:type hp_channel: bool
:returns: Bytes representing the phase
:rtype: bytes, length = 2 (16bit valid)
"""
if hp_channel:
phase_dec = int(((phase-1) % 2)*2**15)
# add a pi offset to the phase
else:
phase_dec = int((phase % 2)*2**15)
phase_byte = num_to_bytes(phase_dec, 2)
return phase_byte
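    # Worked example (a sketch with illustrative numbers): phase is given in
    # units of pi. On a 1G channel, phase_hex(0.5, False) gives
    # int(0.5 * 2**15) = 16384 (b'\x40\x00'); the 2G5 path adds a pi offset
    # first, so phase_hex(0.5, True) gives int(1.5 * 2**15) = 49152 (b'\xc0\x00').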
def address_gen(self, data_number):
"""地址生成:用于写入以及播放
:param data_number: int
:return: [bytes, bytes]; length = 2 (for each one)
"""
dds_start = num_to_bytes(0, 2)
dds_stop = num_to_bytes(data_number - 1, 2)
return [dds_start, dds_stop]
    # Level 2 -- DDS and TTL waveform generation, plus address generation
    # Includes four items: dds_data_form, ttl_data_form, pulse_data_encode, sequence_address_gen
def dds_data_form(self, hp_channel, amp, freq, phase):
"""DDS波形的生成操作——调用上面的3个,生成DDS波形
:param hp_channel: bool, True/False means 2G5-DDS/1G-DDS
:param amp: float, range: [0,1]
:param freq: int or float, unit: MHz
:param phase: float, unit: pi, range: [0,2)
:return: [bytes, float, float]; [length, unit, unit] = [8, MHz, Hz]
"""
amp_word = self.amplitude_hex(amp, hp_channel)
freq_list = self.frequency_hex(freq, hp_channel)
phase_word = self.phase_hex(phase, hp_channel)
if hp_channel:
dds_word = freq_list[0] + amp_word + phase_word
else:
dds_word = amp_word + phase_word + freq_list[0]
"""
return specification:
1--bytes for one single waveform of DDS play sequence
2--the real digital freq
3--the difference of freq (real - set)
"""
return [dds_word, freq_list[1], freq_list[2]]
def ttl_data_form(self, hp_channel, level, time):
"""TTL波形的基本转换操作,只转换单个TTL的高/低电平(并且返回实际的时间值、以及实际的与设置值的偏差)
:param hp_channel: bool, True/False means 2G5-DDS/1G-DDS
:param level: str, 'high'/'low'
:param time: float, unit: us
        :return: [bytes, int, float]; the 2nd item is the digitized time in clock ticks
        """
        ttl_sign_bit = 2**26
        if hp_channel:
            real_time = int(round(time*156.25))-1  # clock ticks, 156.25 per us
        else:
            real_time = int(round(time*125.0))-1  # clock ticks, 125 per us
        diff_time = real_time - time
if level == 'high':
single_ttl = num_to_bytes(real_time + ttl_sign_bit, 4)
else:
single_ttl = num_to_bytes(real_time, 4)
"""
return specification:
1--bytes for one single waveform of TTL play sequence
2--the real digital time
3--the difference of time (real - set)
"""
return [single_ttl, real_time, diff_time]
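    # Worked example (a sketch with illustrative numbers): the timer counts
    # clock ticks, 156.25 per us on a 2G5 channel, so
    # ttl_data_form(True, 'high', 5) stores int(round(5 * 156.25)) - 1 = 780
    # ticks, with bit 26 set to mark the high level.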
def pulse_data_encode(self, hp_channel, raw_data_list):
"""波形的转换操作; 调用DDS与TTL的转换(兼容一个DDS对应多个TTL波形的模式)
:param hp_channel: bool, True/False means 2G5-DDS/1G-DDS
:param raw_data_list: [scan_sign, [A, f(MHz), fai(pi)], [level, time],..]
: scan_sign: int, [0,1, .. ,4]--["no scan", "amp", "freq", "phase", "time"]
: amp: float, range: [0,1]
: freq: int or float, unit: MHz
: phase: float, unit: pi, range: [0,2)
: level: str, 'high'/'low'
: time: float, unit: us
        :return: [bytes, bytes, int]; [length_1st, length_2nd] = [10*sequence_number, 4*sequence_number]
"""
dds_data = b''
ttl_data = b''
# index = 0
sequence_number = len(raw_data_list)
# ttl_number = 0
for index_1 in range(sequence_number):
# temp_ttl_num = len(raw_data_list[index_1])-1 # the first one must be dds_list
# ttl_number += temp_ttl_num
scan_sign = raw_data_list[index_1][0]
dds_temp_list = raw_data_list[index_1][1]
# print(dds_temp_list)
dds_data_list = self.dds_data_form(hp_channel, dds_temp_list[0], dds_temp_list[1], dds_temp_list[2])
dds_data += self.SCAN_BYTES_LIST[scan_sign] + dds_data_list[0]
ttl_temp_list = raw_data_list[index_1][2]
ttl_data_list = self.ttl_data_form(hp_channel, ttl_temp_list[0], ttl_temp_list[1])
ttl_data += ttl_data_list[0]
# print bytes_to_hexstr(dds_data)
# print bytes_to_hexstr(ttl_data)
# print ' '
"""
return specification:
dds_number is len(dds_data)/10
ttl_number is len(ttl_data)/4
"""
return [dds_data, ttl_data, sequence_number]
    # Level 3 -- sequence generation
    # Only one item: pulse_data_gen
def pulse_data_gen(self, hp_channel, raw_data_list):
"""序列生成:调用如上两个,生成DDS与ttl的波形,并且也生成地址
:param hp_channel: bool, True/False means 2G5-DDS/1G-DDS
:param raw_data_list: [[A,f(MHz),fai(pi)],[level,time],..]
: amp: float, range: [0,1]
: freq: int or float, unit: MHz
: phase: float, unit: pi, range: [0,2)
: level: str, 'high'/'low'
: time: float, unit: us
        :return: [bytes, bytes, bytes]; length_3rd = 4
"""
encode_data_list = self.pulse_data_encode(hp_channel, raw_data_list)
sequence_number = len(raw_data_list)
# ttl_number = encode_data_list[3]
address_list = self.address_gen(sequence_number)
dds_download_data = address_list[0] + address_list[1] + encode_data_list[0]
ttl_download_data = address_list[0] + address_list[1] + encode_data_list[1]
play_address = address_list[0] + address_list[1]
"""
return specification:
dds_number is len(dds_data[4:])/10
ttl_number is len(ttl_data[4:])/4
"""
return [dds_download_data, ttl_download_data, play_address]
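    # A hypothetical end-to-end sketch (the parameter values are illustrative
    # only): one 100 MHz pulse followed by a quiet step on a 2G5 channel.
    #
    #   gw = GenWave()
    #   seq = [[0, [0.5, 100, 0.0], ['high', 5]],
    #          [0, [0.0, 0, 0.0], ['low', 5]]]
    #   dds_dl, ttl_dl, play_addr = gw.pulse_data_gen(True, seq)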
    # ex -- raw-data pre-processing
    # Purpose: add head/tail waveforms to the experimental sequence, so that there is no spurious output when no experiment is running.
    # Level 1 -- basic operations (split into head/tail handling)
def raw_data_list_head(self, raw_data_list):
""" this function adds a 'head' to the wave_data_list"""
        raw_data_list.insert(0, [0, [0, 0, 0], ['low', 5]])
# return(raw_data_list)
def raw_data_list_tail(self, raw_data_list):
""" this function adds a 'tail' to the wave_data_list to end the play"""
raw_data_list.extend([[0, [0, 0, 0], ['low', 5]]]) # no scan and no waveform for 5us
# raw_data_list.append([[0,0,0],['low',5]])
    # the list is modified in place: calling self.raw_data_list_tail(a) is enough, no assignment needed
def raw_data_list_head_a(self, raw_data_list):
del raw_data_list[0]
def raw_data_list_tail_a(self, raw_data_list):
raw_data_list.pop()
    # Level 2 -- higher-order operations (pre-processing and post-processing only)
def raw_data_list_pro(self, raw_data_list):
# self.raw_data_list_head(raw_data_list)
self.raw_data_list_tail(raw_data_list)
def raw_data_list_after_pro(self, raw_data_list):
# self.raw_data_list_head_a(raw_data_list)
self.raw_data_list_tail_a(raw_data_list)
#################################################################
# Scan data generation [basic functions]
#
# 5 functions
#################################################################
def scan_gen_0(self, scan_para_list):
"""To get the scan download data for "no scan"
:param scan_para_list: list of [N_i, 0, 0...]
:type scan_para_list: list
:returns: Bytes representing the data byte stream
:rtype: bytes, length = 6 * len(scan_para_list)
"""
scan_data_bytes = b''
cnt_number = 0
para_len = len(scan_para_list[0]) - 1
for index in range(len(scan_para_list)):
            cnt_number += scan_para_list[index][0]  # e.g. for N_i = 2 the point repeats twice; the stored bytes are N_i - 1
scan_number_bytes = num_to_bytes((scan_para_list[index][0]-1) % (2**16), 2)
scan_data_bytes += scan_number_bytes
for para_index in range(para_len):
scan_para_bytes = num_to_bytes(0, 4)
scan_data_bytes += scan_para_bytes + scan_para_bytes
return [scan_data_bytes, cnt_number]
def scan_gen_1(self, scan_para_list):
"""To get the scan download data for "amp"
:param scan_para_list: list of [N_i, amp1, amp2 ...]
:type scan_para_list: list
:returns: Bytes representing the data byte stream
:rtype: bytes, length = 10 * len(scan_para_list)
"""
scan_data_bytes = b''
cnt_number = 0
para_len = len(scan_para_list[0]) - 1
for index in range(len(scan_para_list)):
            cnt_number += scan_para_list[index][0]  # e.g. for N_i = 2 the point repeats twice; the stored bytes are N_i - 1
scan_number_bytes = num_to_bytes((scan_para_list[index][0]-1) % (2**16), 2)
scan_data_bytes += scan_number_bytes
for para_index in range(para_len):
scan_para_bytes_2g5 = num_to_bytes(0, 2) + self.amplitude_hex(scan_para_list[index][para_index+1], True)
scan_para_bytes_1g = num_to_bytes(0, 2) + self.amplitude_hex(scan_para_list[index][para_index+1], False)
scan_data_bytes += scan_para_bytes_2g5 + scan_para_bytes_1g
return [scan_data_bytes, cnt_number]
def scan_gen_2(self, scan_para_list):
"""To get the scan download data for "freq"
:param scan_para_list: list of [N_i, freq1, freq2...]
:type scan_para_list: list
:returns: Bytes representing the data byte stream
:rtype: bytes, length = 10 * len(scan_para_list)
"""
scan_data_bytes = b''
cnt_number = 0
para_len = len(scan_para_list[0]) - 1
for index in range(len(scan_para_list)):
            cnt_number += scan_para_list[index][0]  # e.g. for N_i = 2 the point repeats twice; the stored bytes are N_i - 1
scan_number_bytes = num_to_bytes((scan_para_list[index][0]-1) % (2**16), 2)
scan_data_bytes += scan_number_bytes
for para_index in range(para_len):
scan_para_bytes_2g5 = self.frequency_hex(scan_para_list[index][para_index+1], True)[0]
scan_para_bytes_1g = self.frequency_hex(scan_para_list[index][para_index+1], False)[0]
scan_data_bytes += scan_para_bytes_2g5 + scan_para_bytes_1g
return [scan_data_bytes, cnt_number]
def scan_gen_3(self, scan_para_list):
"""To get the scan download data for "phase"
:param scan_para_list: list of [N_i, phase1, phase2...]
:type scan_para_list: list
:returns: Bytes representing the data byte stream
:rtype: bytes, length = 10 * len(scan_para_list)
"""
scan_data_bytes = b''
cnt_number = 0
para_len = len(scan_para_list[0]) - 1
for index in range(len(scan_para_list)):
            cnt_number += scan_para_list[index][0]  # e.g. for N_i = 2 the point repeats twice; the stored bytes are N_i - 1
scan_number_bytes = num_to_bytes((scan_para_list[index][0]-1) % (2**16), 2)
scan_data_bytes += scan_number_bytes
for para_index in range(para_len):
scan_para_bytes_2g5 = num_to_bytes(0, 2) + self.phase_hex(scan_para_list[index][para_index+1], True)
scan_para_bytes_1g = num_to_bytes(0, 2) + self.phase_hex(scan_para_list[index][para_index+1], False)
scan_data_bytes += scan_para_bytes_2g5 + scan_para_bytes_1g
return [scan_data_bytes, cnt_number]
def scan_gen_4(self, scan_para_list):
"""To get the scan download data for "time"
:param scan_para_list: list of [N_i, time1, time2...]
:type scan_para_list: list
:returns: Bytes representing the data byte stream
:rtype: bytes, length = 10 * len(scan_para_list)
"""
scan_data_bytes = b''
cnt_number = 0
para_len = len(scan_para_list[0]) - 1
for index in range(len(scan_para_list)):
            cnt_number += scan_para_list[index][0]  # e.g. for N_i = 2 the point repeats twice; the stored bytes are N_i - 1
scan_number_bytes = num_to_bytes((scan_para_list[index][0]-1) % (2**16), 2)
scan_data_bytes += scan_number_bytes
for para_index in range(para_len):
scan_para_bytes_2g5 = self.ttl_data_form(True, 'low', scan_para_list[index][para_index+1])[0]
scan_para_bytes_1g = self.ttl_data_form(False, 'low', scan_para_list[index][para_index+1])[0]
scan_data_bytes += scan_para_bytes_2g5 + scan_para_bytes_1g
return [scan_data_bytes, cnt_number]
#################################################################
# Scan data generation [higher-order functions]
#
# Only 1 function
#################################################################
def scan_data_gen(self, var_type1, scan_para_list):
"""
:param var_type1: [0,1,2,3,4] represents ["no scan", "amp", "freq", "phase", "time"]
:type var_type1: int
:param scan_para_list: list (of [data, N_i])
:type scan_para_list: list
:returns: list of [download data, cnt_number], length = 4 + 18*len(scan_para_list)
:rtype: list
"""
if var_type1 == 0:
scan_para_gen = self.scan_gen_0(scan_para_list)
elif var_type1 == 1:
scan_para_gen = self.scan_gen_1(scan_para_list)
elif var_type1 == 2:
scan_para_gen = self.scan_gen_2(scan_para_list)
elif var_type1 == 3:
scan_para_gen = self.scan_gen_3(scan_para_list)
elif var_type1 == 4:
scan_para_gen = self.scan_gen_4(scan_para_list)
else:
print('incorrect input for var_type')
exit()
address_list = self.address_gen(len(scan_para_list))
scan_download_data = address_list[0] + address_list[1] + scan_para_gen[0]
return [scan_download_data, scan_para_gen[1]]
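    # A hypothetical sketch (illustrative values): scan the frequency over
    # three points, repeating each point twice.
    #
    #   gw = GenWave()
    #   scan_dl, cnt = gw.scan_data_gen(2, [[2, 100.0], [2, 100.5], [2, 101.0]])
    #   # cnt == 6; scan_dl carries both the 2G5 and the 1G tuning words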
#################################################################
# AD5371 data generation [basic functions]
#
# 4 functions
#################################################################
def ad5371_addr_gen(self, ch_num_list, raw_wave_list):
addr_start = num_to_bytes(0, 3)
addr_stop_num = len(ch_num_list)*len(raw_wave_list) - 1
addr_stop = num_to_bytes(addr_stop_num, 3)
addr_word = addr_start + addr_stop
return addr_word
def ad5371_ch2bytes(self, ch_num_list):
# g0_ch_list = [b'\x00\xC8', b'\x00\xC9', b'\x00\xCA', b'\x00\xCB',
# b'\x00\xCC', b'\x00\xCD', b'\x00\xCE', b'\x00\xCF']
# g1_ch_list = [b'\x00\xD0', b'\x00\xD1', b'\x00\xD2', b'\x00\xD3',
# b'\x00\xD4', b'\x00\xD5', b'\x00\xD6', b'\x00\xD7']
# g2_ch_list = [b'\x00\xD8', b'\x00\xD9', b'\x00\xDA', b'\x00\xDB',
# b'\x00\xDC', b'\x00\xDD', b'\x00\xDE', b'\x00\xDF']
# g3_ch_list = [b'\x00\xE0', b'\x00\xE1', b'\x00\xE2', b'\x00\xE3',
# b'\x00\xE4', b'\x00\xE5', b'\x00\xE6', b'\x00\xE7']
# g4_ch_list = [b'\x00\xE8', b'\x00\xE9', b'\x00\xEA', b'\x00\xEB',
# b'\x00\xEC', b'\x00\xED', b'\x00\xEE', b'\x00\xEF']
reg_ch_list = [b'\x00\xC8', b'\x00\xC9', b'\x00\xCA', b'\x00\xCB', # group0
b'\x00\xCC', b'\x00\xCD', b'\x00\xCE', b'\x00\xCF',
b'\x00\xD0', b'\x00\xD1', b'\x00\xD2', b'\x00\xD3', # group1
b'\x00\xD4', b'\x00\xD5', b'\x00\xD6', b'\x00\xD7',
b'\x00\xD8', b'\x00\xD9', b'\x00\xDA', b'\x00\xDB', # group2
b'\x00\xDC', b'\x00\xDD', b'\x00\xDE', b'\x00\xDF',
b'\x00\xE0', b'\x00\xE1', b'\x00\xE2', b'\x00\xE3', # group3
b'\x00\xE4', b'\x00\xE5', b'\x00\xE6', b'\x00\xE7',
b'\x00\xE8', b'\x00\xE9', b'\x00\xEA', b'\x00\xEB', # group4
b'\x00\xEC', b'\x00\xED', b'\x00\xEE', b'\x00\xEF']
ch_bytes_list = []
for index in range(len(ch_num_list)):
ch_bytes_list.append(reg_ch_list[ch_num_list[index]])
return ch_bytes_list
def ad5371_pts2bytes(self, pts_data):
"""
        :param pts_data: output voltage, range: -10 ~ +10, unit: V
        :type pts_data: float
        :returns: the 16-bit DAC register word for the AD5371
        :rtype: bytes, length = 2
"""
fsc = 2**13 - 1
offset = 2**13
data_in_num = (pts_data/10)*fsc + offset
# y = np.sin((float(x)/sin_pts+0)*2*np.pi) * (2**13-1) + 2**13
data_in_bytes = num_to_bytes(int(data_in_num)*4, 2)
return data_in_bytes
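    # Worked example (a sketch with illustrative numbers): the 14-bit code is
    # shifted left by two bits into the 16-bit register, so
    # ad5371_pts2bytes(10.0) -> (8191 + 8192) * 4 = 65532 (b'\xff\xfc') and
    # ad5371_pts2bytes(0.0) -> 8192 * 4 = 32768 (b'\x80\x00') -- matching the
    # +10 V and 0 V register writes in the test code further below.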
def ad5371_data_gen(self, ch_num_list, raw_wave_list):
ch_bytes_list = self.ad5371_ch2bytes(ch_num_list)
addr_word = self.ad5371_addr_gen(ch_num_list, raw_wave_list)
dac_download_data = addr_word
for index_pts in range(len(raw_wave_list)):
for index_ch in range(len(ch_bytes_list)):
temp_data = self.ad5371_pts2bytes(raw_wave_list[index_pts][index_ch])
dac_download_data += ch_bytes_list[index_ch] + temp_data
return dac_download_data
class HardWare(GenWave):
"""A class used for the basic Hardware controlling, including the instructions
"""
#################################################################
# __init__ and WR/RD for the USB_CY68013
#################################################################
def __init__(self, dev_index=0, test_mode=False):
GenWave.__init__(self)
""" init function just keep the same"""
if test_mode:
import random
def read():
return random.choice(
[b'\xff\x00\x00\x00', b'\xff\x00\x00\x01', b'\x00' * 4, b'\x11' * 4, b'\x22' * 4, b'\x11' * 6,
b'\xff' * 4])
def write(msg):
return len(msg)
class dll:
def flushInputBuffer(self, *args):
pass
self.dev_index = 0
self.read = read
self.write = write
self.dll = dll()
self.stop()
return
import platform
bits = platform.architecture()[0]
dll_load = False
# try:
# self.dll = ctypes.cdll.LoadLibrary('driver/xyj.dll')
# dll_load = True
# except:
# print 'Load xyj_dll file fail!'
bit32_driver_possible_list = ['driver/xyj_x86.dll', 'xyj_x86.dll', 'driver/xyj.dll', 'xyj.dll']
bit64_driver_possible_list = ['driver/xyj_x64.dll', 'xyj_x64.dll', 'driver/xyj.dll', 'xyj.dll']
if '32bit' in bits:
driver_possible_list = bit32_driver_possible_list
else:
driver_possible_list = bit64_driver_possible_list
d_index = 0
for ii in range(len(driver_possible_list)):
if os.path.exists(driver_possible_list[ii]):
d_index = ii
break
try:
if not dll_load:
self.dll = ctypes.cdll.LoadLibrary(driver_possible_list[d_index])
        except Exception:
import traceback
traceback.print_exc()
# print('\nLoad FPGA USB fail!\n')
print('load FPGA USB fail')
return
self.dll.read.restype = ctypes.c_bool
self.dll.open.restype = ctypes.c_bool
self.dll.read_until.restype = ctypes.c_bool
self.dll.GetPara.restype = ctypes.c_ulong
self.dev_index = ctypes.c_ubyte(0)
self.open(dev_index)
self.stop()
def open(self, index=0):
print('index=', index)
self.dll.open.restype = ctypes.c_bool
return self.dll.open(ctypes.c_byte(index))
# def stop_old(self):
# """
# stop the device.
# :return:
# """
# # use for stop counter
# self.run_flag = False
# time.sleep(0.01)
# self.write(b'\x00' * 2)
# time.sleep(0.01)
# # clear buffer
# self.dll.flushInputBuffer()
# global CountDataBuf
# CountDataBuf = [0, []]
def stop(self):
"""
stop the device.
:return:
"""
# global stop_flag
# stop_flag = True
time.sleep(0.01)
self.write(b'\x00' * 2)
time.sleep(0.01)
# clear buffer
self.dll.flushInputBuffer()
def read(self, num=4096):
""" A new read function for Python3"""
# print 'enter read'
bufp = (ctypes.c_ubyte * 4100)()
# bufp = ctypes.c_char_p(b'\x00' * num)
cnum = ctypes.c_long(num)
cnum_p = ctypes.pointer(cnum)
if not self.dll.read(bufp, cnum_p):
# fh.write('F%d\n' % cnum_p.contents.value)
self.dll.ResetInputEnpt()
return b''
# fh.write('S%d\n' % cnum_p.contents.value)
# fh.flush()
# print num, 'rx num', cnum_p.contents.value
# return str(bytearray(bufp))[:cnum_p.contents.value]
return bytearray(bufp)[:cnum_p.contents.value] # for Python3, we can return the bytes format rather than str
def write(self, msg):
""" A new write function for Python3"""
# bufp = ctypes.c_char_p(bytes(msg, 'utf-8'))
bufp = ctypes.c_char_p(msg)
self.dll.write(bufp, ctypes.c_long(len(msg)))
return len(msg)
#################################################################
# Some basic data pro
#
# Include 3 functions
#################################################################
# register transform
def reg_wr2rd(self, reg_wr): # eg:b'\x00'-->b'\x80'
reg_rd = num_to_bytes(bytes_to_num(reg_wr) + 128, 1)
return reg_rd
def reg_rd2wr(self, reg_rd):
reg_wr = num_to_bytes(bytes_to_num(reg_rd) % 128, 1)
return reg_wr
def ch2identify(self, ch_num):
""" 判断是否是高采样率的DDS, HP:high performance"""
if ch_num < 4:
hp_channel = True
reg_wr = b'\x0B'
elif ch_num < 16:
hp_channel = False
reg_wr = b'\x0E'
else:
print('the ch_num is over range!')
exit()
return hp_channel, reg_wr
#################################################################
# To Enable/Disable some HW functions
#
# Include 4 functions
#################################################################
def sync_on(self):
""" To enable the SYNC signal output"""
self.write(b'\x00\x21')
time.sleep(0.001)
def sync_off(self):
""" To disable the SYNC signal output"""
self.write(b'\x00\x22')
time.sleep(0.001)
def auto_clear_on(self):
""" To enable the auto clear (after each play)"""
self.write(b'\x00\x23')
time.sleep(0.001)
def auto_clear_off(self):
""" To disable the auto clear (after each play)"""
self.write(b'\x00\x24')
time.sleep(0.001)
#################################################################
# To configure and read register
#
# Include 4 functions
#################################################################
def s_configure(self, ch_num_byte, reg_wr, wr_data):
"""short/long configure (register) for DDS
:param ch_num_byte: bytes, length = 2
:param reg_wr: bytes, length = 1
:param wr_data: bytes, length = 4
:return:
"""
if len(wr_data) == 4:
self.write(b'\x00\x06' + ch_num_byte + b'\x00' + reg_wr + wr_data)
time.sleep(0.001)
def l_configure(self, ch_num_byte, reg_wr, wr_data):
if len(wr_data) == 8:
self.write(b'\x00\x07' + ch_num_byte + b'\x00' + reg_wr + wr_data)
time.sleep(0.001)
def s_read(self, ch_num_byte, reg_wr, right_rd=b'null'):
"""short/long read (register) for DDS
:param ch_num_byte: bytes, length = 2
:param reg_wr: bytes, length = 1; the reg_wr will be transfer into reg_rd
:param right_rd: bytes, length = 4; the input is the right result for the readout
:return: 1. result (type:string); 2. result, True/False
"""
self.write(b'\x00\x08'+ch_num_byte + self.reg_wr2rd(reg_wr) + b'\x00')
time.sleep(0.001)
result = self.read()
if right_rd == b'null':
return bytes_to_hexstr(result)
elif len(right_rd) == 4:
if result == b'\x08'+reg_wr + right_rd:
return bytes_to_hexstr(result), True
else:
return bytes_to_hexstr(result), False
def l_read(self, ch_num_byte, reg_wr, right_rd=b'null'):
self.write(b'\x00\x09'+ch_num_byte + self.reg_wr2rd(reg_wr) + b'\x00')
time.sleep(0.001)
result = self.read()
if right_rd == b'null':
return bytes_to_hexstr(result)
elif len(right_rd) == 8:
if result == b'\x09'+reg_wr + right_rd:
return bytes_to_hexstr(result), True
else:
return bytes_to_hexstr(result), False
#################################################################
# To set the parameter
# Part1: delay set
# Part2: wr/rd stamp set for SPI/BPI, and Play set for AD5371
#################################################################
# Part1: delay set
def ttl_coarse_delay_set(self, ch_num, coarse_delay):
# delay_step: 8 ns(1G-DDS) / 6.4 ns(2G5-DDS)
"""
:param ch_num: int; range(0, 16, 1)
:param coarse_delay: int; range(0, 16, 1), default: 7
:return:
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
coarse_delay_byte = num_to_bytes(coarse_delay, 2)
# print bytes_to_hexstr(b'\x00\x0A' + ch_num_byte + coarse_delay_byte)
self.write(b'\x00\x0A' + ch_num_byte + coarse_delay_byte)
# time.sleep(0.001)
def ttl_serdese_delay_set(self, ch_num, serdese_delay):
# delay_step: 1.6 ns
"""
:param ch_num: int; range(0, 16, 1)
:param serdese_delay: int; for 1G-DDS --- range(0, 5, 1), default: 4; for 2G5-DDS --- range(0, 4, 1), default: 0;
:return:
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
serdese_delay_byte = num_to_bytes(serdese_delay, 2)
# print bytes_to_hexstr(b'\x00\x0B' + ch_num_byte + serdese_delay_byte)
self.write(b'\x00\x0B' + ch_num_byte + serdese_delay_byte)
# time.sleep(0.001)
def ttl_odelaye_delay_set(self, ch_num, delay_tap):
# delay_step: ~52 ps
"""
:param ch_num: int; range(0, 16, 1)
:param delay_tap: int; range(0, 32, 1), 1G-DDS_default: 0, 2G5-DDS_default: 0;
:return:
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
delay_tap_byte = num_to_bytes(delay_tap, 2)
# print bytes_to_hexstr(b'\x00\x0C' + ch_num_byte + delay_tap_byte)
self.write(b'\x00\x0C' + ch_num_byte + delay_tap_byte)
# time.sleep(0.001)
# def dds_coarse_delay_set(self, ch_num, coarse_delay):
# ch_num_byte = num_to_bytes(2**ch_num,2)
# coarse_delay_byte = num_to_bytes(coarse_delay,2)
# # print bytes_to_hexstr(b'\x00\x0A' + ch_num_byte + coarse_delay_byte)
# self.write(b'\x00\x0D' + ch_num_byte + coarse_delay_byte)
# # time.sleep(0.001)
def dds_serdese_delay_set(self, ch_num, serdese_delay):
# delay_step: 1.6 ns
"""
:param ch_num: int; range(4, 16, 1)
:param serdese_delay: int; for 1G-DDS --- range(0, 5, 1), default: 0; for 2G5-DDS --- range(0, 4, 1), default: 0;
:return:
"""
# if ch_num > 3:
ch_num_byte = num_to_bytes(2**ch_num, 2)
serdese_delay_byte = num_to_bytes(serdese_delay, 2)
# print('dds_serdese_delay_set ', bytes_to_hexstr(b'\x00\x0E' + ch_num_byte + serdese_delay_byte))
self.write(b'\x00\x0E' + ch_num_byte + serdese_delay_byte)
# time.sleep(0.001)
# def dds_odelaye_delay_set(self, ch_num, delay_tap):
# ch_num_byte = num_to_bytes(2**ch_num,2)
# delay_tap_byte = num_to_bytes(delay_tap,2)
# # print bytes_to_hexstr(b'\x00\x0C' + ch_num_byte + delay_tap_byte)
# self.write(b'\x00\x0F' + ch_num_byte + delay_tap_byte)
# # time.sleep(0.001)
def sync_delay_set(self, ch_num, delay_tap):
# delay_step: ~52 ps
"""
:param ch_num: int; range(0, 16, 1)
:param delay_tap: int; range(0, 32, 1), 1G-DDS_default: 15, 2G5-DDS_default: 12;
:return:
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
delay_tap_byte = num_to_bytes(delay_tap, 2)
# print bytes_to_hexstr(b'\x00\x0D' + ch_num_byte + delay_tap_byte)
self.write(b'\x00\x18' + ch_num_byte + delay_tap_byte) # the former one is b'\x0D'
# time.sleep(0.001)
# Part2: wr/rd stamp set for SPI/BPI, and Play set for AD5371
def wr_stamp_set(self, ch_num, stamp_list):
"""
:param ch_num: int; range(0, 16, 1)
:param stamp_list: list (of int), length = 3; default is shown below
ch_num_list = [2,3,4]
stamp_list_wr = [[0,1,3],[0,2,5],[0,2,5]]
:return:
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
stamp = b''
for index_1 in range(3):
stamp += num_to_bytes(stamp_list[index_1] % 128, 1)
print(bytes_to_hexstr(b'\x00\x10'+ch_num_byte+b'\x00'+stamp))
self.write(b'\x00\x10'+ch_num_byte+b'\x00'+stamp)
time.sleep(0.001)
def rd_stamp_set(self, ch_num, stamp_list):
"""
:param ch_num: int; range(0, 16, 1)
:param stamp_list: list (of int), length = 3; default is shown below
ch_num_list = [2,3,4]
stamp_list_rd = [[2,7,9],[4,6,9],[3,5,12]]
:return:
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
stamp = b''
for index_1 in range(3):
stamp += num_to_bytes(stamp_list[index_1] % 128, 1)
print(bytes_to_hexstr(b'\x00\x11'+ch_num_byte+b'\x00'+stamp))
self.write(b'\x00\x11'+ch_num_byte+b'\x00'+stamp)
time.sleep(0.001)
def ad5371_wr_stamp_set(self, stamp_list):
"""
:param stamp_list: list (of int), length = 3; default: [0,1,3]
:return:
"""
stamp = b''
for index_1 in range(3):
stamp += (num_to_bytes(stamp_list[index_1] % 128, 1))
print(bytes_to_hexstr(b'\x00\x35\x00'+stamp))
self.write(b'\x00\x35\x00'+stamp)
time.sleep(0.001)
def ad5371_play_set(self, num_of_ch, time_list):
"""
:param num_of_ch: int; range(1, 41, 1)
        :param time_list: list (of int), length = 3; default (also minimum): [106,59,111]
:return:
"""
play_para = num_to_bytes(num_of_ch % 64, 1)
for index_1 in range(3):
play_para += num_to_bytes(time_list[index_1] % 4096, 2)
print(bytes_to_hexstr(b'\x00\x36\x00'+play_para))
self.write(b'\x00\x36\x00'+play_para)
time.sleep(0.001)
#################################################################
# To download or check DDS/TTL data
#
# Include 4 functions
#################################################################
def dds_data_download(self, ch_num_byte, dds_download_data, print_sign=False):
"""
:param ch_num_byte: bytes, length = 2;
:param dds_download_data: bytes, length = 4 + 10*N (1+8 is valid)
:param print_sign: bool
:return:
"""
# print 'into dds_download'
if print_sign:
print('Into dds_download ', bytes_to_hexstr(b'\x00\x02' + ch_num_byte + dds_download_data[0:4]))
print(bytes_to_hexstr(dds_download_data[4:]))
# print(len(b'\x00\x02' + ch_num_byte + dds_download_data))
data = b'\x00\x02' + ch_num_byte + dds_download_data
self.write(data)
time.sleep(0.001)
def dds_data_check(self, ch_num, dds_download_data):
# for downloading, the dds_data share the same interface, not for checking
"""
:param ch_num: int; range(0, 16, 1)
:param dds_download_data: bytes, length = 4 + 10*N (1+8 is valid)
:return: True/False
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
if ch_num < 4:
print(bytes_to_hexstr(b'\x00\x03' + ch_num_byte + dds_download_data[0:4]))
self.write(b'\x00\x03' + ch_num_byte + dds_download_data[0:4])
else:
# print bytes_to_hexstr(b'\x00\x13' + ch_num_byte + dds_download_data[0:4])
self.write(b'\x00\x13' + ch_num_byte + dds_download_data[0:4])
time.sleep(0.001)
check_data = b''
        zero_len_cnt = 0  # counts how many times the readout came back empty
while len(check_data) < len(dds_download_data[4:]):
check_data_temp = self.read()
if zero_len_cnt == 3:
print('check data is ', bytes_to_hexstr(check_data))
print('right data is ', bytes_to_hexstr(dds_download_data[4:]))
break
if len(check_data_temp) == 0:
zero_len_cnt += 1
time.sleep(0.001)
continue
# print bytes_to_hexstr(check_data_temp)
check_data += check_data_temp
# print bytes_to_hexstr(check_data)
if check_data == dds_download_data[4:]:
return True
else:
print('error')
if len(check_data) == len(dds_download_data[4:]):
print('len is okay')
# print bytes_to_hexstr(check_data)
return False
def ttl_data_download(self, ch_num_byte, ttl_download_data, print_sign=False):
"""
:param ch_num_byte: bytes, length = 2;
:param ttl_download_data: bytes, length = 4 + 4*N (27bits are valid)
:param print_sign: bool
:return:
"""
if print_sign:
print('ttl_data_download ', bytes_to_hexstr(b'\x00\x04' + ch_num_byte + ttl_download_data[0:4]))
print(bytes_to_hexstr(ttl_download_data[4:]))
data = b'\x00\x04' + ch_num_byte + ttl_download_data
self.write(data)
time.sleep(0.001)
def ttl_data_check(self, ch_num, ttl_download_data):
# for downloading, the ttl_data share the same interface, not for checking
"""
:param ch_num: int; range(0, 16, 1)
:param ttl_download_data: bytes, length = 4 + 4*N (27bits are valid)
:return: True/False
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
if ch_num < 4:
# print bytes_to_hexstr(b'\x00\x05' + ch_num_byte + ttl_download_data[0:4])
self.write(b'\x00\x05' + ch_num_byte + ttl_download_data[0:4])
else:
# print bytes_to_hexstr(b'\x00\x15' + ch_num_byte + ttl_download_data[0:4])
self.write(b'\x00\x15' + ch_num_byte + ttl_download_data[0:4])
time.sleep(0.001)
check_data = b''
zero_len_cnt = 0
while len(check_data) < len(ttl_download_data[4:]):
check_data_temp = self.read()
if zero_len_cnt == 3:
print('check data is ', bytes_to_hexstr(check_data))
print('right data is ', bytes_to_hexstr(ttl_download_data[4:]))
break
if len(check_data_temp) == 0:
zero_len_cnt += 1
time.sleep(0.001)
continue
check_data += check_data_temp
if check_data == ttl_download_data[4:]:
return True
else:
print('error')
if len(check_data) == len(ttl_download_data[4:]):
print('len is okay')
# print bytes_to_hexstr(check_data)
return False
def scan_data_download(self, scan_download_data, print_sign=False):
"""
:param scan_download_data: bytes, length = 4 + 18*N (144bits in total)
:param print_sign: bool
:return:
"""
if print_sign:
print('scan_data_download ', bytes_to_hexstr(b'\x00\x40' + scan_download_data[0:4]))
print(bytes_to_hexstr(scan_download_data[4:]))
data = b'\x00\x40' + scan_download_data
self.write(data)
time.sleep(0.001)
#
# def scan_data_download(self, var_type, scan_para_list):
# """
# :param var_type: int, value: [0,1,2,3,4] represents ["no scan","amp","freq","phase","time"]
# :param scan_para_list: list (of [data, N_i])
# :return:
# """
# scan_para_byte = b''
# if var_type == 0:
# scan_para_byte = self.scan_gen_0(scan_para_list)
# elif var_type == 1:
# scan_para_byte = self.scan_gen_1(scan_para_list)
# elif var_type == 2:
# scan_para_byte = self.scan_gen_2(scan_para_list)
# elif var_type == 3:
# scan_para_byte = self.scan_gen_3(scan_para_list)
# elif var_type == 4:
# scan_para_byte = self.scan_gen_4(scan_para_list)
# else:
# print('incorrect input for var_type')
# exit()
#
# scan_addr_start = num_to_bytes(0, 2)
# scan_addr_stop = num_to_bytes(int(len(scan_para_byte)/10) - 1, 2)
# self.write(b'\x00\x01' + scan_addr_start + scan_addr_stop + scan_para_byte)
def scan_data_check(self, scan_download_data):
"""
:param scan_download_data: bytes, length = 4 + 18*N (144bits in total)
:return: True/False
"""
print(bytes_to_hexstr(b'\x00\x41' + scan_download_data[0:4]))
self.write(b'\x00\x41' + scan_download_data[0:4])
time.sleep(0.001)
check_data = b''
        zero_len_cnt = 0  # counts how many times the readout came back empty
while len(check_data) < len(scan_download_data[4:]):
check_data_temp = self.read()
if zero_len_cnt == 3:
print('check data is ', bytes_to_hexstr(check_data))
print('right data is ', bytes_to_hexstr(scan_download_data[4:]))
break
if len(check_data_temp) == 0:
zero_len_cnt += 1
time.sleep(0.001)
continue
# print bytes_to_hexstr(check_data_temp)
check_data += check_data_temp
# print bytes_to_hexstr(check_data)
if check_data == scan_download_data[4:]:
return True
else:
print('error')
if len(check_data) == len(scan_download_data[4:]):
print('len is okay')
# print bytes_to_hexstr(check_data)
return False
def ad5371_data_download(self, dac_download_data, print_sign=False):
"""
:param dac_download_data: bytes, length = 6 + 4*N (32bits in total)
:param print_sign: bool
:return:
"""
if print_sign:
print('ad5371_data_download ', bytes_to_hexstr(b'\x00\x32' + dac_download_data[0:6]))
print(bytes_to_hexstr(dac_download_data[6:]))
data = b'\x00\x32' + dac_download_data
self.write(data)
time.sleep(0.001)
def ad5371_data_check(self, dac_download_data):
"""
:param dac_download_data: bytes, length = 4*N (3 is valid)
:return: True/False
"""
print(bytes_to_hexstr(b'\x00\x33' + dac_download_data[0:6]))
self.write(b'\x00\x33' + dac_download_data[0:6])
time.sleep(0.001)
check_data = b''
        zero_len_cnt = 0  # counts how many times the readout came back empty
while len(check_data) < len(dac_download_data[6:]):
check_data_temp = self.read()
if zero_len_cnt == 3:
print('check data is ', bytes_to_hexstr(check_data))
print('right data is ', bytes_to_hexstr(dac_download_data[6:]))
break
if len(check_data_temp) == 0:
zero_len_cnt += 1
time.sleep(0.001)
continue
# print bytes_to_hexstr(check_data_temp)
check_data += check_data_temp
# print bytes_to_hexstr(check_data)
if check_data == dac_download_data[6:]:
return True
else:
print('error')
if len(check_data) == len(dac_download_data[6:]):
print('len is okay')
# print bytes_to_hexstr(check_data)
return False
def dac_ad5371_data_download(self, ch_num_list, raw_wave_list, check_sign=False):
""" AD5371 low sample rate DAC的数据下载"""
if len(ch_num_list) != len(raw_wave_list[0]):
print('mismatch of ch_num and data_list')
exit()
else:
dac_download_data = self.ad5371_data_gen(ch_num_list, raw_wave_list)
self.ad5371_data_download(dac_download_data, print_sign=True)
if check_sign:
if not self.ad5371_data_check(dac_download_data):
self.write(b'\x00\x00')
print('AD5371 data download check fail')
exit()
else:
print('AD5371 data download has been finished with check')
addr_start = dac_download_data[0:3]
addr_stop = dac_download_data[3:6]
return addr_start, addr_stop
#################################################################
    # Playback instructions for multiple (or single) channels
#
# Only 2 functions
#################################################################
def play_sequence_set(self, ch_num_list, play_address_word, print_sign=False):
"""
:param ch_num_list: list (of int), length: range(1, 17, 1)
        :param play_address_word: bytes, length = 4*len(ch_num_list)
:param print_sign: bool
:return:
"""
ch_num_sum = 0
for index_1 in range(len(ch_num_list)):
ch_num_sum += 2**ch_num_list[index_1]
ch_num_sum_byte = num_to_bytes(ch_num_sum, 2)
self.write(b'\x00\x1A' + ch_num_sum_byte + play_address_word)
if print_sign:
print('\nThe play_sequence_set bytes are ',
bytes_to_hexstr(b'\x00\x1A' + ch_num_sum_byte + play_address_word))
# def play(self, scan_para_list):
# """
# :param scan_para_list: list (of [scan_para, N_i])
# :return:
# """
# scan_addr_start = num_to_bytes(0, 2)
# scan_addr_stop = num_to_bytes(int(len(scan_para_list)) - 1, 2)
# self.write(b'\x00\x01' + scan_addr_start + scan_addr_stop)
#################################################################
# To set the Para
#
# Include 2 functions
#################################################################
def stamp_reset(self):
""" 重置读写的速率(平时不使用,只有出错时,快速返回默认值用的)"""
ch_num_list = [2, 3, 4]
stamp_list_wr = [[0, 1, 3], [0, 2, 5], [0, 2, 5]]
stamp_list_rd = [[2, 7, 9], [4, 6, 9], [3, 5, 12]]
for index_1 in range(3):
self.wr_stamp_set(ch_num_list[index_1], stamp_list_wr[index_1])
self.rd_stamp_set(ch_num_list[index_1], stamp_list_rd[index_1])
# def delay_para_set(self): #para set backup
# self.sync_delay_set(4,15)
# self.sync_delay_set(2,12) # from 9 to 14 (8, 15 is not okay)
# for ii in range(4):
# self.ttl_coarse_delay_set(ii,7)
# self.ttl_serdese_delay_set(ii,0)
# self.ttl_odelaye_delay_set(ii,0)
# for ii in range(12):
# self.ttl_coarse_delay_set(ii+4,7)
# self.ttl_serdese_delay_set(ii+4,4)
# self.ttl_odelaye_delay_set(ii+4,0)
#
# self.dds_serdese_delay_set(ii+4,0)
def delay_para_set(self):
""" 设置延时(确定后不用调整)"""
self.sync_delay_set(4, 15)
self.sync_delay_set(2, 12) # from 9 to 14 (8, 15 is not okay)
for index_1 in range(4):
self.ttl_coarse_delay_set(index_1, 7)
self.ttl_serdese_delay_set(index_1, 0)
self.ttl_odelaye_delay_set(index_1, 0)
for index_1 in range(12):
self.ttl_coarse_delay_set(index_1+4, 7)
self.ttl_serdese_delay_set(index_1+4, 4)
self.ttl_odelaye_delay_set(index_1+4, 0)
self.dds_serdese_delay_set(index_1+4, 0)
#################################################################
    # Initial configuration, phase clearing and manual sync for the 2.5 GSPS / 1 GSPS DDS
#
# Include 6 functions
#################################################################
def initial_AD9915(self, ch_num):
""" 2.5GSPS dds的初始化配置"""
# print ('channel-%d initial' %ch_num)
ch_num_byte = num_to_bytes(2**ch_num, 2)
self.s_configure(ch_num_byte, b'\x00', b'\x00\x01\x01\x0A') # con-phase and amp en
self.s_configure(ch_num_byte, b'\x01', b'\x00\x80\x80\x00')
self.s_configure(ch_num_byte, b'\x03', b'\x01\x05\x21\x20') # DAC_CAL en
self.s_configure(ch_num_byte, b'\x03', b'\x00\x05\x21\x20') # DAC_CAL disable
# print 'initial finished!'
# print(self.s_read(ch_num_byte, b'\x01', b'\x00\x80\x80\x00'))
# print(self.l_read(ch_num_byte, b'\x0B', b'\x00\x00\x00\x00\x00\x00\x00\x00'))
def phase_clear_2g5(self, ch_num):
""" 2.5GSPS dds的相位清零"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
self.s_configure(ch_num_byte, b'\x00', b'\x00\x01\x09\x0A') # asynchronous phase clear set
self.s_configure(ch_num_byte, b'\x00', b'\x00\x01\x01\x0A') # the bit clear
# print 'phase accumulator has been cleared!!'
def mannual_sync_2g5(self):
""" 2.5GSPS dds的手动同步"""
ch_num_byte_list = [b'\x00\x02', b'\x00\x01', b'\x00\x04', b'\x00\x08']
# self.sync_on()
# time.sleep(0.003)
# self.s_configure(ch_num_byte_list[0], b'\x01', b'\x00\x80\x83\x00')#enable SYNC_OUT
for index_1 in range(4):
self.s_configure(ch_num_byte_list[index_1], b'\x1B', b'\x00\x00\x08\x40')
self.s_configure(ch_num_byte_list[index_1], b'\x03', b'\x01\x05\x21\x20') # DAC_CAL en
self.s_configure(ch_num_byte_list[index_1], b'\x03', b'\x00\x05\x21\x20') # DAC_CAL disable
# self.sync_off()
# self.s_configure(ch_num_byte_list[3], b'\x01', b'\x00\x80\x80\x00')#disable SYNC_OUT
# print 'SYNC process has been finished!!'
def initial_ad9910(self, ch_num):
""" 1GSPS dds的初始化配置"""
# sine waveform
# print ('channel-%d initial' %ch_num)
ch_num_byte = num_to_bytes(2**ch_num, 2)
self.s_configure(ch_num_byte, b'\x00', b'\x00\x01\x00\x02')
self.s_configure(ch_num_byte, b'\x01', b'\x01\x40\x00\xA0')
self.s_configure(ch_num_byte, b'\x02', b'\x1F\x3F\xC0\x00')
self.s_configure(ch_num_byte, b'\x03', b'\x00\x00\x00\xFF') # from 7F to FF
# print 'initial finished!'
# print(self.s_read(ch_num_byte, b'\x01', b'\x01\x40\x00\xA0'))
# print(self.l_read(ch_num_byte, b'\x0E', b'\x08\xB5\x00\x00\x00\x00\x00\x00'))
def phase_clear_1g(self, ch_num):
""" 1GSPS dds的相位清零"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
self.s_configure(ch_num_byte, b'\x00', b'\x00\x01\x08\x02')
self.s_configure(ch_num_byte, b'\x00', b'\x00\x01\x00\x02')
# print 'phase accumulator has been cleared!!'
def mannual_sync_1g(self):
""" 1GSPS dds的手动同步"""
# self.sync_on()
# time.sleep(0.001)
# self.s_configure(b'\x00\x10', b'\x0A', b'\x0C\x00\x00\x00')
for index_1 in range(12):
# print index_1+5
ch_num_byte = num_to_bytes(2**(index_1+4), 2)
self.s_configure(ch_num_byte, b'\x0A', b'\x08\x00\x00\xf8')
# [7:0] set 0x00 to 0x58 ([2:0] is not used)-----6~17
# [7:0] set 0x30 to 0x88 ([2:0] is not used)-----6~17
self.s_configure(ch_num_byte, b'\x0A', b'\x00\x00\x00\x00')
# self.s_configure(b'\x00\x10', b'\x0A', b'\x00\x00\x00\x00')
# self.sync_off()
# print 'SYNC process has been finished!!'
#################################################################
    # Single-channel data download
#
# Only 1 function
#################################################################
def single_data_download(self, ch_num, raw_data_list, check_sign, print_sign=False):
"""
:param ch_num: number of channel
:type ch_num: int
:param raw_data_list: [[A,f(MHz),fai(pi)],[level,time],..]
: amp: float, range: [0,1]
: freq: int or float, unit: MHz
: phase: float, unit: pi, range: [0,2)
: level: str, 'high'/'low'
: time: float, unit: us
:param check_sign: True/False means Enable/Disable check_process
:type check_sign: bool
:param print_sign: True/False means Enable/Disable Print the download bytes
:type print_sign: bool
        :returns: the play address bytes for this channel (for play_sequence_set)
        :rtype: bytes, length = 4
"""
ch_num_byte = num_to_bytes(2**ch_num, 2)
hp_channel, reg_wr = self.ch2identify(ch_num) # in this part reg_wr is useless
self.raw_data_list_pro(raw_data_list)
# self.raw_data_list_pro(raw_data_list)
if print_sign:
print('\nChannel %d data download start' % ch_num)
print(raw_data_list)
pulse_list = self.pulse_data_gen(hp_channel, raw_data_list)
# self.raw_data_list_after_pro(raw_data_list)
self.raw_data_list_after_pro(raw_data_list)
self.dds_data_download(ch_num_byte, pulse_list[0], print_sign)
if check_sign:
if not self.dds_data_check(ch_num, pulse_list[0]):
self.write(b'\x00\x00')
print('channel-%d dds_data download check fail' % ch_num)
# if not self.dds_data_check(ch_num, pulse_list[0]):
# print 'channel-%d dds_data download fails' %ch_num
exit()
else:
print('channel-%d dds_data download has been finished with check' % ch_num)
self.ttl_data_download(ch_num_byte, pulse_list[1], print_sign)
if check_sign:
if not self.ttl_data_check(ch_num, pulse_list[1]):
self.write(b'\x00\x00')
print('channel-%d ttl_data download check fail' % ch_num)
# if not self.ttl_data_check(ch_num, pulse_list[1]):
# print 'channel-%d ttl_data download fails' %ch_num
exit()
else:
print('channel-%d ttl_data download has been finished with check' % ch_num)
# time.sleep(0.001)
        return pulse_list[2]  # return the address for play
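    # A hypothetical usage sketch (requires the hardware to be attached, so it
    # is shown as a comment only; the values are illustrative):
    #
    #   hw = HardWare()
    #   seq = [[0, [0.5, 100, 0.0], ['high', 5]],
    #          [0, [0.0, 0, 0.0], ['low', 5]]]
    #   play_addr = hw.single_data_download(0, seq, check_sign=True)
    #   hw.play_sequence_set([0], play_addr)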
#################################################################
# test function only for the hardware debug, useless for application
#
# Included 6 functions
#################################################################
'''
####### mode set for IO_A0 and IO_A1
def IO_mode_set(self, mode_num): # this one is a function for my test
mode_num_byte = num_to_bytes(mode_num, 2)
print(bytes_to_hexstr(b'\x00\x20' + mode_num_byte))
self.write(b'\x00\x20' + mode_num_byte)
time.sleep(0.001)
#######################################protocol test function
    ####### This part is mainly for testing the DDS read/write timing
def SPI_TEST(self, ch_num, wr_en):
if ch_num>=3:
ini_stamp = [25,50,100,[1,2,4]]
else:
if wr_en == '1':
ini_stamp = [1,10,20,[0,1,2]]
else:
ini_stamp = [10,20,30,[1,2,3]]
stamp_list=[25,50,100]
while True:
for ii in range(3):
stamp_list[ii] = ini_stamp[ii]-1
if wr_en == '1':
self.wr_stamp_set(ch_num, stamp_list)
else:
self.rd_stamp_set(ch_num, stamp_list)
            check_result = self.data_check(ch_num, 4)
            print(check_result)
            if not check_result or (ini_stamp[0] == 1 and wr_en == '1'):
print("when stamp0, stamp1, stamp2 is %d, %d, %d, the error begins"
%(ini_stamp[0],ini_stamp[1],ini_stamp[2]))
break
else:
for ii in range(3):
ini_stamp[ii] -= ini_stamp[3][ii]
def data_check(self, ch_num, check_times):# ch,
ch_num_byte = num_to_bytes(2**ch_num, 2)
error_times = 0
check_data_list = [b'\x00\x00\x00\x00', b'\x19\x99\x99\x99', b'\x0F\xFF\xFF\xFF',
b'\x19\x99\x99\x99', b'\x0F\xFF\xFF\xFF']
if ch_num < 4:
reg_wr = b'\x0B'
else:
reg_wr = b'\x0E'
for ii in range(check_times):
self.l_configure(ch_num_byte, reg_wr, b'\x00\x00\x00\x00'+check_data_list[ii+1])
rd_result, compare_result = self.l_read(ch_num_byte, reg_wr, b'\x00\x00\x00\x00'+check_data_list[ii+1])
if compare_result == True:
continue
else:
error_times += 1
print ("the error time is %d" %ii)
print (rd_result+' '+bytes_to_hexstr(check_data_list[ii+1]))
break
if error_times == 0:
return(True)
else:
print ('the error_times in %d is %d' %(check_times,error_times))
return(False)
def AD5371_SPI_TEST(self):
dec_step_stamp = [1,2,4]
ini_stamp=[6,12,24]
# ini_stamp=[25,50,100]
stamp_list=[25,50,100]
while True:
for ii in range(3):
stamp_list[ii] = ini_stamp[ii]-1
self.AD5371_wr_stamp_set(stamp_list)
# check_reuslt = self.data_check(ch_num, 4)
# print check_reuslt
# if not check_reuslt or (ini_stamp[0] == 1 and wr_en == '1'):
print("when stamp0, stamp1, stamp2 is %d, %d, %d" %(ini_stamp[0],ini_stamp[1],ini_stamp[2]))
fpga.write(b'\x00\x31'+b'\x00'+b'\xC9'+b'\xFF\xFC') # X_0 = +10
time.sleep(1)
fpga.write(b'\x00\x31'+b'\x00'+b'\xC9'+b'\x80\x00') # X_0 = 0
time.sleep(1)
fpga.write(b'\x00\x31'+b'\x00'+b'\xC9'+b'\x00\x00') # X_0 = -10
time.sleep(10)
for ii in range(3):
ini_stamp[ii] -= dec_step_stamp[ii]
if ini_stamp[0] == 0:
break
    ####### Single-channel "test" data download
def single_test_download(self, ch_num):
# ch_num_byte = num_to_bytes(2**ch_num, 2)
raw_data_list = [ [[1,200,0],['high',5]], [[0,200,0],['low',5]], [[1,200,0],['high',5],['low',5]] ]
return(self.single_data_download(ch_num, raw_data_list))
def test_download(self, ch_num_list):
play_address_word = b''
for ii in range(len(ch_num_list)):
play_address_word_temp = self.single_test_download(ch_num_list[ii])
play_address_word += play_address_word_temp
print ('test data-download of channel ',ch_num_list,' has been finished')
return (play_address_word)
'''
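    # The stray docstring just below describes a phase-to-bytes helper whose
    # implementation is missing here. A minimal commented sketch of what it
    # could look like, assuming the phase is given in units of pi (the
    # "fai(pi)" convention used in the test comments further down) and maps
    # linearly onto a 16-bit phase-offset word; the helper name and the
    # scaling are assumptions, not the original code.
    # def phase_to_bytes(self, phase, hp_channel=True):
    #     turns = (phase / 2.0) % 1.0               # phase/pi -> fraction of a turn
    #     word = int(round(turns * (1 << 16))) & 0xFFFF
    #     return word.to_bytes(2, byteorder='big')  # 16 valid bits, MSB first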
"""To get the bytes format of phase
    :param phase: Phase of DDS
:type phase: float
:param hp_channel: A flag used to distinguish the channel. True/False means 2G5-DDS/1G-DDS
:type hp_channel: bool
:returns: Bytes representing the phase
:rtype: bytes, length = 2 (16bit valid)
"""
""" To enable the SYNC signal output"""
# class FPGA(HardWare): # GenWave,
# """ A class used for integration
#
# """
#
# def __init__(self, dev_index=0, test_mode=False):
# """ To launch the Instantiation of classes"""
# # GenWave.__init__(self)
# HardWare.__init__(self, dev_index=dev_index, test_mode=test_mode)
#
# def cw_play(self, ch_num, amp, freq, phase):
#         """ Play a specific waveform on a single DDS channel (continuous playback, used for spectrum tests)"""
# hp_channel, reg_wr = self.ch2identify(ch_num)
# ch_num_byte = num_to_bytes(2**ch_num, 2)
#
# dds_data_list = self.dds_data_form(hp_channel, amp, freq, phase)
# print(bytes_to_hexstr(dds_data_list[0]))
# self.l_configure(ch_num_byte, reg_wr, dds_data_list[0])
# return dds_data_list[1], dds_data_list[2]
#
# def ad5371_ini(self):
# fpga.write(b'\x00\x31'+b'\x00'+b'\x02'+b'\x20\x00') # the b'\x02' can be b'\x03',b'\x04'
# fpga.write(b'\x00\x31'+b'\x00'+b'\x03'+b'\x20\x00') # the OFS_g1 is set to be +10V.
# fpga.write(b'\x00\x31'+b'\x00'+b'\x04'+b'\x20\x00') # the OFS_g2~4 is set to be +10V.
#
# fpga.write(b'\x00\x31'+b'\x00'+b'\x80'+b'\x80\x00') # C
# fpga.write(b'\x00\x31'+b'\x00'+b'\x40'+b'\xFF\xFC') # M
# fpga.write(b'\x00\x31'+b'\x00'+b'\xC0'+b'\x80\x00') # X = +10
#
# """
# ini_stamp=[1,2,4]
# stamp_list=[25,50,100]
# for index_1 in range(3):
# stamp_list[index_1] = ini_stamp[index_1]-1
# """
# stamp_list = [0, 1, 3]
# self.AD5371_wr_stamp_set(stamp_list)
# print('AD5371 initial has been finished')
#
# #################################################################
# # integration-experiment function
# # All of the following operations support multiple channels
# #################################################################
# def initial_dds(self, ch_num_list):
#         """ Multi-channel DDS initialization and synchronization"""
# self.delay_para_set()
# self.sync_on()
# for index_1 in range(len(ch_num_list)):
# if ch_num_list[index_1] < 4:
# self.initial_AD9915(ch_num_list[index_1])
# else:
# self.initial_ad9910(ch_num_list[index_1])
# self.mannual_sync_2g5()
# self.mannual_sync_1g()
# self.sync_off()
# # self.stamp_reset() #when there are some bugs, this one will be used
# print('channel ', ch_num_list, ' initial has been finished')
#
# def phase_clear_dds(self, ch_num_list):
#         """ Clear the phases of multiple DDS channels"""
# for index_1 in range(len(ch_num_list)):
# if ch_num_list[index_1] < 4:
# self.phase_clear_2g5(ch_num_list[index_1])
# else:
# self.phase_clear_1g(ch_num_list[index_1])
# # print 'phase of channel ',ch_num_list,' has been cleared'
#
# def sequence_data_download(self, ch_num_list, raw_data_list_list, check_sign=False):
#         """ Multi-channel data download"""
# if len(ch_num_list) != len(raw_data_list_list):
# print('mismatch of ch_num and data_list')
# exit()
# else:
# play_address_word = b''
# for index_1 in range(len(ch_num_list)):
# raw_data_list_temp = raw_data_list_list[index_1]
# play_address_word_temp = self.single_data_download(ch_num_list[index_1], raw_data_list_temp, check_sign)
# play_address_word += play_address_word_temp
# print('data-download of channel ', ch_num_list, ' has been finished')
# self.play_sequence_set(ch_num_list, play_address_word)
# # return play_address_word
#
# def play(self, var_type, scan_para_list, check_sign=False):
# scan_para_gen = self.scan_data_gen(var_type, scan_para_list)
# self.scan_data_download(scan_para_gen[0])
# if check_sign:
# if not self.scan_data_check(scan_para_gen[0]):
# self.write(b'\x00\x00')
# print('Scan_data download check failed!')
# exit()
# self.write(b'\x00\x01' + scan_para_gen[0][0:4])
# self.counter_receive(scan_para_gen[1])
#
# def counter_receive(self, cnt_number):
# readout_bytes = b''
# cnt_result_list = []
# while True:
# temp = self.read()
# readout_bytes += temp
# if readout_bytes[0:2] == b'\xFF\xFA': # start sign
# readout_bytes = readout_bytes[2:]
# cnt_addr_start = bytes_to_num(readout_bytes[0:2])
#         elif readout_bytes[0:2] == b'\xFF\xF5': # stop sign
# readout_bytes = readout_bytes[2:]
# cnt_addr_stop = bytes_to_num(readout_bytes[0:2])
# break
# else:
# if readout_bytes[0:2] == b'\xFF\xF8':
# cnt_result_list.append('overflow')
# else:
# cnt_result_list.append(bytes_to_num(readout_bytes[0:2]))
# readout_bytes = readout_bytes[2:]
#
# print('the start and stop of cnt_addr are %d, %d' % (cnt_addr_start, cnt_addr_stop))
# print('The length of result is %d' % len(cnt_result_list))
# if cnt_number == (cnt_addr_stop-cnt_addr_start) + 1:
# print('The cnt_number match the input scan number')
# else:
#         print('The cnt_number does not match')
if __name__ == '__main__':
fpga = HardWare(1)
# fpga.write(b'\x00\x00')
# # fpga=FPGA(0)
# # global fpga
# fpga.dll.flushInputBuffer()
# # fpga.datasave()
#
# t1=time.time()
# print('time consumed', time.time()-t1)
# fpga.IO_mode_set(5)
# fpga.s_configure(b'\x00\x04',b'\x01', b'\x00\x80\x88\x00')#SYNC_CLK output enable
# fpga.s_configure(b'\x00\x02',b'\x01', b'\x00\x80\x83\x00')#SYNC_OUT enable
# fpga.sync_off()
# [[A,f(Hz),fai(pi)], [on,rise moment(us)], [off,fall moment(us)]]
# [[A,f(Hz),fai(pi)],[time, t]]
# ###main function of experiment(end)
# fpga.initial_dds([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
# z1 = [b'\x00\x00\x00\x00',b'\x00\x00\x00\x01']
#
# for ii in range(4):
# index = ii%2
# print index
# fpga.s_configure(b'\x00\x06',b'\x0E',z1[index])
# a = fpga.s_read(b'\x00\x02',b'\x0E',z1[index])
# b = fpga.s_read(b'\x00\x08',b'\x0E',z1[index])
# print a,b
# z2 = [b'\x00\x00\x00\x00\x00\x00\x00\x00',b'\x00\x00\x00\x00\x00\x00\x00\x01']
#
# for ii in range(4):
# index = ii%2
# print ii
# fpga.l_configure(b'\xFF\xF0',b'\x0E',z2[index])
# a1 = fpga.l_read(b'\x00\x10',b'\x0E',z2[index])
# b1 = fpga.l_read(b'\x00\x20',b'\x0E',z2[index])
# a2 = fpga.l_read(b'\x00\x40',b'\x0E',z2[index])
# b2 = fpga.l_read(b'\x00\x80',b'\x0E',z2[index])
# c1 = fpga.l_read(b'\x01\x00',b'\x0E',z2[index])
# d1 = fpga.l_read(b'\x02\x00',b'\x0E',z2[index])
# c2 = fpga.l_read(b'\x04\x00',b'\x0E',z2[index])
# d2 = fpga.l_read(b'\x08\x00',b'\x0E',z2[index])
# x1 = fpga.l_read(b'\x10\x00',b'\x0E',z2[index])
# y1 = fpga.l_read(b'\x20\x00',b'\x0E',z2[index])
# x2 = fpga.l_read(b'\x40\x00',b'\x0E',z2[index])
# y2 = fpga.l_read(b'\x80\x00',b'\x0E',z2[index])
# print a1,b1,a2,b2 #a,b,a,b
# print c1,d1,c2,d2
# print x1,y1,x2,y2
# fpga.auto_clear_on()
# pulse_width1 = 0.1536
# pulse_width2 = 3.520
# pulse_width_ex = 3.1232#3.1168
# # #sync test
#
# pulse_width = 4
# raw_data_list_list = []
# for ii in range(16):
# raw_data_list_list.append([])
#
# cycles = 10
#
# for ii in range(16):
# for jj in range(cycles):
# raw_data_list_list[ii].extend([[[1, 200, 0], ['high', pulse_width]], [[0, 200, 0], ['low', pulse_width]]])
# print(raw_data_list_list[2])
#
# t1 = time.time()
# #
# for ii in range(1): # 100 ~ 95 s 32400~8.5h
#
# fpga.initial_dds([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
# fpga.phase_clear_dds([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
# print('time consumed', time.time()-t1)
#
# play_ch_num_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
#
# fpga.phase_clear_dds(play_ch_num_list)
# play_address_word = fpga.data_download(play_ch_num_list, raw_data_list_list)
# # play_address_word = fpga.test_download(play_ch_num_list)
# # print bytes_to_hexstr(play_address_word)
# print('time consumed', time.time()-t1)
#
# t2=time.time()
# for jj in range(1):
# fpga.play(play_ch_num_list, play_address_word)
# # t2=time.time()
# # time.sleep(0.001)
# while True:
# a = fpga.read()
# if a == b'\x80\x80':
# print(bytes_to_hexstr(a))
# break
# else:
# if len(a) != 0:
# print(bytes_to_hexstr(a))
# print('time consumed', time.time()-t2)
# # fpga.phase_clear_dds([0,2])#change the list
# #
# # a = fpga.read()
# # print bytes_to_hexstr(a)
# # fpga.phase_clear_dds([0])
#
# # time.sleep(0.001)
# print('')
# print('time consumed', time.time()-t1)
# ############## python3 test
# ch_num_byte = b'\x00\x01'
# # fpga.initial_AD9915(1)
# print(b'\x00\x06' + ch_num_byte + b'\x00' + b'\x00\x00\x01\x01\x0A')
# print(len(b'\x00\x06' + ch_num_byte + b'\x00' + b'\x00\x00\x01\x01\x0A'))
# print(bytes_to_hexstr(b'\x00\x06' + ch_num_byte + b'\x00' + b'\x00\x00\x01\x01\x0A'))
# fpga.write(b'\x00\x06' + ch_num_byte + b'\x00' + b'\x00\x00\x01\x01\x0A')
#
# fpga.s_configure(ch_num_byte, b'\x00', b'\x00\x01\x01\x0A')#con-phase and amp en
# fpga.s_configure(ch_num_byte, b'\x01', b'\x00\x80\x80\x00')
# fpga.s_configure(ch_num_byte, b'\x03', b'\x01\x05\x21\x20')#DAC_CAL en
# fpga.s_configure(ch_num_byte, b'\x03', b'\x00\x05\x21\x20')#DAC_CAL disable
# print('initial finished!')
# print(fpga.s_read(ch_num_byte, b'\x01', b'\x00\x80\x80\x00'))
# print(fpga.l_read(ch_num_byte, b'\x0B', b'\x00\x00\x00\x00\x00\x00\x00\x00'))
# # ##test spectrum
# #
# t1=time.time()
# #
# for ii in range(1): #100 ~ 95 s 32400~8.5h
# fpga.initial_dds([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
# fpga.phase_clear_dds([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
# print('time consumed', time.time()-t1)
# frequency = 600
# test_time = 1
# t1=time.time()
# a, b = fpga.cw_play(0,1,frequency,0)
# print (a ,' ',b)
#
# for ii in range(test_time):
# print (ii,' ')
# time.sleep(1)
# print('time consumed', time.time()-t1)
# # fpga.cw_play(4,0,0,0)
# # fpga.cw_play(ii,1,frequency,0)
# fpga.write(b'\x00\x31'+b'\x00'+b'\x02'+b'\x20\x00') # the b'\x02' can be b'\x03',b'\x04'
#
# fpga.write(b'\x00\x31'+b'\x00'+b'\x80'+b'\x80\x00') # C
# fpga.write(b'\x00\x31'+b'\x00'+b'\x40'+b'\xFF\xFC') # M
#
# time.sleep(1)
# fpga.write(b'\x00\x31'+b'\x00'+b'\xC0'+b'\xFF\xFC') # X = +10
# time.sleep(1)
# fpga.write(b'\x00\x31'+b'\x00'+b'\xC0'+b'\x80\x00') # X = 0
# time.sleep(1)
# fpga.write(b'\x00\x31'+b'\x00'+b'\xC0'+b'\x00\x00') # X = -10
# fpga.AD5371_SPI_TEST()
# fpga.write(b'\x00\x31'+b'\x00'+b'\xC1'+b'\x80\x00') # X_G0_0 = 0
# ini_stamp=[1,2,4]
# stamp_list=[25,50,100]
# for ii in range(3):
# stamp_list[ii] = ini_stamp[ii]-1
# fpga.AD5371_wr_stamp_set(stamp_list)
# print("when stamp0, stamp1, stamp2 is %d, %d, %d" %(ini_stamp[0],ini_stamp[1],ini_stamp[2]))
# for ii in range(10):
# fpga.write(b'\x00\x31'+b'\x00'+b'\xC9'+b'\x80\x00') # X_G0_1 = +10
# time.sleep(2)
# # fpga.write(b'\x00\x31'+b'\x00'+b'\xC9'+b'\x80\x00') # X_G0_1 = 0
# # time.sleep(5)
# fpga.write(b'\x00\x31'+b'\x00'+b'\xC9'+b'\x7F\xFC') # X_G0_1 = -10
#
# time.sleep(2)
# # a = np.dtype('<i4')
# for ii in range(258):
# a = num_to_bytes(ii, 2)
# c = a.hex()
# print('when ii is ', ii, ', a is ', a,', the ord() is ', c)
# b = bytes_to_num(a)
# print(b)
# # b = int.from_bytes(a, byteorder='big')
# # print(b)
# # b = int.from_bytes(a, byteorder='little')
# # print(b)
# fpga.write(b'\x00\x00')
# fpga.ad5371_ini()
# g0_ch_list = [b'\x00\xC8',b'\x00\xC9',b'\x00\xCA',b'\x00\xCB',b'\x00\xCC',b'\x00\xCD',b'\x00\xCE',b'\x00\xCF']
# g1_ch_list = [b'\x00\xD0',b'\x00\xD1',b'\x00\xD2',b'\x00\xD3',b'\x00\xD4',b'\x00\xD5',b'\x00\xD6',b'\x00\xD7']
# g2_ch_list = [b'\x00\xD8',b'\x00\xD9',b'\x00\xDA',b'\x00\xDB',b'\x00\xDC',b'\x00\xDD',b'\x00\xDE',b'\x00\xDF']
# g3_ch_list = [b'\x00\xE0',b'\x00\xE1',b'\x00\xE2',b'\x00\xE3',b'\x00\xE4',b'\x00\xE5',b'\x00\xE6',b'\x00\xE7']
# g4_ch_list = [b'\x00\xE8',b'\x00\xE9',b'\x00\xEA',b'\x00\xEB',b'\x00\xEC',b'\x00\xED',b'\x00\xEE',b'\x00\xEF']
# y_str = b''
# sin_pts = 50
# ch_num = 3
# for x in range(sin_pts):
# y = np.sin((float(x)/sin_pts+0)*2*np.pi) * (2**13-1) + 2**13
# y_int = num_to_bytes(int(y)*4, 2)
# y_str += g0_ch_list[0]+ y_int + g0_ch_list[1]+ y_int #CC
# # print len(y_str)
# print ('the y and y_int are ', y, ',', bytes_to_hexstr(y_int))
#
# # y_str += g0_ch_list[0]+ b'\x80\x00' + g0_ch_list[4]+ b'\x80\x00' + g0_ch_list[1]+ b'\x80\x00' #CC
# # y_str += g0_ch_list[0]+ b'\xB0\x08' + g0_ch_list[4]+ b'\x90\x08' + g0_ch_list[1]+ b'\x90\x08' #CC
# # y_str += g0_ch_list[0]+ b'\xB0\x08' + g0_ch_list[4]+ b'\x90\x08' + g0_ch_list[1]+ b'\x90\x08' #CC
# # y_str += g0_ch_list[0]+ b'\x80\x00' + g0_ch_list[4]+ b'\x80\x00' + g0_ch_list[1]+ b'\x80\x00' #CC
# data_DAC = b''
# cycles = 2
# for ii in range(cycles):
# data_DAC += y_str
#
# # data_DAC += b'\x00'+b'\xC8'+ b'\x80\x00' + b'\x00'+b'\xCC'+ b'\x80\x00' #CC
#
# addr_start = b'\x00\x00\x00'
# addr_stop = num_to_bytes(int(len(data_DAC)/4) -1, 3)
# print (bytes_to_hexstr(b'\x00\x33' + addr_start + addr_stop))
# fpga.write(b'\x00\x33' + addr_start + addr_stop + data_DAC)
# time.sleep(0.01)
#
# # fpga.write(b'\x00\x36\x01\x19') # 2 us for 00F9
# # time.sleep(0.01)
# fpga.ad5371_play_set(ch_num, [106,59,111])
# # fpga.ad5371_play_set(ch_num, [200,250,200])
# # addr_start = b'\x00\x00\x00'
# # addr_stop = num_to_bytes(int(len(data_DAC)/4) -1, 3)
# fpga.write(b'\x00\x01' + addr_start + addr_stop)
# time.sleep(0.002)
# fpga.write(b'\x00\x01' + addr_start + addr_stop)
# ############ 10 ch test: fpga.ad5371_play_set(10, [106,59,111])
#
# g0_ch_list = [b'\x00\xC8',b'\x00\xC9',b'\x00\xCA',b'\x00\xCB',b'\x00\xCC',b'\x00\xCD',b'\x00\xCE',b'\x00\xCF']
# g1_ch_list = [b'\x00\xD0',b'\x00\xD1',b'\x00\xD2',b'\x00\xD3',b'\x00\xD4',b'\x00\xD5',b'\x00\xD6',b'\x00\xD7']
# g2_ch_list = [b'\x00\xD8',b'\x00\xD9',b'\x00\xDA',b'\x00\xDB',b'\x00\xDC',b'\x00\xDD',b'\x00\xDE',b'\x00\xDF']
# g3_ch_list = [b'\x00\xE0',b'\x00\xE1',b'\x00\xE2',b'\x00\xE3',b'\x00\xE4',b'\x00\xE5',b'\x00\xE6',b'\x00\xE7']
# g4_ch_list = [b'\x00\xE8',b'\x00\xE9',b'\x00\xEA',b'\x00\xEB',b'\x00\xEC',b'\x00\xED',b'\x00\xEE',b'\x00\xEF']
#
# y_str = b''
# sin_pts = 50
# ch_num = 10
# for x in range(sin_pts):
# y = np.sin((float(x)/sin_pts+0)*2*np.pi) * (2**13-1) + 2**13
# y_int = num_to_bytes(int(y)*4, 2)
# for ii in range(8):
# y_str += g0_ch_list[ii] + y_int
# y_str += g1_ch_list[0] + y_int
# y_str += g1_ch_list[1] + y_int
# # print len(y_str)
# print('the y and y_int are ', y, ',', bytes_to_hexstr(y_int))
#
#
#
# data_DAC = b''
# cycles = 2
# for ii in range(cycles):
# data_DAC += y_str
#
# # for ii in range(8):
# # y_str += g0_ch_list[ii] + y_int
# #
# # data_DAC += b'\x00'+b'\xC8'+ b'\x80\x00' + b'\x00'+b'\xCC'+ b'\x80\x00' #CC
#
# addr_start = b'\x00\x00\x00'
# addr_stop = num_to_bytes(int(len(data_DAC)/4) -1, 3) #+ch_num
# print(bytes_to_hexstr(b'\x00\x33' + addr_start + addr_stop))
# fpga.write(b'\x00\x33' + addr_start + addr_stop + data_DAC)
# time.sleep(0.01)
#
# # fpga.write(b'\x00\x36\x01\x19') # 2 us for 00F9
# # time.sleep(0.01)
# # fpga.ad5371_play_set(10, [124,74,10])
# # fpga.ad5371_play_set(ch_num, [106,99,10])
# fpga.ad5371_play_set(ch_num, [106,59,111])
# # addr_start = b'\x00\x00\x00'
# # addr_stop = num_to_bytes(int(30 * 10) -1, 3)
# fpga.write(b'\x00\x01' + addr_start + addr_stop)
# # time.sleep(2)
# # fpga.write(b'\x00\x01' + addr_start + addr_stop)
# ############ 40 ch test
# g0_ch_list = [b'\x00\xC8',b'\x00\xC9',b'\x00\xCA',b'\x00\xCB',b'\x00\xCC',b'\x00\xCD',b'\x00\xCE',b'\x00\xCF']
# g1_ch_list = [b'\x00\xD0',b'\x00\xD1',b'\x00\xD2',b'\x00\xD3',b'\x00\xD4',b'\x00\xD5',b'\x00\xD6',b'\x00\xD7']
# g2_ch_list = [b'\x00\xD8',b'\x00\xD9',b'\x00\xDA',b'\x00\xDB',b'\x00\xDC',b'\x00\xDD',b'\x00\xDE',b'\x00\xDF']
# g3_ch_list = [b'\x00\xE0',b'\x00\xE1',b'\x00\xE2',b'\x00\xE3',b'\x00\xE4',b'\x00\xE5',b'\x00\xE6',b'\x00\xE7']
# g4_ch_list = [b'\x00\xE8',b'\x00\xE9',b'\x00\xEA',b'\x00\xEB',b'\x00\xEC',b'\x00\xED',b'\x00\xEE',b'\x00\xEF']
#
# y_str = b''
# sin_pts = 50
# ch_num = 40
# for x in range(sin_pts):
# y = np.sin((float(x)/sin_pts+0)*2*np.pi) * (2**13-1) + 2**13
# y_int = num_to_bytes(int(y)*4, 2)
# for ii in range(8):
# y_str += g0_ch_list[ii] + y_int
# y_str += g1_ch_list[ii] + y_int
# y_str += g2_ch_list[ii] + y_int
# y_str += g3_ch_list[ii] + y_int
# y_str += g4_ch_list[ii] + y_int
# # y_str += g1_ch_list[0] + y_int
# # y_str += g1_ch_list[1] + y_int
# print(len(y_str))
# print('the y and y_int are ', y, ',', bytes_to_hexstr(y_int))
#
#
#
# data_DAC = b''
# cycles = 2
# for ii in range(cycles):
# data_DAC += y_str
#
# # for ii in range(8):
# # y_str += g0_ch_list[ii] + y_int
# #
# # data_DAC += b'\x00'+b'\xC8'+ b'\x80\x00' + b'\x00'+b'\xCC'+ b'\x80\x00' #CC
#
# addr_start = b'\x00\x00\x00'
# addr_stop = num_to_bytes(int(len(data_DAC)/4) -1, 3) #+ch_num
# print(bytes_to_hexstr(b'\x00\x33' + addr_start + addr_stop))
# fpga.write(b'\x00\x33' + addr_start + addr_stop + data_DAC)
# time.sleep(0.01)
#
# # fpga.write(b'\x00\x36\x01\x19') # 2 us for 00F9
# # time.sleep(0.01)
# # fpga.ad5371_play_set(40, [124,74,10])
# # fpga.ad5371_play_set(ch_num, [106,59,111])
# fpga.ad5371_play_set(ch_num, [106,59,111])
# addr_start = b'\x00\x00\x00'
# # addr_stop = num_to_bytes(int(30 * 10) -1, 3)
# fpga.write(b'\x00\x01' + addr_start + addr_stop)
# time.sleep(0.002)
# # fpga.write(b'\x00\x01' + addr_start + addr_stop)
# fpga.ad5371_ini()
#
#
# y_str = b''
# sin_pts = 100
# for x in range(sin_pts):
# y = np.sin((float(x)/sin_pts+0.25)*2*np.pi) * (2**13-1) + 2**13
# y_int = num_to_bytes(int(y)*4, 2)
# y_str += b'\x00'+b'\xC8'+y_int
# # print len(y_str)
# print 'the y and y_int are ', y, ',', bytes_to_hexstr(y_int)
#
# data_DAC = b''
# cycles = 10
# for ii in range(cycles):
# data_DAC += y_str
#
# addr_start = b'\x00\x00\x00'
# addr_stop = num_to_bytes(int(sin_pts * cycles) - 1, 3)
# print bytes_to_hexstr(b'\x00\x33' + addr_start + addr_stop)
# fpga.write(b'\x00\x33' + addr_start + addr_stop + data_DAC)
# time.sleep(0.01)
#
# fpga.write(b'\x00\x36\x01\x19') # 2/3 us for 00F9/0176
# time.sleep(0.01)
# addr_start = b'\x00\x00\x00'
# addr_stop = num_to_bytes(int(100 * 10) -1, 3)
# fpga.write(b'\x00\x01' + addr_start + addr_stop)
###data check function
# while True:
# check_data_temp = fpga.read()
# if len(check_data_temp) == 0:
# break
# else:
# print bytes_to_hexstr(check_data_temp)
# raw_data_list_1 = [ [[1,200,0],['high',5]], [[0,200,0],['low',4]], [[1,200,0],['high',5]] ]
# raw_data_list_2 = [ [[0.5,100,0.5],['high',10]], [[0.1,100,0.5],['low',8]], [[0.9,100,0.5],['high',10]] ]
# t1=time.time()
# for kk in range(1):
# for jj in range(16):
# for ii in range(200):
# fpga.single_data_download(jj, raw_data_list_1)
# fpga.single_data_download(jj, raw_data_list_2)
# print('time consumed', time.time()-t1)
###test function
# a, b = fpga.cw_play(1,1,200,0)
# print a ,' ',b
# for ii in range(16):
# fpga.cw_play(ii,1,200,0)
####### this one is a simple check for testing the frequency difference
# a=fpga.dds_data_form(True,0.5,200,0.25)
# print 'hp-dds result'
# print bytes_to_hexstr(a[0])
# print a[1], a[2]
# b=fpga.dds_data_form(False,0.5,200,0.25)
# print 'non-hp-dds result'
# print bytes_to_hexstr(b[0])
# print b[1], b[2]
###### protocol speed rate check
# fpga.initial_AD9915(1)
# fpga.initial_ad9910(4)
# fpga.SPI_TEST(1,'0')
# fpga.SPI_TEST(4,'1')
# a, b=fpga.l_read(b'\x00\x10',b'\x0E',b'\x08\xb5\x00\x00\x00\x00\x00\x00')
# print a, b
# fpga.wr_stamp_set(5,[0,1,2])
# fpga.rd_stamp_set(5,[4,9,13])#the error will occur
# for ii in range(10):
# print "the %d time result:" %ii
# a=fpga.data_check(5, 4)
# print a
# if a:
# continue
# break
###### protocol speed rate check (end)
################formerly used, but has been integrated
# fpga.initial_AD9915(3) ####initial
# fpga.initial_AD9915(2)
# fpga.initial_AD9915(1)
# fpga.initial_ad9910(4)
# fpga.initial_ad9910(5)
# fpga.phase_clear_2g5(3) ####clear phase
# fpga.phase_clear_2g5(2)
# fpga.phase_clear_2g5(1)
# fpga.phase_clear_1g(4)
# fpga.phase_clear_1g(5)
# fpga.mannual_sync_2g5()  ###manual synchronization
# fpga.mannual_sync_1g()
### data_list_processing
# a = fpga.raw_data_list_head([[[1,200,0],['high',10]], [[0,200,0],['low',10]], [[1,200,0],['high',10],['low',10]]])
# print a
# b=fpga.raw_data_list_tail(a)
# print b
# fpga.single_data_download(4, [[[1,200,0],['high',10]], [[0,200,0],['low',10]], [[1,200,0],['high',10],['low',10]]])
# fpga.single_test_download(1) ###test_data download
# fpga.single_test_download(2)
# fpga.single_test_download(3)
################formerly used, but has been integrated(end)
################ test call function
################ this part is really useless
# a = fpga.pulse_data_gen(True,[[[1,200,0],['high',10]], [[0,200,0],['low',10]], [[1,200,0],['high',10],['low',10]]])
# # print bytes_to_hexstr(a[0])
# # print bytes_to_hexstr(a[1])
# print bytes_to_hexstr(a[1][0:4])
# print len(a[1][4:]) #this length is 4*ttl_num
# print bytes_to_hexstr(a[2])
# a = [[[1,200,0],['high',10]], [[0,200,0],['low',10]], [[1,200,0],['high',10],['low',10]]]
# print a
# fpga.raw_data_list_pro(a)
# print a
################test call function(end)
# # the calls that were ultimately used
# fpga.start()
# fpga.socket_server_new()
# fpga.counter(cnt_time)
| [
"ctypes.c_byte",
"ctypes.c_long",
"os.path.exists",
"random.choice",
"ctypes.cdll.LoadLibrary",
"ctypes.c_ubyte",
"time.sleep",
"numpy.array",
"platform.architecture",
"ctypes.pointer",
"ctypes.c_char_p",
"traceback.print_exc"
] | [((26491, 26508), 'ctypes.c_ubyte', 'ctypes.c_ubyte', (['(0)'], {}), '(0)\n', (26505, 26508), False, 'import ctypes\n'), ((27284, 27300), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (27294, 27300), False, 'import time\n'), ((27343, 27359), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (27353, 27359), False, 'import time\n'), ((27641, 27659), 'ctypes.c_long', 'ctypes.c_long', (['num'], {}), '(num)\n', (27654, 27659), False, 'import ctypes\n'), ((27678, 27698), 'ctypes.pointer', 'ctypes.pointer', (['cnum'], {}), '(cnum)\n', (27692, 27698), False, 'import ctypes\n'), ((28326, 28346), 'ctypes.c_char_p', 'ctypes.c_char_p', (['msg'], {}), '(msg)\n', (28341, 28346), False, 'import ctypes\n'), ((29641, 29658), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (29651, 29658), False, 'import time\n'), ((29778, 29795), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (29788, 29795), False, 'import time\n'), ((29929, 29946), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (29939, 29946), False, 'import time\n'), ((30082, 30099), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (30092, 30099), False, 'import time\n'), ((31436, 31453), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (31446, 31453), False, 'import time\n'), ((31929, 31946), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (31939, 31946), False, 'import time\n'), ((36839, 36856), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (36849, 36856), False, 'import time\n'), ((37483, 37500), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (37493, 37500), False, 'import time\n'), ((37896, 37913), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (37906, 37913), False, 'import time\n'), ((38422, 38439), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (38432, 38439), False, 'import time\n'), ((39341, 39358), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (39351, 39358), False, 'import time\n'), ((40105, 40122), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (40115, 40122), False, 'import time\n'), ((41692, 41709), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (41702, 41709), False, 'import time\n'), ((42460, 42477), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (42470, 42477), False, 'import time\n'), ((43816, 43833), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (43826, 43833), False, 'import time\n'), ((45257, 45274), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (45267, 45274), False, 'import time\n'), ((46768, 46785), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (46778, 46785), False, 'import time\n'), ((47101, 47118), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (47111, 47118), False, 'import time\n'), ((25150, 25173), 'platform.architecture', 'platform.architecture', ([], {}), '()\n', (25171, 25173), False, 'import platform\n'), ((25854, 25894), 'os.path.exists', 'os.path.exists', (['driver_possible_list[ii]'], {}), '(driver_possible_list[ii])\n', (25868, 25894), False, 'import os\n'), ((26701, 26721), 'ctypes.c_byte', 'ctypes.c_byte', (['index'], {}), '(index)\n', (26714, 26721), False, 'import ctypes\n'), ((30715, 30732), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (30725, 30732), False, 'import time\n'), ((30917, 30934), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (30927, 30934), False, 'import time\n'), ((24605, 24729), 'random.choice', 'random.choice', 
(['[b\'\\xff\\x00\\x00\\x00\', b\'\\xff\\x00\\x00\\x01\', b\'\\x00\' * 4, b\'\\x11\' * 4, b\'"\' *\n 4, b\'\\x11\' * 6, b\'\\xff\' * 4]'], {}), '([b\'\\xff\\x00\\x00\\x00\', b\'\\xff\\x00\\x00\\x01\', b\'\\x00\' * 4, \n b\'\\x11\' * 4, b\'"\' * 4, b\'\\x11\' * 6, b\'\\xff\' * 4])\n', (24618, 24729), False, 'import random\n'), ((26021, 26075), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['driver_possible_list[d_index]'], {}), '(driver_possible_list[d_index])\n', (26044, 26075), False, 'import ctypes\n'), ((26136, 26157), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (26155, 26157), False, 'import traceback\n'), ((40630, 40647), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (40640, 40647), False, 'import time\n'), ((42938, 42955), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (42948, 42955), False, 'import time\n'), ((45786, 45803), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (45796, 45803), False, 'import time\n'), ((47628, 47645), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (47638, 47645), False, 'import time\n'), ((635, 663), 'numpy.array', 'np.array', (['[num]'], {'dtype': '""">u8"""'}), "([num], dtype='>u8')\n", (643, 663), True, 'import numpy as np\n'), ((726, 754), 'numpy.array', 'np.array', (['[num]'], {'dtype': '"""<u8"""'}), "([num], dtype='<u8')\n", (734, 754), True, 'import numpy as np\n')] |
# uniform content loss + adaptive threshold + per_class_input + recursive G
# improvement upon cqf37
from __future__ import division
import os, scipy.io, scipy.misc, cv2
import torch
import numpy as np
import glob
import utils
from unet import UNet
from torch.utils.data import DataLoader
from dataset.SID import SIDFujiTestDataset
test_input_dir = './dataset/SID/Fuji/test/short/'
test_gt_dir = './dataset/SID/Fuji/test/long/'
test_list_file= './dataset/SID/Fuji_test_list.txt'
checkpoint_dir = './Fuji_results/result_Fuji_MSSSIMLoss025/'
result_dir = checkpoint_dir
ckpt = checkpoint_dir + 'model.pth'
# get test IDs
test_fns = glob.glob(test_gt_dir + '*.png')
test_ids = [int(os.path.basename(test_fn)[0:5]) for test_fn in test_fns]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
unet = UNet()
unet.load_state_dict(torch.load(ckpt))
unet.to(device)
test_dataset = SIDFujiTestDataset(list_file=test_list_file, root_dir='./dataset/SID/')
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0)
iteration = 0
if not os.path.isdir(result_dir + 'final/'):
os.makedirs(result_dir + 'final/')
with torch.no_grad():
unet.eval()
for sample in iter(test_dataloader):
in_fn = sample['in_fn'][0]
print(in_fn)
in_img = sample['in_img'].to(device)
gt_img = sample['gt_img'].to(device)
out_img = unet(in_img)
output = out_img.permute(0, 2, 3, 1).cpu().data.numpy()
output = np.minimum(np.maximum(output, 0), 1)
gt_img = gt_img.permute(0, 2, 3, 1).cpu().data.numpy()
gt_img = np.minimum(np.maximum(gt_img, 0), 1)
output = output[0, :, :, :]
gt_img = gt_img[0, :, :, :]
output = cv2.resize(output, (sample['width'], sample['hight']))
# if '_00_' in in_fn:
scipy.misc.toimage(output * 255, high=255, low=0, cmin=0, cmax=255).save(
result_dir + 'final/' + in_fn)
| [
"os.makedirs",
"unet.UNet",
"dataset.SID.SIDFujiTestDataset",
"torch.load",
"os.path.isdir",
"torch.cuda.is_available",
"os.path.basename",
"torch.utils.data.DataLoader",
"numpy.maximum",
"torch.no_grad",
"cv2.resize",
"glob.glob"
] | [((633, 665), 'glob.glob', 'glob.glob', (["(test_gt_dir + '*.png')"], {}), "(test_gt_dir + '*.png')\n", (642, 665), False, 'import glob\n'), ((819, 825), 'unet.UNet', 'UNet', ([], {}), '()\n', (823, 825), False, 'from unet import UNet\n'), ((897, 968), 'dataset.SID.SIDFujiTestDataset', 'SIDFujiTestDataset', ([], {'list_file': 'test_list_file', 'root_dir': '"""./dataset/SID/"""'}), "(list_file=test_list_file, root_dir='./dataset/SID/')\n", (915, 968), False, 'from dataset.SID import SIDFujiTestDataset\n'), ((987, 1055), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=1, shuffle=False, num_workers=0)\n', (997, 1055), False, 'from torch.utils.data import DataLoader\n'), ((847, 863), 'torch.load', 'torch.load', (['ckpt'], {}), '(ckpt)\n', (857, 863), False, 'import torch\n'), ((1078, 1114), 'os.path.isdir', 'os.path.isdir', (["(result_dir + 'final/')"], {}), "(result_dir + 'final/')\n", (1091, 1114), False, 'import os, scipy.io, scipy.misc, cv2\n'), ((1120, 1154), 'os.makedirs', 'os.makedirs', (["(result_dir + 'final/')"], {}), "(result_dir + 'final/')\n", (1131, 1154), False, 'import os, scipy.io, scipy.misc, cv2\n'), ((1161, 1176), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1174, 1176), False, 'import torch\n'), ((774, 799), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (797, 799), False, 'import torch\n'), ((1740, 1794), 'cv2.resize', 'cv2.resize', (['output', "(sample['width'], sample['hight'])"], {}), "(output, (sample['width'], sample['hight']))\n", (1750, 1794), False, 'import os, scipy.io, scipy.misc, cv2\n'), ((682, 707), 'os.path.basename', 'os.path.basename', (['test_fn'], {}), '(test_fn)\n', (698, 707), False, 'import os, scipy.io, scipy.misc, cv2\n'), ((1506, 1527), 'numpy.maximum', 'np.maximum', (['output', '(0)'], {}), '(output, 0)\n', (1516, 1527), True, 'import numpy as np\n'), ((1623, 1644), 'numpy.maximum', 'np.maximum', (['gt_img', '(0)'], {}), '(gt_img, 0)\n', (1633, 1644), True, 'import numpy as np\n')] |
from torch.nn.functional import fractional_max_pool2d
from similar_words import similar
from visualize import display_pca_scatterplot
from PIL import Image
from gensim.models import KeyedVectors
import numpy as np
import moviepy.editor as mpe
import os
import cv2
import glob
def add_audio(path, theme, mood):
    video_name = "D:/Fall 2021/Deep Learning for Text Data - CSE 8803 DLT/PROJECT/SYNCPHONIC/places365/images/output_videos/mygeneratedvideo.mp4"
    audio_name = "D:/Fall 2021/Deep Learning for Text Data - CSE 8803 DLT/PROJECT/SYNCPHONIC/places365/audio/" + theme + "/" + mood + ".mp3"
my_clip = mpe.VideoFileClip(video_name)
audio_background = mpe.AudioFileClip(audio_name)
final_audio = mpe.CompositeAudioClip([audio_background])
# final_clip = my_clip.set_audio(final_audio)
my_clip.audio = final_audio
my_clip.write_videofile("D:/Fall 2021/Deep Learning for Text Data - CSE 8803 DLT/PROJECT/SYNCPHONIC/places365/images/output_videos/new_filename.mp4", fps=25)
return my_clip
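# A sketch of how a second (hypothetical) narration track could be layered
# on top of the background with the same moviepy calls used above; the file
# name is an assumption, not part of this project:
# narration = mpe.AudioFileClip("narration.mp3")
# mixed = mpe.CompositeAudioClip([audio_background, narration])
# my_clip.audio = mixed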
# Video Generating function
def generate_video(path):
# image_folder = path
    video_name = "D:/Fall 2021/Deep Learning for Text Data - CSE 8803 DLT/PROJECT/SYNCPHONIC/places365/images/output_videos/mygeneratedvideo.mp4"
os.chdir(path)
    images = sorted(img for img in os.listdir(path)
                    if img.endswith(".jpg") or
                    img.endswith(".jpeg") or
                    img.endswith("png"))  # sorted so frames appear in a stable order
# Array images should only consider
# the image files ignoring others if any
print(images)
frame = cv2.imread(os.path.join(path, images[0]))
# setting the frame width, height width
# the width, height of first image
height, width, layers = frame.shape
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
video = cv2.VideoWriter(video_name, fourcc, 1, (width, height))
# Appending the images to the video one by one
for image in images:
video.write(cv2.imread(os.path.join(path, image)))
# Deallocating memories taken for window creation
cv2.destroyAllWindows()
video.release() # releasing the video generated
return video
def video_preprocess(path):
# Folder which contains all the images
# from which video is to be generated
os.chdir(path)
mean_height = 0
mean_width = 0
num_of_images = len(os.listdir('.'))
# print(num_of_images)
for file in os.listdir('.'):
im = Image.open(os.path.join(path, file))
width, height = im.size
mean_width += width
mean_height += height
# im.show() # uncomment this for displaying the image
# Finding the mean height and width of all images.
# This is required because the video frame needs
# to be set with same width and height. Otherwise
# images not equal to that width height will not get
# embedded into the video
mean_width = int(mean_width / num_of_images)
mean_height = int(mean_height / num_of_images)
# print(mean_height)
# print(mean_width)
# Resizing of the images to give
# them same width and height
for file in os.listdir('.'):
if file.endswith(".jpg") or file.endswith(".jpeg") or file.endswith("png"):
# opening image using PIL Image
im = Image.open(os.path.join(path, file))
# im.size includes the height and width of image
width, height = im.size
print(width, height)
# resizing
            imResize = im.resize((mean_width, mean_height), Image.LANCZOS)  # ANTIALIAS was renamed to LANCZOS
imResize.save( file, 'JPEG', quality = 95) # setting quality
# printing each resized image name
print(im.filename.split('\\')[-1], " is resized")
if __name__ == "__main__":
moods = ['Happy', 'Funny', 'Chill', 'Dramatic', 'Sad', 'Romantic', 'Serious' , 'Scary' , 'Peaceful']
video_themes = ['Landscape', 'Nature', 'Sports', 'Food', 'Buildings', 'Art', 'Technology' , 'Roadtrip']
path = "D:\Fall 2021\Deep Learning for Text Data - CSE 8803 DLT\PROJECT\SYNCPHONIC\places365\images/trek"
scene_info = []
overall_theme = {}
for image_path in os.listdir(path):
input_path = os.path.join(path, image_path)
img = Image.open(input_path)
img_desc, img_theme = similar(img)
scene_info.append(img_desc)
if(img_theme not in overall_theme):
overall_theme[img_theme] = 1
else:
overall_theme[img_theme] += 1
# print(scene_info)
# print(final_mood)
# print("_________________")
scene_info = list(np.concatenate(scene_info).flat)
cleaned_scene_info = []
for scene in scene_info:
scene=scene.split('_')[0]
scene=scene.split('/')[0]
cleaned_scene_info.append(scene)
print(cleaned_scene_info)
print(overall_theme)
final_theme = max(overall_theme, key=overall_theme.get)
print(final_theme)
# model = KeyedVectors.load_word2vec_format('D:/Fall 2021/Deep Learning for Text Data - CSE 8803 DLT/PROJECT/SYNCPHONIC/places365/GoogleNews-vectors-negative300-SLIM.bin.gz', binary=True)
# display_pca_scatterplot(model, cleaned_scene_info, moods, video_themes)
video_preprocess(path)
generate_video(path)
mood = "upbeat"
# path = "D:\Fall 2021\Deep Learning for Text Data - CSE 8803 DLT\PROJECT\SYNCPHONIC\places365\images\city"
# final_theme = "buildings"
# mood = "upbeat"
add_audio(path, final_theme, mood)
| [
"moviepy.editor.AudioFileClip",
"moviepy.editor.CompositeAudioClip",
"os.listdir",
"PIL.Image.open",
"similar_words.similar",
"os.path.join",
"cv2.VideoWriter",
"os.chdir",
"cv2.destroyAllWindows",
"cv2.VideoWriter_fourcc",
"numpy.concatenate",
"moviepy.editor.VideoFileClip"
] | [((629, 658), 'moviepy.editor.VideoFileClip', 'mpe.VideoFileClip', (['video_name'], {}), '(video_name)\n', (646, 658), True, 'import moviepy.editor as mpe\n'), ((683, 712), 'moviepy.editor.AudioFileClip', 'mpe.AudioFileClip', (['audio_name'], {}), '(audio_name)\n', (700, 712), True, 'import moviepy.editor as mpe\n'), ((732, 774), 'moviepy.editor.CompositeAudioClip', 'mpe.CompositeAudioClip', (['[audio_background]'], {}), '([audio_background])\n', (754, 774), True, 'import moviepy.editor as mpe\n'), ((1289, 1303), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1297, 1303), False, 'import os\n'), ((1802, 1844), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""m"""', '"""p"""', '"""4"""', '"""v"""'], {}), "('m', 'p', '4', 'v')\n", (1824, 1844), False, 'import cv2\n'), ((1858, 1913), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_name', 'fourcc', '(1)', '(width, height)'], {}), '(video_name, fourcc, 1, (width, height))\n', (1873, 1913), False, 'import cv2\n'), ((2127, 2150), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2148, 2150), False, 'import cv2\n'), ((2359, 2373), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (2367, 2373), False, 'import os\n'), ((2518, 2533), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (2528, 2533), False, 'import os\n'), ((3260, 3275), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3270, 3275), False, 'import os\n'), ((4347, 4363), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4357, 4363), False, 'import os\n'), ((1625, 1654), 'os.path.join', 'os.path.join', (['path', 'images[0]'], {}), '(path, images[0])\n', (1637, 1654), False, 'import os\n'), ((2450, 2465), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (2460, 2465), False, 'import os\n'), ((4387, 4417), 'os.path.join', 'os.path.join', (['path', 'image_path'], {}), '(path, image_path)\n', (4399, 4417), False, 'import os\n'), ((4435, 4457), 'PIL.Image.open', 'Image.open', (['input_path'], {}), '(input_path)\n', (4445, 4457), False, 'from PIL import Image\n'), ((4489, 4501), 'similar_words.similar', 'similar', (['img'], {}), '(img)\n', (4496, 4501), False, 'from similar_words import similar\n'), ((1342, 1358), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1352, 1358), False, 'import os\n'), ((2560, 2584), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (2572, 2584), False, 'import os\n'), ((4817, 4843), 'numpy.concatenate', 'np.concatenate', (['scene_info'], {}), '(scene_info)\n', (4831, 4843), True, 'import numpy as np\n'), ((2030, 2055), 'os.path.join', 'os.path.join', (['path', 'image'], {}), '(path, image)\n', (2042, 2055), False, 'import os\n'), ((3436, 3460), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (3448, 3460), False, 'import os\n')] |
import os
import warnings
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from S2TruckDetect.src.S2TD.array_utils.points import rasterize
from OSMPythonTools.overpass import Overpass
from OSMPythonTools.overpass import overpassQueryBuilder
def buffer_bbox(bbox_osm):
"""
    Buffers an EPSG:4326 bounding box slightly so it covers the whole area of interest.
:param bbox_osm: array-like of four coordinates: miny, minx, maxy, maxx.
:return: array-like of four coordinates: miny, minx, maxy, maxx
"""
offset_lat, offset_lon = 0.02, 0.02 # degrees
bbox_osm[0] -= offset_lat # min lat
bbox_osm[1] -= offset_lon # min lon
bbox_osm[2] += offset_lat # max lat
bbox_osm[3] += offset_lon # max lon
return bbox_osm
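# Example with made-up coordinates: the bbox keeps OSM's
# (miny, minx, maxy, maxx) order and grows by 0.02 degrees on each side.
# Note that the input list is modified in place as well as returned:
# buffer_bbox([48.1, 16.2, 48.3, 16.5])
# -> [48.08, 16.18, 48.32, 16.52]  (up to floating-point rounding)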
def fetch_osm(bbox, osm_value="motorway", osm_key="highway"):
"""
Fetches OSM road data from the OverpassAPI.
:param bbox: array-like of four coordinates: miny, minx, maxy, maxx.
:param osm_value: str specifies the OSM value to be retrieved.
:param osm_key: str specifies the OSM key to be retrieved.
:return: gpd.GeoDataFrame
"""
element_type = ["way", "relation"]
bbox_osm = buffer_bbox(bbox)
quot = '"'
select = quot + osm_key + quot + "=" + quot + osm_value + quot
select_link = select.replace(osm_value, osm_value + "_link") # also get road links
select_junction = select.replace(osm_value, osm_value + "_junction")
geoms = []
for selector in [select, select_link, select_junction]:
query = overpassQueryBuilder(bbox=bbox_osm,
elementType=element_type,
selector=selector,
out="body",
includeGeometry=True)
try:
elements = Overpass().query(query, timeout=120).elements()
        except Exception: # broad catch: the Overpass query can fail in many ways
            elements = []
            warnings.warn("Could not download OSM data")
# create multiline of all elements
if len(elements) > 0:
for i in range(len(elements)):
elem = elements[i]
try:
geoms.append(elem.geometry())
except Exception:
continue
else:
            warnings.warn("Could not retrieve " + select)
if len(geoms) > 0:
lines = gpd.GeoDataFrame(crs="EPSG:4326", geometry=geoms)
n = len(geoms)
lines["osm_value"] = [osm_value] * n # add road type
return lines
def fetch_roads(bbox, osm_values, buffer_meters, dir_out, filename, crs):
"""
Iterates over all OSM road values of interest, fetches the road data, creates buffered road polygons.
:param bbox: array-like of four coordinates: miny, minx, maxy, maxx.
:param osm_values: array-like of str OSM road values.
:param buffer_meters: float buffer in meters for creating road polygons.
:param dir_out: str directory where to write the road data.
:param filename: str file name prefix to use when writing the road data.
:param crs: str output crs.
:return: str file path to road polygons
"""
if crs.is_geographic:
# estimate UTM EPSG for buffer in meters. Output will be CRS specified by crs argument
p = Point(bbox[:2][::-1])
crs_projected = gpd.GeoDataFrame({"geometry": [p]}, crs="EPSG:4326").estimate_utm_crs().to_string()
else:
crs_projected = crs.to_string()
fwrite = os.path.join(dir_out, filename + ".gpkg")
file_tmp = os.path.join(dir_out, "tmp.gpkg")
buffer_dist = "buffer_distance"
if os.path.exists(file_tmp):
os.remove(file_tmp)
if os.path.exists(fwrite):
pass
else:
roads = []
offset = 5 # meters
# buffer according to road type
m, t, p, s, ter = "motorway", "trunk", "primary", "secondary", "tertiary"
buffers = {m: buffer_meters, t: buffer_meters - offset, p: buffer_meters - offset,
s: buffer_meters - (3 * offset), ter: buffer_meters - (4 * offset)}
osm_values_int = {m: 1, t: 2, p: 3, s: 4, ter: 5}
for osm_value in osm_values:
roads_osm = fetch_osm(bbox=bbox, osm_value=osm_value)
if roads_osm is None:
pass
else:
roads_osm.to_file(file_tmp, driver="GPKG")
roads_osm = gpd.read_file(file_tmp)
roads_osm = roads_osm.to_crs(crs_projected)
roads_osm[buffer_dist] = [buffers[osm_value]] * len(roads_osm)
roads_osm["osm_value_int"] = osm_values_int[osm_value]
roads.append(roads_osm)
try:
roads_merge = gpd.GeoDataFrame(pd.concat(roads, ignore_index=True), crs=roads[0].crs) # merge all roads
except ValueError:
            warnings.warn("No road vectors")
else:
buffered = roads_merge.buffer(distance=roads_merge[buffer_dist]) # buffer the road vectors -> polygons
roads_merge.geometry = buffered
roads_merge.to_crs(crs).to_file(fwrite, driver="GPKG")
if os.path.exists(file_tmp):
os.remove(file_tmp)
return fwrite
def rasterize_roads(osm, reference_raster):
"""
Rasterizes road polygons to a reference grid.
:param osm: gpd.GeoDataFrame contains the road polygons.
    :param reference_raster: two-dimensional raster exposing .lat/.lon coordinates (e.g. an xarray.DataArray), the reference grid.
:return: numpy array with two dimensions, the rasterized road polygons.
"""
osm_values = list(set(osm["osm_value"]))
nan_placeholder = 100
road_rasters = []
for osm_value in osm_values:
osm_subset = osm[osm["osm_value"] == osm_value]
raster = rasterize(osm_subset, reference_raster.lat, reference_raster.lon)
cond = np.isfinite(raster)
raster_osm = np.where(cond, list(osm_subset.osm_value_int)[0],
nan_placeholder) # use placeholder instead of nan first
        raster_osm = raster_osm.astype(float)  # the np.float alias was removed from NumPy
road_rasters.append(raster_osm)
# merge road types in one layer
# use the lowest value (highest road level) because some intersect
road_raster_np = np.int8(road_rasters).min(axis=0)
road_raster_np[road_raster_np == nan_placeholder] = 0
return road_raster_np # 0=no_road 1=motorway, 2=trunk, ...
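# A minimal illustration (made-up values) of the min-merge above: stacking
# the per-type rasters and taking the element-wise minimum keeps the highest
# road level wherever types overlap, with 100 marking "no road yet":
# a = np.int8([[1, 100], [100, 5]])
# b = np.int8([[100, 2], [100, 100]])
# np.int8([a, b]).min(axis=0)  # -> [[1, 2], [100, 5]]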
| [
"os.path.exists",
"numpy.int8",
"S2TruckDetect.src.S2TD.array_utils.points.rasterize",
"geopandas.read_file",
"os.path.join",
"shapely.geometry.Point",
"OSMPythonTools.overpass.overpassQueryBuilder",
"numpy.isfinite",
"pandas.concat",
"OSMPythonTools.overpass.Overpass",
"geopandas.GeoDataFrame",... | [((3513, 3554), 'os.path.join', 'os.path.join', (['dir_out', "(filename + '.gpkg')"], {}), "(dir_out, filename + '.gpkg')\n", (3525, 3554), False, 'import os\n'), ((3570, 3603), 'os.path.join', 'os.path.join', (['dir_out', '"""tmp.gpkg"""'], {}), "(dir_out, 'tmp.gpkg')\n", (3582, 3603), False, 'import os\n'), ((3647, 3671), 'os.path.exists', 'os.path.exists', (['file_tmp'], {}), '(file_tmp)\n', (3661, 3671), False, 'import os\n'), ((3708, 3730), 'os.path.exists', 'os.path.exists', (['fwrite'], {}), '(fwrite)\n', (3722, 3730), False, 'import os\n'), ((1559, 1678), 'OSMPythonTools.overpass.overpassQueryBuilder', 'overpassQueryBuilder', ([], {'bbox': 'bbox_osm', 'elementType': 'element_type', 'selector': 'selector', 'out': '"""body"""', 'includeGeometry': '(True)'}), "(bbox=bbox_osm, elementType=element_type, selector=\n selector, out='body', includeGeometry=True)\n", (1579, 1678), False, 'from OSMPythonTools.overpass import overpassQueryBuilder\n'), ((2408, 2457), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'crs': '"""EPSG:4326"""', 'geometry': 'geoms'}), "(crs='EPSG:4326', geometry=geoms)\n", (2424, 2457), True, 'import geopandas as gpd\n'), ((3320, 3341), 'shapely.geometry.Point', 'Point', (['bbox[:2][::-1]'], {}), '(bbox[:2][::-1])\n', (3325, 3341), False, 'from shapely.geometry import Point\n'), ((3681, 3700), 'os.remove', 'os.remove', (['file_tmp'], {}), '(file_tmp)\n', (3690, 3700), False, 'import os\n'), ((5760, 5825), 'S2TruckDetect.src.S2TD.array_utils.points.rasterize', 'rasterize', (['osm_subset', 'reference_raster.lat', 'reference_raster.lon'], {}), '(osm_subset, reference_raster.lat, reference_raster.lon)\n', (5769, 5825), False, 'from S2TruckDetect.src.S2TD.array_utils.points import rasterize\n'), ((5841, 5860), 'numpy.isfinite', 'np.isfinite', (['raster'], {}), '(raster)\n', (5852, 5860), True, 'import numpy as np\n'), ((5150, 5174), 'os.path.exists', 'os.path.exists', (['file_tmp'], {}), '(file_tmp)\n', (5164, 5174), False, 'import os\n'), ((6240, 6261), 'numpy.int8', 'np.int8', (['road_rasters'], {}), '(road_rasters)\n', (6247, 6261), True, 'import numpy as np\n'), ((4424, 4447), 'geopandas.read_file', 'gpd.read_file', (['file_tmp'], {}), '(file_tmp)\n', (4437, 4447), True, 'import geopandas as gpd\n'), ((4754, 4789), 'pandas.concat', 'pd.concat', (['roads'], {'ignore_index': '(True)'}), '(roads, ignore_index=True)\n', (4763, 4789), True, 'import pandas as pd\n'), ((5192, 5211), 'os.remove', 'os.remove', (['file_tmp'], {}), '(file_tmp)\n', (5201, 5211), False, 'import os\n'), ((3366, 3418), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'geometry': [p]}"], {'crs': '"""EPSG:4326"""'}), "({'geometry': [p]}, crs='EPSG:4326')\n", (3382, 3418), True, 'import geopandas as gpd\n'), ((1858, 1868), 'OSMPythonTools.overpass.Overpass', 'Overpass', ([], {}), '()\n', (1866, 1868), False, 'from OSMPythonTools.overpass import Overpass\n')] |
from ScopeFoundry import Measurement
from ScopeFoundry.scanning.base_raster_scan import BaseRaster2DScan
import time
import numpy as np
class BaseNonRaster2DScan(BaseRaster2DScan):
name = "base_non_raster_2Dscan"
def gen_raster_scan(self, gen_arrays=True):
self.Npixels = self.Nh.val*self.Nv.val
self.scan_shape = (1, self.Nv.val, self.Nh.val)
if gen_arrays:
#print "t0", time.time() - t0
self.create_empty_scan_arrays()
#print "t1", time.time() - t0
# t0 = time.time()
# pixel_i = 0
# for jj in range(self.Nv.val):
# #print "tjj", jj, time.time() - t0
# self.scan_slow_move[pixel_i] = True
# for ii in range(self.Nh.val):
# self.scan_v_positions[pixel_i] = self.v_array[jj]
# self.scan_h_positions[pixel_i] = self.h_array[ii]
# self.scan_index_array[pixel_i,:] = [0, jj, ii]
# pixel_i += 1
# print "for loop raster gen", time.time() - t0
t0 = time.time()
H, V = np.meshgrid(self.h_array, self.v_array)
self.scan_h_positions[:] = H.flat
self.scan_v_positions[:] = V.flat
II,JJ = np.meshgrid(np.arange(self.Nh.val), np.arange(self.Nv.val))
self.scan_index_array[:,1] = JJ.flat
self.scan_index_array[:,2] = II.flat
#self.scan_v_positions
print("array flatten raster gen", time.time() - t0)
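    # A tiny sanity check (made-up values) of the meshgrid flattening above;
    # it reproduces exactly the nested-loop order (v outer, h inner) of the
    # commented-out version:
    # h = np.array([0., 1., 2.]); v = np.array([10., 20.])
    # H, V = np.meshgrid(h, v)
    # list(H.flat)  # -> [0., 1., 2., 0., 1., 2.]
    # list(V.flat)  # -> [10., 10., 10., 20., 20., 20.]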
    def gen_spiral_scan(self, gen_arrays=True):
        self.Npixels = self.Nh.val*self.Nv.val
        self.scan_shape = (1, self.Npixels)
        if gen_arrays:
            self.create_empty_scan_arrays()
            t0 = time.time()
            # Archimedean spiral r = theta: the radius grows linearly with
            # the angle, so successive turns are evenly spaced
            ix = np.linspace(0, 2*np.pi*self.Nv.val, self.Npixels)
            h = ix * np.cos(ix)
            v = ix * np.sin(ix)
            # rescale the spiral to fit inside the configured h/v ranges
            r_max = max(float(ix.max()), 1e-12)
            h_min, h_max = np.min(self.h_array), np.max(self.h_array)
            v_min, v_max = np.min(self.v_array), np.max(self.v_array)
            self.scan_h_positions[:] = 0.5*(h_min + h_max) + 0.5*(h_max - h_min)*h/r_max
            self.scan_v_positions[:] = 0.5*(v_min + v_max) + 0.5*(v_max - v_min)*v/r_max
            # pixels are indexed in acquisition order along the (1, Npixels) scan shape
            self.scan_index_array[:, 2] = np.arange(self.Npixels)
            print("spiral scan gen", time.time() - t0)
| [
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"time.time",
"numpy.arange"
] | [((1151, 1162), 'time.time', 'time.time', ([], {}), '()\n', (1160, 1162), False, 'import time\n'), ((1196, 1235), 'numpy.meshgrid', 'np.meshgrid', (['self.h_array', 'self.v_array'], {}), '(self.h_array, self.v_array)\n', (1207, 1235), True, 'import numpy as np\n'), ((2636, 2647), 'time.time', 'time.time', ([], {}), '()\n', (2645, 2647), False, 'import time\n'), ((2681, 2720), 'numpy.meshgrid', 'np.meshgrid', (['self.h_array', 'self.v_array'], {}), '(self.h_array, self.v_array)\n', (2692, 2720), True, 'import numpy as np\n'), ((1373, 1395), 'numpy.arange', 'np.arange', (['self.Nh.val'], {}), '(self.Nh.val)\n', (1382, 1395), True, 'import numpy as np\n'), ((1397, 1419), 'numpy.arange', 'np.arange', (['self.Nv.val'], {}), '(self.Nv.val)\n', (1406, 1419), True, 'import numpy as np\n'), ((2563, 2573), 'numpy.cos', 'np.cos', (['ix'], {}), '(ix)\n', (2569, 2573), True, 'import numpy as np\n'), ((2595, 2605), 'numpy.sin', 'np.sin', (['ix'], {}), '(ix)\n', (2601, 2605), True, 'import numpy as np\n'), ((2858, 2880), 'numpy.arange', 'np.arange', (['self.Nh.val'], {}), '(self.Nh.val)\n', (2867, 2880), True, 'import numpy as np\n'), ((2882, 2904), 'numpy.arange', 'np.arange', (['self.Nv.val'], {}), '(self.Nv.val)\n', (2891, 2904), True, 'import numpy as np\n'), ((1600, 1611), 'time.time', 'time.time', ([], {}), '()\n', (1609, 1611), False, 'import time\n'), ((3085, 3096), 'time.time', 'time.time', ([], {}), '()\n', (3094, 3096), False, 'import time\n')] |
import copy
import pytest
import math
import numpy as np
import pandas as pd
from hyperactive import Hyperactive
search_space = {
"x1": list(np.arange(-100, 100, 1)),
}
def test_catch_0():
def objective_function(access):
x = y
return 0
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
n_iter=100,
catch={NameError: np.nan},
)
hyper.run()
def test_catch_1():
def objective_function(access):
a = 1 + "str"
return 0
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
n_iter=100,
catch={TypeError: np.nan},
)
hyper.run()
def test_catch_2():
def objective_function(access):
math.sqrt(-10)
return 0
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
n_iter=100,
catch={ValueError: np.nan},
)
hyper.run()
def test_catch_3():
def objective_function(access):
x = 1 / 0
return 0
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
n_iter=100,
catch={ZeroDivisionError: np.nan},
)
hyper.run()
def test_catch_all_0():
def objective_function(access):
x = y
a = 1 + "str"
math.sqrt(-10)
x = 1 / 0
return 0
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
n_iter=100,
catch={
NameError: np.nan,
TypeError: np.nan,
ValueError: np.nan,
ZeroDivisionError: np.nan,
},
)
hyper.run()
nan_ = hyper.search_data(objective_function)["score"].values[0]
assert math.isnan(nan_)
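# Conceptually, the catch dict replaces a hand-written guard like the one
# sketched below (a rough equivalent for illustration, not Hyperactive's
# actual source):
# try:
#     score = objective_function(access)
# except tuple(catch) as err:
#     score = catch[type(err)]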
def test_catch_all_1():
def objective_function(access):
x = y
a = 1 + "str"
math.sqrt(-10)
x = 1 / 0
return 0, {"error": False}
catch_return = (np.nan, {"error": True})
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
n_iter=100,
catch={
NameError: catch_return,
TypeError: catch_return,
ValueError: catch_return,
ZeroDivisionError: catch_return,
},
)
hyper.run()
nan_ = hyper.search_data(objective_function)["score"].values[0]
error_ = hyper.search_data(objective_function)["error"].values[0]
assert math.isnan(nan_)
assert error_ == True
| [
"math.isnan",
"math.sqrt",
"numpy.arange",
"hyperactive.Hyperactive"
] | [((279, 292), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (290, 292), False, 'from hyperactive import Hyperactive\n'), ((553, 566), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (564, 566), False, 'from hyperactive import Hyperactive\n'), ((828, 841), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (839, 841), False, 'from hyperactive import Hyperactive\n'), ((1099, 1112), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (1110, 1112), False, 'from hyperactive import Hyperactive\n'), ((1440, 1453), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (1451, 1453), False, 'from hyperactive import Hyperactive\n'), ((1809, 1825), 'math.isnan', 'math.isnan', (['nan_'], {}), '(nan_)\n', (1819, 1825), False, 'import math\n'), ((2060, 2073), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (2071, 2073), False, 'from hyperactive import Hyperactive\n'), ((2523, 2539), 'math.isnan', 'math.isnan', (['nan_'], {}), '(nan_)\n', (2533, 2539), False, 'import math\n'), ((148, 171), 'numpy.arange', 'np.arange', (['(-100)', '(100)', '(1)'], {}), '(-100, 100, 1)\n', (157, 171), True, 'import numpy as np\n'), ((782, 796), 'math.sqrt', 'math.sqrt', (['(-10)'], {}), '(-10)\n', (791, 796), False, 'import math\n'), ((1376, 1390), 'math.sqrt', 'math.sqrt', (['(-10)'], {}), '(-10)\n', (1385, 1390), False, 'import math\n'), ((1932, 1946), 'math.sqrt', 'math.sqrt', (['(-10)'], {}), '(-10)\n', (1941, 1946), False, 'import math\n')] |
import numpy as np
from config import clusters  # a relative import ("from .") breaks inside archivedir
cluster = clusters.vsc # change cluster configuration here
class ExperimentConfiguration(object):
def __init__(self):
pass
exp = ExperimentConfiguration()
exp.expname = "exp_v1.19_wb-random_Radar_zero"
exp.model_dx = 2000
exp.n_ens = 40
exp.n_nodes = 10
exp.nature_wrfout = '/home/fs71386/lkugler/data/sim_archive/exp_v1.19_Pwbub5_nat/2008-07-30_12:00/1/wrfout_d01_%Y-%m-%d_%H:%M:%S'
#exp.input_profile = '/home/fs71386/lkugler/wrf_profiles/data/wrf/ens/2021-05-04/raso.nat.001.wrfprof'
#exp.input_profile = '/home/fs71386/lkugler/wrf_profiles/data/wrf/ens/2021-05-04/raso.nat.<iens>.wrfprof'
exp.input_profile = '/home/fs71386/lkugler/wrf_profiles/data/wrf/ens/2021-05-04/raso.fc.<iens>.wrfprof'
#exp.input_profile = '/home/fs71386/lkugler/wrf_profiles/data/wrf/ens/improved_pert10/raso.fc.<iens>.wrfprof'
# localize vertically, if the observation has a vertical position;
# a horizontal scale is needed too, to calculate the vertical normalization,
# since you cannot specify different vertical localizations for different variables
exp.cov_loc_vert_km_horiz_km = (1, 30)
#exp.superob_km = 12
n_obs = 961 #5776: 4km, 121: 30km, 256:16x16 (20km); 961: 10km resolution # radar: n_obs for each observation height level
vis = dict(plotname='VIS 0.6µm', plotunits='[1]',
kind='MSG_4_SEVIRI_BDRF', sat_channel=1, n_obs=n_obs,
error_generate=0.03, error_assimilate=0.06,
cov_loc_radius_km=30)
wv73 = dict(plotname='Brightness temperature WV 7.3µm', plotunits='[K]',
kind='MSG_4_SEVIRI_TB', sat_channel=6, n_obs=n_obs,
error_generate=1., error_assimilate=False,
cov_loc_radius_km=30)
ir108 = dict(plotname='Brightness temperature IR 10.8µm', plotunits='[K]',
kind='MSG_4_SEVIRI_TB', sat_channel=9, n_obs=n_obs,
error_generate=5., error_assimilate=10.,
cov_loc_radius_km=32)
radar = dict(plotname='Radar reflectivity', plotunits='[dBz]',
kind='RADAR_REFLECTIVITY', n_obs=n_obs,
error_generate=2.5, error_assimilate=5.,
heights=np.arange(1000, 15001, 1000),
cov_loc_radius_km=10)
t = dict(plotname='Temperature', plotunits='[K]',
kind='RADIOSONDE_TEMPERATURE', n_obs=n_obs,
error_generate=0.2, error_assimilate=0.4,
heights=np.arange(1000, 15001, 500),
cov_loc_radius_km=15)
t2m = dict(plotname='SYNOP Temperature', plotunits='[K]',
kind='SYNOP_TEMPERATURE', n_obs=n_obs,
error_generate=0.1, error_assimilate=1.,
cov_loc_radius_km=20)
psfc = dict(plotname='SYNOP Pressure', plotunits='[dBz]',
kind='SYNOP_SURFACE_PRESSURE', n_obs=n_obs,
error_generate=50., error_assimilate=100.,
cov_loc_radius_km=32)
exp.observations = [radar]  # alternatives: ir108, wv73, vis
#exp.update_vars = ['T', 'QVAPOR', 'QCLOUD', 'QICE','CLDFRA']
exp.update_vars = ['U', 'V', 'T', 'PH', 'MU', 'QVAPOR', 'QCLOUD', 'QICE', 'CLDFRA']
# directory paths depend on the name of the experiment
cluster.expname = exp.expname
| [
"numpy.arange"
] | [((2156, 2184), 'numpy.arange', 'np.arange', (['(1000)', '(15001)', '(1000)'], {}), '(1000, 15001, 1000)\n', (2165, 2184), True, 'import numpy as np\n'), ((2393, 2420), 'numpy.arange', 'np.arange', (['(1000)', '(15001)', '(500)'], {}), '(1000, 15001, 500)\n', (2402, 2420), True, 'import numpy as np\n')] |
#!/usr/bin/python
__author__ = '<NAME>'
import sys
#sys.path.insert(0, '../lib')
import numpy as np
import dynesty
class CustomNestedSampler(dynesty.NestedSampler):
def convert_to_samples(self):
self.samples = self.results.samples
def unique_rows(self):
        '''
        Remove duplicate rows from self.flatchain, sort the remainder
        lexicographically, and store the result in self.samples.
        '''
# Perform lex sort and get sorted data
sorted_idx = np.lexsort(self.flatchain.T)
sorted_data = self.flatchain[sorted_idx,:]
# Get unique row mask
row_mask = np.append([True],np.any(np.diff(sorted_data,axis=0),1))
# Get unique rows
out = sorted_data[row_mask]
self.samples = out
# same for LnL
lnL = np.hstack(self.lnprobability)
sorted_lnL = lnL[sorted_idx]
self.lnL = sorted_lnL[row_mask]
        # NB: np.argmax selects the sample with the *maximum* log-likelihood
        # (the best-fit sample); the *_min attribute names are kept as-is.
        lnL_max_idx = np.argmax(self.lnL)
        # get the sample at maximum lnL
        self.minlnL = out[lnL_max_idx]
        self.lnL_min = self.lnL[lnL_max_idx]
return
def correct_rows(self,f,ndset,npl):
'''Corrects angles and eccentricities for all samples'''
i=0
self.means=np.zeros(len(f))
for k in range(len(f)):
idx=f[k]
if (idx<2*ndset):
self.means[i]=np.mean(self.samples[:,i])
            elif (idx<2*ndset+7*npl):
                # 7 parameters per planet (K, P, e, w, M, i, lineofnodes);
                # np.mod(nr,7) gives the within-planet parameter index
                nr=idx-2*ndset
if (np.mod(nr,7)<2):
self.means[i]=np.mean(self.samples[:,i])
elif (np.mod(nr,7)==2): # correct eccentricities
for j in range(len(self.samples)):
if (self.samples[j,i]<0):
self.samples[j,i]=abs(self.samples[j,i])
#if(f[k+1]==i+1):
# self.samples[j,i+1]=self.samples[j,i+1]+180.0
#if(f[k+2]==i+2):
# self.samples[j,i+2]=self.samples[j,i+2]+-180.0
self.means[i]=np.mean(self.samples[:,i])
elif (np.mod(nr,7)==3): # correct w to be in a 360 interval around mean value
self.means[i]=np.mean(self.samples[:,i])
meanw=self.means[i]
for j in range(len(self.samples)):
self.samples[j,i]=np.where(self.samples[j,i]<meanw-180.0,self.samples[j,i]+360.0,self.samples[j,i])
self.samples[j,i]=np.where(self.samples[j,i]>meanw+180.0,self.samples[j,i]-360.0,self.samples[j,i])
# now let's make sure meanw is between 0 and 360:
newmeanw=np.fmod(meanw,360.0)
delta=newmeanw-meanw
if not (delta==0):
for j in range(len(self.samples)):
self.samples[j,i]=self.samples[j,i]+delta
elif (np.mod(nr,7)==4):# correct M to be in a 360 interval around mean value
self.means[i]=np.mean(self.samples[:,i])
meanM=self.means[i]
for j in range(len(self.samples)):
self.samples[j,i]=np.where(self.samples[j,i]<meanM-180.0,self.samples[j,i]+360.0,self.samples[j,i])
self.samples[j,i]=np.where(self.samples[j,i]>meanM+180.0,self.samples[j,i]-360.0,self.samples[j,i])
# now let's make sure meanw is between 0 and 360:
newmeanM=np.fmod(meanM,360.0)
delta=newmeanM-meanM
if not (delta==0):
for j in range(len(self.samples)):
self.samples[j,i]=self.samples[j,i]+delta
                elif (np.mod(nr,7)==5): # correct i to be in a 180 interval around mean value
                    self.means[i]=np.mean(self.samples[:,i])
                    meani=self.means[i]
                    for j in range(len(self.samples)):
                        self.samples[j,i]=np.where(self.samples[j,i]<meani-90.0,self.samples[j,i]+180.0,self.samples[j,i])
                        self.samples[j,i]=np.where(self.samples[j,i]>meani+90.0,self.samples[j,i]-180.0,self.samples[j,i])
                    # now let's make sure meani is between 0 and 180:
                    newmeani=np.fmod(meani,180.0)
                    delta=newmeani-meani
                    if not (delta==0):
                        for j in range(len(self.samples)):
                            self.samples[j,i]=self.samples[j,i]+delta
                elif (np.mod(nr,7)==6): # correct lineofnodes to be in a 360 interval around mean value
                    self.means[i]=np.mean(self.samples[:,i])
                    meancap=self.means[i]
                    for j in range(len(self.samples)):
                        self.samples[j,i]=np.where(self.samples[j,i]<meancap-180.0,self.samples[j,i]+360.0,self.samples[j,i])
                        self.samples[j,i]=np.where(self.samples[j,i]>meancap+180.0,self.samples[j,i]-360.0,self.samples[j,i])
                    # now let's make sure meancap is between 0 and 360:
                    newmeancap=np.fmod(meancap,360.0)
                    delta=newmeancap-meancap
                    if not (delta==0):
                        for j in range(len(self.samples)):
                            self.samples[j,i]=self.samples[j,i]+delta
else:
self.means[i]=np.mean(self.samples[:,i])
i=i+1
return
def save_samples(self,f,ndset,npl):
self.unique_rows()
self.correct_rows(f,ndset,npl)
return
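# A self-contained sketch of the deduplication idiom used in unique_rows()
# above (np.lexsort + row-difference mask). The toy array is illustrative;
# note that np.lexsort treats the *last* column as the primary sort key.
def _unique_sorted_rows(arr):
    """Return the unique rows of a 2D array, sorted in np.lexsort order."""
    sorted_idx = np.lexsort(arr.T)
    sorted_arr = arr[sorted_idx, :]
    # keep the first row, then every row that differs from its predecessor
    row_mask = np.append([True], np.any(np.diff(sorted_arr, axis=0), 1))
    return sorted_arr[row_mask]

# Example: _unique_sorted_rows(np.array([[1, 2], [1, 2], [0, 5]]))
# -> array([[1, 2], [0, 5]])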
| [
"numpy.mean",
"numpy.hstack",
"numpy.where",
"numpy.diff",
"numpy.argmax",
"numpy.lexsort",
"numpy.fmod",
"numpy.mod"
] | [((467, 495), 'numpy.lexsort', 'np.lexsort', (['self.flatchain.T'], {}), '(self.flatchain.T)\n', (477, 495), True, 'import numpy as np\n'), ((801, 830), 'numpy.hstack', 'np.hstack', (['self.lnprobability'], {}), '(self.lnprobability)\n', (810, 830), True, 'import numpy as np\n'), ((967, 986), 'numpy.argmax', 'np.argmax', (['self.lnL'], {}), '(self.lnL)\n', (976, 986), True, 'import numpy as np\n'), ((622, 650), 'numpy.diff', 'np.diff', (['sorted_data'], {'axis': '(0)'}), '(sorted_data, axis=0)\n', (629, 650), True, 'import numpy as np\n'), ((1455, 1482), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (1462, 1482), True, 'import numpy as np\n'), ((1609, 1622), 'numpy.mod', 'np.mod', (['nr', '(7)'], {}), '(nr, 7)\n', (1615, 1622), True, 'import numpy as np\n'), ((1660, 1687), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (1667, 1687), True, 'import numpy as np\n'), ((4106, 4133), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (4113, 4133), True, 'import numpy as np\n'), ((4578, 4599), 'numpy.fmod', 'np.fmod', (['meani', '(180.0)'], {}), '(meani, 180.0)\n', (4585, 4599), True, 'import numpy as np\n'), ((1710, 1723), 'numpy.mod', 'np.mod', (['nr', '(7)'], {}), '(nr, 7)\n', (1716, 1723), True, 'import numpy as np\n'), ((2217, 2244), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (2224, 2244), True, 'import numpy as np\n'), ((4283, 4378), 'numpy.where', 'np.where', (['(self.samples[j, i] < meani - 90.0)', '(self.samples[j, i] + 180.0)', 'self.samples[j, i]'], {}), '(self.samples[j, i] < meani - 90.0, self.samples[j, i] + 180.0,\n self.samples[j, i])\n', (4291, 4378), True, 'import numpy as np\n'), ((4406, 4501), 'numpy.where', 'np.where', (['(self.samples[j, i] > meani + 90.0)', '(self.samples[j, i] - 180.0)', 'self.samples[j, i]'], {}), '(self.samples[j, i] > meani + 90.0, self.samples[j, i] - 180.0,\n self.samples[j, i])\n', (4414, 4501), True, 'import numpy as np\n'), ((4944, 4971), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (4951, 4971), True, 'import numpy as np\n'), ((5417, 5440), 'numpy.fmod', 'np.fmod', (['meancap', '(360.0)'], {}), '(meancap, 360.0)\n', (5424, 5440), True, 'import numpy as np\n'), ((5695, 5722), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (5702, 5722), True, 'import numpy as np\n'), ((2272, 2285), 'numpy.mod', 'np.mod', (['nr', '(7)'], {}), '(nr, 7)\n', (2278, 2285), True, 'import numpy as np\n'), ((2379, 2406), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (2386, 2406), True, 'import numpy as np\n'), ((2869, 2890), 'numpy.fmod', 'np.fmod', (['meanw', '(360.0)'], {}), '(meanw, 360.0)\n', (2876, 2890), True, 'import numpy as np\n'), ((5112, 5210), 'numpy.where', 'np.where', (['(self.samples[j, i] < meancap - 180.0)', '(self.samples[j, i] + 360.0)', 'self.samples[j, i]'], {}), '(self.samples[j, i] < meancap - 180.0, self.samples[j, i] + 360.0,\n self.samples[j, i])\n', (5120, 5210), True, 'import numpy as np\n'), ((5238, 5336), 'numpy.where', 'np.where', (['(self.samples[j, i] > meancap + 180.0)', '(self.samples[j, i] - 360.0)', 'self.samples[j, i]'], {}), '(self.samples[j, i] > meancap + 180.0, self.samples[j, i] - 360.0,\n self.samples[j, i])\n', (5246, 5336), True, 'import numpy as np\n'), ((2560, 2656), 'numpy.where', 'np.where', (['(self.samples[j, i] < meanw - 180.0)', '(self.samples[j, i] + 360.0)', 'self.samples[j, 
i]'], {}), '(self.samples[j, i] < meanw - 180.0, self.samples[j, i] + 360.0,\n self.samples[j, i])\n', (2568, 2656), True, 'import numpy as np\n'), ((2688, 2784), 'numpy.where', 'np.where', (['(self.samples[j, i] > meanw + 180.0)', '(self.samples[j, i] - 360.0)', 'self.samples[j, i]'], {}), '(self.samples[j, i] > meanw + 180.0, self.samples[j, i] - 360.0,\n self.samples[j, i])\n', (2696, 2784), True, 'import numpy as np\n'), ((3141, 3154), 'numpy.mod', 'np.mod', (['nr', '(7)'], {}), '(nr, 7)\n', (3147, 3154), True, 'import numpy as np\n'), ((3246, 3273), 'numpy.mean', 'np.mean', (['self.samples[:, i]'], {}), '(self.samples[:, i])\n', (3253, 3273), True, 'import numpy as np\n'), ((3735, 3756), 'numpy.fmod', 'np.fmod', (['meanM', '(360.0)'], {}), '(meanM, 360.0)\n', (3742, 3756), True, 'import numpy as np\n'), ((3426, 3522), 'numpy.where', 'np.where', (['(self.samples[j, i] < meanM - 180.0)', '(self.samples[j, i] + 360.0)', 'self.samples[j, i]'], {}), '(self.samples[j, i] < meanM - 180.0, self.samples[j, i] + 360.0,\n self.samples[j, i])\n', (3434, 3522), True, 'import numpy as np\n'), ((3554, 3650), 'numpy.where', 'np.where', (['(self.samples[j, i] > meanM + 180.0)', '(self.samples[j, i] - 360.0)', 'self.samples[j, i]'], {}), '(self.samples[j, i] > meanM + 180.0, self.samples[j, i] - 360.0,\n self.samples[j, i])\n', (3562, 3650), True, 'import numpy as np\n')] |
from collections import defaultdict
from contextlib import closing
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np # type: ignore
import pandas as pd # type: ignore
from tables import Filters # type: ignore
from tables import open_file
from pullframe import api
from pullframe.types import CacheFormat
class PyTables(api.Persist):
def __init__(
self, directory: Path, complib: str = "blosc", complevel: int = 9
):
super().__init__(directory)
self.complib = complib
self.complevel = complevel
@classmethod
def on(cls, directory: Path) -> "PyTables":
return cls(directory)
def load(
self,
name: str,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
include_start: bool = True,
) -> pd.DataFrame:
with self.__reading_name(name) as h5:
return _read_from_f(h5, start, end, include_start)
def save(self, name: str, df: pd.DataFrame) -> None:
if not self.exists(name):
return self.write(name, df)
prev = self.load(name)
new = prev.reindex(
index=prev.index.union(df.index),
columns=prev.columns.union(df.columns),
)
new.loc[df.index, df.columns] = df.values
self.write(name, new)
def exists(self, name: str) -> bool:
return self.path(name).exists()
def last_index(self, name: str) -> datetime:
with self.__reading_name(name) as h5:
return _load_index(h5)[-1]
def update(self, name: str, path: Path) -> None:
with self.__reading_file(path) as h5:
append = _read_from_f(h5)
self.save(name, append)
@classmethod
def format(cls):
return CacheFormat.PYTABLES
@staticmethod
def suffix():
return "h5"
def write(self, name: str, df: pd.DataFrame) -> None:
self.dump(self.path(name), df)
def dump(self, path: Path, df: pd.DataFrame) -> None:
with self.__writing_file(path) as h5:
_write_to_f(h5, df)
def __reading_name(self, name: str):
return self.__reading_file(self.path(name))
def __reading_file(self, path: Path):
return closing(open_file(path, mode="r"))
def __writing_file(self, path: Path):
filters = Filters(complib=self.complib, complevel=self.complevel)
return closing(open_file(path, mode="w", filters=filters))
Index = int
def _read_from_f(
h5,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
include_start=True,
) -> pd.DataFrame:
index = _load_index(h5)
    if start is None:
        start_idx: int = 0
    elif include_start:
        start_idx = np.searchsorted(index, start)
    else:
        # exclusive start: skip rows exactly equal to `start` as well
        start_idx = np.searchsorted(index, start, side="right")
if end is None:
end_idx: int = len(index) # type: ignore
else:
end_idx = np.searchsorted(index, end, side="right")
all_columns = h5.get_node(h5.root, "all_columns").read()
dtypes = h5.get_node(h5.root, "dtypes").read()
df_list = []
for dtyp in dtypes:
node = f"/data/{dtyp.decode()}"
values = h5.get_node(node, "data")[start_idx:end_idx]
columns = h5.get_node(node, "columns").read()
if dtyp == b"str":
values = values.astype("str")
elif dtyp == b"datetime":
values = np.vstack(
[pd.to_datetime(values[:, i]) for i in range(values.shape[1])]
).T
df = pd.DataFrame(
index=index[start_idx:end_idx], columns=columns, data=values
)
df_list.append(df)
df = pd.concat(df_list, axis=1)[all_columns]
if df.columns.dtype == "object":
df.columns = [i.decode() for i in df.columns]
return df
def _load_index(h5):
index = h5.get_node(h5.root, "index").read()
return pd.to_datetime(index)
def _write_to_f(h5, df: pd.DataFrame) -> None:
dtype_to_col_indexes = _dtype_to_col_indexes(df.dtypes)
h5.create_array(
h5.root, "index", df.index.values.astype(np.float64), "index"
)
h5.create_array(h5.root, "all_columns", df.columns.tolist(), "all_columns")
data_grp = h5.create_group(h5.root, "data", "data group")
h5.create_array(
h5.root,
"dtypes",
[_name(i) for i in dtype_to_col_indexes.keys()],
"dtypes",
)
for dtype, indexes in dtype_to_col_indexes.items():
data = df.iloc[:, indexes]
dtype_name = _name(dtype)
group = h5.create_group(data_grp, dtype_name, f"{dtype_name} group")
if dtype_name == "str":
arr = data.values
arr = arr.astype("U")
elif dtype_name == "datetime":
arr = data.values.astype(np.float64)
else:
arr = data.values
h5.create_carray(
where=group, name="data", obj=arr, title=f"{dtype_name} data"
)
h5.create_array(
group, "columns", data.columns.tolist(), f"{dtype_name} columns"
)
def _dtype_to_col_indexes(dtypes) -> Dict[np.dtype, List[Index]]:
result = defaultdict(list)
for i, dtype in enumerate(dtypes):
result[dtype].append(i)
return result
def _name(dtype):
if dtype.name == "object":
return "str"
elif dtype.name == "datetime64[ns]":
return "datetime"
else:
return dtype.name
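# A hedged usage sketch (not part of the module): round-tripping a small frame
# through the backend above. It assumes api.Persist.path() resolves cache files
# inside `directory`; the /tmp path below is illustrative only.
if __name__ == "__main__":
    demo_dir = Path("/tmp/pullframe-demo")
    demo_dir.mkdir(parents=True, exist_ok=True)
    demo_df = pd.DataFrame(
        {"a": [1.0, 2.0]},
        index=[datetime(2020, 1, 1), datetime(2020, 1, 2)],
    )
    store = PyTables.on(demo_dir)
    store.save("demo", demo_df)
    # exclusive start: only the 2020-01-02 row should come back
    print(store.load("demo", start=datetime(2020, 1, 1), include_start=False))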
| [
"pandas.DataFrame",
"numpy.searchsorted",
"tables.open_file",
"collections.defaultdict",
"tables.Filters",
"pandas.concat",
"pandas.to_datetime"
] | [((3939, 3960), 'pandas.to_datetime', 'pd.to_datetime', (['index'], {}), '(index)\n', (3953, 3960), True, 'import pandas as pd\n'), ((5189, 5206), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5200, 5206), False, 'from collections import defaultdict\n'), ((2383, 2438), 'tables.Filters', 'Filters', ([], {'complib': 'self.complib', 'complevel': 'self.complevel'}), '(complib=self.complib, complevel=self.complevel)\n', (2390, 2438), False, 'from tables import Filters\n'), ((2773, 2802), 'numpy.searchsorted', 'np.searchsorted', (['index', 'start'], {}), '(index, start)\n', (2788, 2802), True, 'import numpy as np\n'), ((2978, 3019), 'numpy.searchsorted', 'np.searchsorted', (['index', 'end'], {'side': '"""right"""'}), "(index, end, side='right')\n", (2993, 3019), True, 'import numpy as np\n'), ((3576, 3650), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index[start_idx:end_idx]', 'columns': 'columns', 'data': 'values'}), '(index=index[start_idx:end_idx], columns=columns, data=values)\n', (3588, 3650), True, 'import pandas as pd\n'), ((3710, 3736), 'pandas.concat', 'pd.concat', (['df_list'], {'axis': '(1)'}), '(df_list, axis=1)\n', (3719, 3736), True, 'import pandas as pd\n'), ((2295, 2320), 'tables.open_file', 'open_file', (['path'], {'mode': '"""r"""'}), "(path, mode='r')\n", (2304, 2320), False, 'from tables import open_file\n'), ((2462, 2504), 'tables.open_file', 'open_file', (['path'], {'mode': '"""w"""', 'filters': 'filters'}), "(path, mode='w', filters=filters)\n", (2471, 2504), False, 'from tables import open_file\n'), ((3484, 3512), 'pandas.to_datetime', 'pd.to_datetime', (['values[:, i]'], {}), '(values[:, i])\n', (3498, 3512), True, 'import pandas as pd\n')] |
"""Test module for model-based class metafeatures."""
import pytest
from pymfe.mfe import MFE
from tests.utils import load_xy
import numpy as np
GNAME = "model-based"
class TestModelBased:
"""TestClass dedicated to test model-based metafeatures."""
@pytest.mark.parametrize(
"dt_id, ft_name, exp_value, precompute",
[
###################
# Mixed data
###################
(0, "leaves", 13, True),
(0, "leaves_branch", [4.6153846, 1.4455945], True),
(0, "leaves_corrob", [0.07692308, 0.058791243], True),
(0, "leaves_homo", [84.933334, 41.648125], True),
(0, "leaves_per_class", [0.5, 0.05439285], True),
(0, "nodes", 12, True),
(0, "nodes_per_attr", 1.0909090909090908, True),
(0, "nodes_per_inst", 0.24, True),
(0, "nodes_per_level", [2.0, 0.8944272], True),
(0, "nodes_repeated", [3.0, 2.828427], True),
(0, "tree_depth", [3.84, 1.6753109], True),
(0, "tree_imbalance", [0.16146065, 0.113601856], True),
(0, "tree_shape", [0.20192307, 0.1227767], True),
(0, "var_importance", [0.09090909, 0.1993217], True),
(0, "leaves", 13, False),
(0, "leaves_branch", [4.6153846, 1.4455945], False),
(0, "leaves_corrob", [0.07692308, 0.058791243], False),
(0, "leaves_homo", [84.933334, 41.648125], False),
(0, "leaves_per_class", [0.5, 0.05439285], False),
(0, "nodes", 12, False),
(0, "nodes_per_attr", 1.0909090909090908, False),
(0, "nodes_per_inst", 0.24, False),
(0, "nodes_per_level", [2.0, 0.8944272], False),
(0, "nodes_repeated", [3.0, 2.828427], False),
(0, "tree_depth", [3.84, 1.6753109], False),
(0, "tree_imbalance", [0.16146065, 0.113601856], False),
(0, "tree_shape", [0.20192307, 0.1227767], False),
(0, "var_importance", [0.09090909, 0.1993217], False),
###################
# Categorical data
###################
(1, "leaves", 57, True),
(1, "leaves_branch", [9.140351, 3.136414], True),
(1, "leaves_corrob", [0.01754386, 0.04135247], True),
(1, "leaves_homo", [18342.629, 45953.414], True),
(1, "leaves_per_class", [0.5, 0.11164843], True),
(1, "nodes", 56, True),
(1, "nodes_per_attr", 1.4736842105263157, True),
(1, "nodes_per_inst", 0.017521902377972465, True),
(1, "nodes_per_level", [3.5, 2.4221203], True),
(1, "nodes_repeated", [1.6969697, 0.88334763], True),
(1, "tree_depth", [8.230088, 3.305863], True),
(1, "tree_imbalance", [0.05483275, 0.092559], True),
(1, "tree_shape", [0.052245557, 0.09386974], True),
(1, "var_importance", [0.02631579, 0.06340529], True),
(1, "leaves", 57, False),
(1, "leaves_branch", [9.140351, 3.136414], False),
(1, "leaves_corrob", [0.01754386, 0.04135247], False),
(1, "leaves_homo", [18342.629, 45953.414], False),
(1, "leaves_per_class", [0.5, 0.11164843], False),
(1, "nodes", 56, False),
(1, "nodes_per_attr", 1.4736842105263157, False),
(1, "nodes_per_inst", 0.017521902377972465, False),
(1, "nodes_per_level", [3.5, 2.4221203], False),
(1, "nodes_repeated", [1.6969697, 0.88334763], False),
(1, "tree_depth", [8.230088, 3.305863], False),
(1, "tree_imbalance", [0.05483275, 0.092559], False),
(1, "tree_shape", [0.052245557, 0.09386974], False),
(1, "var_importance", [0.02631579, 0.06340529], False),
###################
# Numerical data
###################
(2, "leaves", 9, True),
(2, "leaves_branch", [3.7777777, 1.2018504], True),
(2, "leaves_corrob", [0.11111111, 0.15051763], True),
(2, "leaves_homo", [37.466667, 13.142298], True),
(2, "leaves_per_class", [0.33333334, 0.22222224], True),
(2, "nodes", 8, True),
(2, "nodes_per_attr", 2.0, True),
(2, "nodes_per_inst", 0.05333333333333334, True),
(2, "nodes_per_level", [1.6, 0.8944272], True),
(2, "nodes_repeated", [2.0, 1.1547005], True),
(2, "tree_depth", [3.0588236, 1.4348601], True),
(2, "tree_imbalance", [0.19491705, 0.1330071], True),
(2, "tree_shape", [0.27083334, 0.107119605], True),
(2, "var_importance", [0.24999999, 0.27823895], True),
(2, "leaves", 9, False),
(2, "leaves_branch", [3.7777777, 1.2018504], False),
(2, "leaves_corrob", [0.11111111, 0.15051763], False),
(2, "leaves_homo", [37.466667, 13.142298], False),
(2, "leaves_per_class", [0.33333334, 0.22222224], False),
(2, "nodes", 8, False),
(2, "nodes_per_attr", 2.0, False),
(2, "nodes_per_inst", 0.05333333333333334, False),
(2, "nodes_per_level", [1.6, 0.8944272], False),
(2, "nodes_repeated", [2.0, 1.1547005], False),
(2, "tree_depth", [3.0588236, 1.4348601], False),
(2, "tree_imbalance", [0.19491705, 0.1330071], False),
(2, "tree_shape", [0.27083334, 0.107119605], False),
(2, "var_importance", [0.24999999, 0.27823895], False),
])
def test_ft_methods_model_based_01(self, dt_id, ft_name, exp_value,
precompute):
"""Function to test each meta-feature belongs to model-based group.
"""
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(groups=[GNAME], features=[ft_name], random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
value = mfe.extract()[1]
if exp_value is np.nan:
assert value[0] is exp_value
else:
assert np.allclose(value, exp_value)
@pytest.mark.parametrize(
"dt_id, ft_name, exp_value, precompute",
[
###################
# Mixed data
###################
(0, "leaves", 7, True),
(0, "leaves_branch", [3.7142856, 1.7043363], True),
(0, "leaves_corrob", [0.14285713, 0.06575568], True),
(0, "leaves_homo", [32.266666, 15.709021], True),
(0, "leaves_per_class", [0.5, 0.30304578], True),
(0, "nodes", 6, True),
(0, "nodes_per_attr", 0.5454545454545454, True),
(0, "nodes_per_inst", 0.12, True),
(0, "nodes_per_level", [1.2, 0.4472136], True),
(0, "nodes_repeated", [3.0, 1.4142135], True),
(0, "tree_depth", [3.0769231, 1.7541162], True),
(0, "tree_imbalance", [0.19825712, 0.11291388], True),
(0, "tree_shape", [0.2857143, 0.16675964], True),
(0, "var_importance", [0.09090909, 0.2417293], True),
(0, "leaves", 7, False),
(0, "leaves_branch", [3.7142856, 1.7043363], False),
(0, "leaves_corrob", [0.14285713, 0.06575568], False),
(0, "leaves_homo", [32.266666, 15.709021], False),
(0, "leaves_per_class", [0.5, 0.30304578], False),
(0, "nodes", 6, False),
(0, "nodes_per_attr", 0.5454545454545454, False),
(0, "nodes_per_inst", 0.12, False),
(0, "nodes_per_level", [1.2, 0.4472136], False),
(0, "nodes_repeated", [3.0, 1.4142135], False),
(0, "tree_depth", [3.0769231, 1.7541162], False),
(0, "tree_imbalance", [0.19825712, 0.11291388], False),
(0, "tree_shape", [0.2857143, 0.16675964], False),
(0, "var_importance", [0.09090909, 0.2417293], False),
###################
# Categorical data
###################
(1, "leaves", 10, True),
(1, "leaves_branch", [4.3, 1.4944341], True),
(1, "leaves_corrob", [0.1, 0.08727827], True),
(1, "leaves_homo", [55.2, 18.552029], True),
(1, "leaves_per_class", [0.5, 0.2828427], True),
(1, "nodes", 9, True),
(1, "nodes_per_attr", 0.23684210526315788, True),
(1, "nodes_per_inst", 0.002816020025031289, True),
(1, "nodes_per_level", [1.8, 1.3038405], True),
(1, "nodes_repeated", [1.125, 0.35355338], True),
(1, "tree_depth", [3.5789473, 1.6437014], True),
(1, "tree_imbalance", [0.25800052, 0.0827512], True),
(1, "tree_shape", [0.225, 0.14493772], True),
(1, "var_importance", [0.02631579, 0.07277515], True),
(1, "leaves", 10, False),
(1, "leaves_branch", [4.3, 1.4944341], False),
(1, "leaves_corrob", [0.1, 0.08727827], False),
(1, "leaves_homo", [55.2, 18.552029], False),
(1, "leaves_per_class", [0.5, 0.2828427], False),
(1, "nodes", 9, False),
(1, "nodes_per_attr", 0.23684210526315788, False),
(1, "nodes_per_inst", 0.002816020025031289, False),
(1, "nodes_per_level", [1.8, 1.3038405], False),
(1, "nodes_repeated", [1.125, 0.35355338], False),
(1, "tree_depth", [3.5789473, 1.6437014], False),
(1, "tree_imbalance", [0.25800052, 0.0827512], False),
(1, "tree_shape", [0.225, 0.14493772], False),
(1, "var_importance", [0.02631579, 0.07277515], False),
###################
# Numerical data
###################
(2, "leaves", 6, True),
(2, "leaves_branch", [3.0, 1.0954452], True),
(2, "leaves_corrob", [0.16666667, 0.15927614], True),
(2, "leaves_homo", [18.0, 4.8989797], True),
(2, "leaves_per_class", [0.33333334, 0.28867516], True),
(2, "nodes", 5, True),
(2, "nodes_per_attr", 1.25, True),
(2, "nodes_per_inst", 0.03333333333333333, True),
(2, "nodes_per_level", [1.25, 0.5], True),
(2, "nodes_repeated", [2.5, 0.70710677], True),
(2, "tree_depth", [2.3636363, 1.2862914], True),
(2, "tree_imbalance", [0.2524478, 0.1236233], True),
(2, "tree_shape", [0.35416666, 0.094096586], True),
(2, "var_importance", [0.25, 0.31985083], True),
(2, "leaves", 6, False),
(2, "leaves_branch", [3.0, 1.0954452], False),
(2, "leaves_corrob", [0.16666667, 0.15927614], False),
(2, "leaves_homo", [18.0, 4.8989797], False),
(2, "leaves_per_class", [0.33333334, 0.28867516], False),
(2, "nodes", 5, False),
(2, "nodes_per_attr", 1.25, False),
(2, "nodes_per_inst", 0.03333333333333333, False),
(2, "nodes_per_level", [1.25, 0.5], False),
(2, "nodes_repeated", [2.5, 0.70710677], False),
(2, "tree_depth", [2.3636363, 1.2862914], False),
(2, "tree_imbalance", [0.2524478, 0.1236233], False),
(2, "tree_shape", [0.35416666, 0.094096586], False),
(2, "var_importance", [0.25, 0.31985083], False),
])
def test_ft_methods_model_based_02(self, dt_id, ft_name, exp_value,
precompute):
"""Function to test each meta-feature belongs to model-based group.
"""
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(
groups=[GNAME],
features=[ft_name],
hypparam_model_dt={
"max_depth": 5,
"min_samples_split": 10,
"criterion": "entropy",
},
random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
if precomp_group is None:
# Note: the precomputation of 'model-based' group is always
# forced due to the need of the 'dt_model' value
mfe._precomp_args_ft = {
"dt_model": mfe._precomp_args_ft.get("dt_model")
}
value = mfe.extract()[1]
if exp_value is np.nan:
assert value[0] is exp_value
else:
assert np.allclose(value, exp_value)
@pytest.mark.parametrize(
"dt_id, exp_value, precompute",
[
###################
# Mixed data
###################
(0, [
13, 4.6153846, 0.07692308, 84.933334, 0.5, 12,
1.0909090909090908, 0.24, 2.0, 3.0, 3.84, 0.16146065,
0.20192307, 0.09090909
], False),
(0, [
13, 4.6153846, 0.07692308, 84.933334, 0.5, 12,
1.0909090909090908, 0.24, 2.0, 3.0, 3.84, 0.16146065,
0.20192307, 0.09090909
], True),
###################
# Numerical data
###################
(2, [
9, 3.7777777, 0.11111111, 37.466667, 0.33333334, 8, 2.0,
0.05333333333333334, 1.6, 2.0, 3.0588236, 0.19491705,
0.27083334, 0.24999999
], False),
(2, [
9, 3.7777777, 0.11111111, 37.466667, 0.33333334, 8, 2.0,
0.05333333333333334, 1.6, 2.0, 3.0588236, 0.19491705,
0.27083334, 0.24999999
], True),
])
def test_integration_model_based(self, dt_id, exp_value, precompute):
"""Function to test all model-based meta-features.
"""
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(groups=[GNAME], summary="mean", random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
value = mfe.extract()[1]
assert np.allclose(value, exp_value, equal_nan=True)
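# A hedged stand-alone sketch (not part of the test class): extracting the
# model-based group directly with pymfe on random data; values are illustrative.
if __name__ == "__main__":
    rng = np.random.RandomState(1234)
    X_demo = rng.rand(50, 4)
    y_demo = rng.randint(0, 2, size=50)
    demo_mfe = MFE(groups=[GNAME], random_state=1234)
    demo_mfe.fit(X_demo, y_demo)
    names, values = demo_mfe.extract()
    print(dict(zip(names, values)))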
| [
"pymfe.mfe.MFE",
"pytest.mark.parametrize",
"numpy.allclose",
"tests.utils.load_xy"
] | [((263, 4535), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt_id, ft_name, exp_value, precompute"""', "[(0, 'leaves', 13, True), (0, 'leaves_branch', [4.6153846, 1.4455945], True\n ), (0, 'leaves_corrob', [0.07692308, 0.058791243], True), (0,\n 'leaves_homo', [84.933334, 41.648125], True), (0, 'leaves_per_class', [\n 0.5, 0.05439285], True), (0, 'nodes', 12, True), (0, 'nodes_per_attr', \n 1.0909090909090908, True), (0, 'nodes_per_inst', 0.24, True), (0,\n 'nodes_per_level', [2.0, 0.8944272], True), (0, 'nodes_repeated', [3.0,\n 2.828427], True), (0, 'tree_depth', [3.84, 1.6753109], True), (0,\n 'tree_imbalance', [0.16146065, 0.113601856], True), (0, 'tree_shape', [\n 0.20192307, 0.1227767], True), (0, 'var_importance', [0.09090909, \n 0.1993217], True), (0, 'leaves', 13, False), (0, 'leaves_branch', [\n 4.6153846, 1.4455945], False), (0, 'leaves_corrob', [0.07692308, \n 0.058791243], False), (0, 'leaves_homo', [84.933334, 41.648125], False),\n (0, 'leaves_per_class', [0.5, 0.05439285], False), (0, 'nodes', 12, \n False), (0, 'nodes_per_attr', 1.0909090909090908, False), (0,\n 'nodes_per_inst', 0.24, False), (0, 'nodes_per_level', [2.0, 0.8944272],\n False), (0, 'nodes_repeated', [3.0, 2.828427], False), (0, 'tree_depth',\n [3.84, 1.6753109], False), (0, 'tree_imbalance', [0.16146065, \n 0.113601856], False), (0, 'tree_shape', [0.20192307, 0.1227767], False),\n (0, 'var_importance', [0.09090909, 0.1993217], False), (1, 'leaves', 57,\n True), (1, 'leaves_branch', [9.140351, 3.136414], True), (1,\n 'leaves_corrob', [0.01754386, 0.04135247], True), (1, 'leaves_homo', [\n 18342.629, 45953.414], True), (1, 'leaves_per_class', [0.5, 0.11164843],\n True), (1, 'nodes', 56, True), (1, 'nodes_per_attr', 1.4736842105263157,\n True), (1, 'nodes_per_inst', 0.017521902377972465, True), (1,\n 'nodes_per_level', [3.5, 2.4221203], True), (1, 'nodes_repeated', [\n 1.6969697, 0.88334763], True), (1, 'tree_depth', [8.230088, 3.305863], \n True), (1, 'tree_imbalance', [0.05483275, 0.092559], True), (1,\n 'tree_shape', [0.052245557, 0.09386974], True), (1, 'var_importance', [\n 0.02631579, 0.06340529], True), (1, 'leaves', 57, False), (1,\n 'leaves_branch', [9.140351, 3.136414], False), (1, 'leaves_corrob', [\n 0.01754386, 0.04135247], False), (1, 'leaves_homo', [18342.629, \n 45953.414], False), (1, 'leaves_per_class', [0.5, 0.11164843], False),\n (1, 'nodes', 56, False), (1, 'nodes_per_attr', 1.4736842105263157, \n False), (1, 'nodes_per_inst', 0.017521902377972465, False), (1,\n 'nodes_per_level', [3.5, 2.4221203], False), (1, 'nodes_repeated', [\n 1.6969697, 0.88334763], False), (1, 'tree_depth', [8.230088, 3.305863],\n False), (1, 'tree_imbalance', [0.05483275, 0.092559], False), (1,\n 'tree_shape', [0.052245557, 0.09386974], False), (1, 'var_importance',\n [0.02631579, 0.06340529], False), (2, 'leaves', 9, True), (2,\n 'leaves_branch', [3.7777777, 1.2018504], True), (2, 'leaves_corrob', [\n 0.11111111, 0.15051763], True), (2, 'leaves_homo', [37.466667, \n 13.142298], True), (2, 'leaves_per_class', [0.33333334, 0.22222224], \n True), (2, 'nodes', 8, True), (2, 'nodes_per_attr', 2.0, True), (2,\n 'nodes_per_inst', 0.05333333333333334, True), (2, 'nodes_per_level', [\n 1.6, 0.8944272], True), (2, 'nodes_repeated', [2.0, 1.1547005], True),\n (2, 'tree_depth', [3.0588236, 1.4348601], True), (2, 'tree_imbalance',\n [0.19491705, 0.1330071], True), (2, 'tree_shape', [0.27083334, \n 0.107119605], True), (2, 'var_importance', [0.24999999, 0.27823895], \n True), (2, 'leaves', 9, False), (2, 
'leaves_branch', [3.7777777, \n 1.2018504], False), (2, 'leaves_corrob', [0.11111111, 0.15051763], \n False), (2, 'leaves_homo', [37.466667, 13.142298], False), (2,\n 'leaves_per_class', [0.33333334, 0.22222224], False), (2, 'nodes', 8, \n False), (2, 'nodes_per_attr', 2.0, False), (2, 'nodes_per_inst', \n 0.05333333333333334, False), (2, 'nodes_per_level', [1.6, 0.8944272], \n False), (2, 'nodes_repeated', [2.0, 1.1547005], False), (2,\n 'tree_depth', [3.0588236, 1.4348601], False), (2, 'tree_imbalance', [\n 0.19491705, 0.1330071], False), (2, 'tree_shape', [0.27083334, \n 0.107119605], False), (2, 'var_importance', [0.24999999, 0.27823895], \n False)]"], {}), "('dt_id, ft_name, exp_value, precompute', [(0,\n 'leaves', 13, True), (0, 'leaves_branch', [4.6153846, 1.4455945], True),\n (0, 'leaves_corrob', [0.07692308, 0.058791243], True), (0,\n 'leaves_homo', [84.933334, 41.648125], True), (0, 'leaves_per_class', [\n 0.5, 0.05439285], True), (0, 'nodes', 12, True), (0, 'nodes_per_attr', \n 1.0909090909090908, True), (0, 'nodes_per_inst', 0.24, True), (0,\n 'nodes_per_level', [2.0, 0.8944272], True), (0, 'nodes_repeated', [3.0,\n 2.828427], True), (0, 'tree_depth', [3.84, 1.6753109], True), (0,\n 'tree_imbalance', [0.16146065, 0.113601856], True), (0, 'tree_shape', [\n 0.20192307, 0.1227767], True), (0, 'var_importance', [0.09090909, \n 0.1993217], True), (0, 'leaves', 13, False), (0, 'leaves_branch', [\n 4.6153846, 1.4455945], False), (0, 'leaves_corrob', [0.07692308, \n 0.058791243], False), (0, 'leaves_homo', [84.933334, 41.648125], False),\n (0, 'leaves_per_class', [0.5, 0.05439285], False), (0, 'nodes', 12, \n False), (0, 'nodes_per_attr', 1.0909090909090908, False), (0,\n 'nodes_per_inst', 0.24, False), (0, 'nodes_per_level', [2.0, 0.8944272],\n False), (0, 'nodes_repeated', [3.0, 2.828427], False), (0, 'tree_depth',\n [3.84, 1.6753109], False), (0, 'tree_imbalance', [0.16146065, \n 0.113601856], False), (0, 'tree_shape', [0.20192307, 0.1227767], False),\n (0, 'var_importance', [0.09090909, 0.1993217], False), (1, 'leaves', 57,\n True), (1, 'leaves_branch', [9.140351, 3.136414], True), (1,\n 'leaves_corrob', [0.01754386, 0.04135247], True), (1, 'leaves_homo', [\n 18342.629, 45953.414], True), (1, 'leaves_per_class', [0.5, 0.11164843],\n True), (1, 'nodes', 56, True), (1, 'nodes_per_attr', 1.4736842105263157,\n True), (1, 'nodes_per_inst', 0.017521902377972465, True), (1,\n 'nodes_per_level', [3.5, 2.4221203], True), (1, 'nodes_repeated', [\n 1.6969697, 0.88334763], True), (1, 'tree_depth', [8.230088, 3.305863], \n True), (1, 'tree_imbalance', [0.05483275, 0.092559], True), (1,\n 'tree_shape', [0.052245557, 0.09386974], True), (1, 'var_importance', [\n 0.02631579, 0.06340529], True), (1, 'leaves', 57, False), (1,\n 'leaves_branch', [9.140351, 3.136414], False), (1, 'leaves_corrob', [\n 0.01754386, 0.04135247], False), (1, 'leaves_homo', [18342.629, \n 45953.414], False), (1, 'leaves_per_class', [0.5, 0.11164843], False),\n (1, 'nodes', 56, False), (1, 'nodes_per_attr', 1.4736842105263157, \n False), (1, 'nodes_per_inst', 0.017521902377972465, False), (1,\n 'nodes_per_level', [3.5, 2.4221203], False), (1, 'nodes_repeated', [\n 1.6969697, 0.88334763], False), (1, 'tree_depth', [8.230088, 3.305863],\n False), (1, 'tree_imbalance', [0.05483275, 0.092559], False), (1,\n 'tree_shape', [0.052245557, 0.09386974], False), (1, 'var_importance',\n [0.02631579, 0.06340529], False), (2, 'leaves', 9, True), (2,\n 'leaves_branch', [3.7777777, 1.2018504], True), (2, 'leaves_corrob', [\n 0.11111111, 
0.15051763], True), (2, 'leaves_homo', [37.466667, \n 13.142298], True), (2, 'leaves_per_class', [0.33333334, 0.22222224], \n True), (2, 'nodes', 8, True), (2, 'nodes_per_attr', 2.0, True), (2,\n 'nodes_per_inst', 0.05333333333333334, True), (2, 'nodes_per_level', [\n 1.6, 0.8944272], True), (2, 'nodes_repeated', [2.0, 1.1547005], True),\n (2, 'tree_depth', [3.0588236, 1.4348601], True), (2, 'tree_imbalance',\n [0.19491705, 0.1330071], True), (2, 'tree_shape', [0.27083334, \n 0.107119605], True), (2, 'var_importance', [0.24999999, 0.27823895], \n True), (2, 'leaves', 9, False), (2, 'leaves_branch', [3.7777777, \n 1.2018504], False), (2, 'leaves_corrob', [0.11111111, 0.15051763], \n False), (2, 'leaves_homo', [37.466667, 13.142298], False), (2,\n 'leaves_per_class', [0.33333334, 0.22222224], False), (2, 'nodes', 8, \n False), (2, 'nodes_per_attr', 2.0, False), (2, 'nodes_per_inst', \n 0.05333333333333334, False), (2, 'nodes_per_level', [1.6, 0.8944272], \n False), (2, 'nodes_repeated', [2.0, 1.1547005], False), (2,\n 'tree_depth', [3.0588236, 1.4348601], False), (2, 'tree_imbalance', [\n 0.19491705, 0.1330071], False), (2, 'tree_shape', [0.27083334, \n 0.107119605], False), (2, 'var_importance', [0.24999999, 0.27823895], \n False)])\n", (286, 4535), False, 'import pytest\n'), ((6198, 10376), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt_id, ft_name, exp_value, precompute"""', "[(0, 'leaves', 7, True), (0, 'leaves_branch', [3.7142856, 1.7043363], True),\n (0, 'leaves_corrob', [0.14285713, 0.06575568], True), (0, 'leaves_homo',\n [32.266666, 15.709021], True), (0, 'leaves_per_class', [0.5, 0.30304578\n ], True), (0, 'nodes', 6, True), (0, 'nodes_per_attr', \n 0.5454545454545454, True), (0, 'nodes_per_inst', 0.12, True), (0,\n 'nodes_per_level', [1.2, 0.4472136], True), (0, 'nodes_repeated', [3.0,\n 1.4142135], True), (0, 'tree_depth', [3.0769231, 1.7541162], True), (0,\n 'tree_imbalance', [0.19825712, 0.11291388], True), (0, 'tree_shape', [\n 0.2857143, 0.16675964], True), (0, 'var_importance', [0.09090909, \n 0.2417293], True), (0, 'leaves', 7, False), (0, 'leaves_branch', [\n 3.7142856, 1.7043363], False), (0, 'leaves_corrob', [0.14285713, \n 0.06575568], False), (0, 'leaves_homo', [32.266666, 15.709021], False),\n (0, 'leaves_per_class', [0.5, 0.30304578], False), (0, 'nodes', 6, \n False), (0, 'nodes_per_attr', 0.5454545454545454, False), (0,\n 'nodes_per_inst', 0.12, False), (0, 'nodes_per_level', [1.2, 0.4472136],\n False), (0, 'nodes_repeated', [3.0, 1.4142135], False), (0,\n 'tree_depth', [3.0769231, 1.7541162], False), (0, 'tree_imbalance', [\n 0.19825712, 0.11291388], False), (0, 'tree_shape', [0.2857143, \n 0.16675964], False), (0, 'var_importance', [0.09090909, 0.2417293], \n False), (1, 'leaves', 10, True), (1, 'leaves_branch', [4.3, 1.4944341],\n True), (1, 'leaves_corrob', [0.1, 0.08727827], True), (1, 'leaves_homo',\n [55.2, 18.552029], True), (1, 'leaves_per_class', [0.5, 0.2828427], \n True), (1, 'nodes', 9, True), (1, 'nodes_per_attr', 0.23684210526315788,\n True), (1, 'nodes_per_inst', 0.002816020025031289, True), (1,\n 'nodes_per_level', [1.8, 1.3038405], True), (1, 'nodes_repeated', [\n 1.125, 0.35355338], True), (1, 'tree_depth', [3.5789473, 1.6437014], \n True), (1, 'tree_imbalance', [0.25800052, 0.0827512], True), (1,\n 'tree_shape', [0.225, 0.14493772], True), (1, 'var_importance', [\n 0.02631579, 0.07277515], True), (1, 'leaves', 10, False), (1,\n 'leaves_branch', [4.3, 1.4944341], False), (1, 'leaves_corrob', [0.1, \n 0.08727827], False), (1, 
'leaves_homo', [55.2, 18.552029], False), (1,\n 'leaves_per_class', [0.5, 0.2828427], False), (1, 'nodes', 9, False), (\n 1, 'nodes_per_attr', 0.23684210526315788, False), (1, 'nodes_per_inst',\n 0.002816020025031289, False), (1, 'nodes_per_level', [1.8, 1.3038405], \n False), (1, 'nodes_repeated', [1.125, 0.35355338], False), (1,\n 'tree_depth', [3.5789473, 1.6437014], False), (1, 'tree_imbalance', [\n 0.25800052, 0.0827512], False), (1, 'tree_shape', [0.225, 0.14493772], \n False), (1, 'var_importance', [0.02631579, 0.07277515], False), (2,\n 'leaves', 6, True), (2, 'leaves_branch', [3.0, 1.0954452], True), (2,\n 'leaves_corrob', [0.16666667, 0.15927614], True), (2, 'leaves_homo', [\n 18.0, 4.8989797], True), (2, 'leaves_per_class', [0.33333334, \n 0.28867516], True), (2, 'nodes', 5, True), (2, 'nodes_per_attr', 1.25, \n True), (2, 'nodes_per_inst', 0.03333333333333333, True), (2,\n 'nodes_per_level', [1.25, 0.5], True), (2, 'nodes_repeated', [2.5, \n 0.70710677], True), (2, 'tree_depth', [2.3636363, 1.2862914], True), (2,\n 'tree_imbalance', [0.2524478, 0.1236233], True), (2, 'tree_shape', [\n 0.35416666, 0.094096586], True), (2, 'var_importance', [0.25, \n 0.31985083], True), (2, 'leaves', 6, False), (2, 'leaves_branch', [3.0,\n 1.0954452], False), (2, 'leaves_corrob', [0.16666667, 0.15927614], \n False), (2, 'leaves_homo', [18.0, 4.8989797], False), (2,\n 'leaves_per_class', [0.33333334, 0.28867516], False), (2, 'nodes', 5, \n False), (2, 'nodes_per_attr', 1.25, False), (2, 'nodes_per_inst', \n 0.03333333333333333, False), (2, 'nodes_per_level', [1.25, 0.5], False),\n (2, 'nodes_repeated', [2.5, 0.70710677], False), (2, 'tree_depth', [\n 2.3636363, 1.2862914], False), (2, 'tree_imbalance', [0.2524478, \n 0.1236233], False), (2, 'tree_shape', [0.35416666, 0.094096586], False),\n (2, 'var_importance', [0.25, 0.31985083], False)]"], {}), "('dt_id, ft_name, exp_value, precompute', [(0,\n 'leaves', 7, True), (0, 'leaves_branch', [3.7142856, 1.7043363], True),\n (0, 'leaves_corrob', [0.14285713, 0.06575568], True), (0, 'leaves_homo',\n [32.266666, 15.709021], True), (0, 'leaves_per_class', [0.5, 0.30304578\n ], True), (0, 'nodes', 6, True), (0, 'nodes_per_attr', \n 0.5454545454545454, True), (0, 'nodes_per_inst', 0.12, True), (0,\n 'nodes_per_level', [1.2, 0.4472136], True), (0, 'nodes_repeated', [3.0,\n 1.4142135], True), (0, 'tree_depth', [3.0769231, 1.7541162], True), (0,\n 'tree_imbalance', [0.19825712, 0.11291388], True), (0, 'tree_shape', [\n 0.2857143, 0.16675964], True), (0, 'var_importance', [0.09090909, \n 0.2417293], True), (0, 'leaves', 7, False), (0, 'leaves_branch', [\n 3.7142856, 1.7043363], False), (0, 'leaves_corrob', [0.14285713, \n 0.06575568], False), (0, 'leaves_homo', [32.266666, 15.709021], False),\n (0, 'leaves_per_class', [0.5, 0.30304578], False), (0, 'nodes', 6, \n False), (0, 'nodes_per_attr', 0.5454545454545454, False), (0,\n 'nodes_per_inst', 0.12, False), (0, 'nodes_per_level', [1.2, 0.4472136],\n False), (0, 'nodes_repeated', [3.0, 1.4142135], False), (0,\n 'tree_depth', [3.0769231, 1.7541162], False), (0, 'tree_imbalance', [\n 0.19825712, 0.11291388], False), (0, 'tree_shape', [0.2857143, \n 0.16675964], False), (0, 'var_importance', [0.09090909, 0.2417293], \n False), (1, 'leaves', 10, True), (1, 'leaves_branch', [4.3, 1.4944341],\n True), (1, 'leaves_corrob', [0.1, 0.08727827], True), (1, 'leaves_homo',\n [55.2, 18.552029], True), (1, 'leaves_per_class', [0.5, 0.2828427], \n True), (1, 'nodes', 9, True), (1, 'nodes_per_attr', 0.23684210526315788,\n True), 
(1, 'nodes_per_inst', 0.002816020025031289, True), (1,\n 'nodes_per_level', [1.8, 1.3038405], True), (1, 'nodes_repeated', [\n 1.125, 0.35355338], True), (1, 'tree_depth', [3.5789473, 1.6437014], \n True), (1, 'tree_imbalance', [0.25800052, 0.0827512], True), (1,\n 'tree_shape', [0.225, 0.14493772], True), (1, 'var_importance', [\n 0.02631579, 0.07277515], True), (1, 'leaves', 10, False), (1,\n 'leaves_branch', [4.3, 1.4944341], False), (1, 'leaves_corrob', [0.1, \n 0.08727827], False), (1, 'leaves_homo', [55.2, 18.552029], False), (1,\n 'leaves_per_class', [0.5, 0.2828427], False), (1, 'nodes', 9, False), (\n 1, 'nodes_per_attr', 0.23684210526315788, False), (1, 'nodes_per_inst',\n 0.002816020025031289, False), (1, 'nodes_per_level', [1.8, 1.3038405], \n False), (1, 'nodes_repeated', [1.125, 0.35355338], False), (1,\n 'tree_depth', [3.5789473, 1.6437014], False), (1, 'tree_imbalance', [\n 0.25800052, 0.0827512], False), (1, 'tree_shape', [0.225, 0.14493772], \n False), (1, 'var_importance', [0.02631579, 0.07277515], False), (2,\n 'leaves', 6, True), (2, 'leaves_branch', [3.0, 1.0954452], True), (2,\n 'leaves_corrob', [0.16666667, 0.15927614], True), (2, 'leaves_homo', [\n 18.0, 4.8989797], True), (2, 'leaves_per_class', [0.33333334, \n 0.28867516], True), (2, 'nodes', 5, True), (2, 'nodes_per_attr', 1.25, \n True), (2, 'nodes_per_inst', 0.03333333333333333, True), (2,\n 'nodes_per_level', [1.25, 0.5], True), (2, 'nodes_repeated', [2.5, \n 0.70710677], True), (2, 'tree_depth', [2.3636363, 1.2862914], True), (2,\n 'tree_imbalance', [0.2524478, 0.1236233], True), (2, 'tree_shape', [\n 0.35416666, 0.094096586], True), (2, 'var_importance', [0.25, \n 0.31985083], True), (2, 'leaves', 6, False), (2, 'leaves_branch', [3.0,\n 1.0954452], False), (2, 'leaves_corrob', [0.16666667, 0.15927614], \n False), (2, 'leaves_homo', [18.0, 4.8989797], False), (2,\n 'leaves_per_class', [0.33333334, 0.28867516], False), (2, 'nodes', 5, \n False), (2, 'nodes_per_attr', 1.25, False), (2, 'nodes_per_inst', \n 0.03333333333333333, False), (2, 'nodes_per_level', [1.25, 0.5], False),\n (2, 'nodes_repeated', [2.5, 0.70710677], False), (2, 'tree_depth', [\n 2.3636363, 1.2862914], False), (2, 'tree_imbalance', [0.2524478, \n 0.1236233], False), (2, 'tree_shape', [0.35416666, 0.094096586], False),\n (2, 'var_importance', [0.25, 0.31985083], False)])\n", (6221, 10376), False, 'import pytest\n'), ((12528, 13203), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt_id, exp_value, precompute"""', '[(0, [13, 4.6153846, 0.07692308, 84.933334, 0.5, 12, 1.0909090909090908, \n 0.24, 2.0, 3.0, 3.84, 0.16146065, 0.20192307, 0.09090909], False), (0,\n [13, 4.6153846, 0.07692308, 84.933334, 0.5, 12, 1.0909090909090908, \n 0.24, 2.0, 3.0, 3.84, 0.16146065, 0.20192307, 0.09090909], True), (2, [\n 9, 3.7777777, 0.11111111, 37.466667, 0.33333334, 8, 2.0, \n 0.05333333333333334, 1.6, 2.0, 3.0588236, 0.19491705, 0.27083334, \n 0.24999999], False), (2, [9, 3.7777777, 0.11111111, 37.466667, \n 0.33333334, 8, 2.0, 0.05333333333333334, 1.6, 2.0, 3.0588236, \n 0.19491705, 0.27083334, 0.24999999], True)]'], {}), "('dt_id, exp_value, precompute', [(0, [13, 4.6153846,\n 0.07692308, 84.933334, 0.5, 12, 1.0909090909090908, 0.24, 2.0, 3.0, \n 3.84, 0.16146065, 0.20192307, 0.09090909], False), (0, [13, 4.6153846, \n 0.07692308, 84.933334, 0.5, 12, 1.0909090909090908, 0.24, 2.0, 3.0, \n 3.84, 0.16146065, 0.20192307, 0.09090909], True), (2, [9, 3.7777777, \n 0.11111111, 37.466667, 0.33333334, 8, 2.0, 0.05333333333333334, 1.6, \n 2.0, 
3.0588236, 0.19491705, 0.27083334, 0.24999999], False), (2, [9, \n 3.7777777, 0.11111111, 37.466667, 0.33333334, 8, 2.0, \n 0.05333333333333334, 1.6, 2.0, 3.0588236, 0.19491705, 0.27083334, \n 0.24999999], True)])\n", (12551, 13203), False, 'import pytest\n'), ((5865, 5879), 'tests.utils.load_xy', 'load_xy', (['dt_id'], {}), '(dt_id)\n', (5872, 5879), False, 'from tests.utils import load_xy\n'), ((5894, 5952), 'pymfe.mfe.MFE', 'MFE', ([], {'groups': '[GNAME]', 'features': '[ft_name]', 'random_state': '(1234)'}), '(groups=[GNAME], features=[ft_name], random_state=1234)\n', (5897, 5952), False, 'from pymfe.mfe import MFE\n'), ((11714, 11728), 'tests.utils.load_xy', 'load_xy', (['dt_id'], {}), '(dt_id)\n', (11721, 11728), False, 'from tests.utils import load_xy\n'), ((11743, 11890), 'pymfe.mfe.MFE', 'MFE', ([], {'groups': '[GNAME]', 'features': '[ft_name]', 'hypparam_model_dt': "{'max_depth': 5, 'min_samples_split': 10, 'criterion': 'entropy'}", 'random_state': '(1234)'}), "(groups=[GNAME], features=[ft_name], hypparam_model_dt={'max_depth': 5,\n 'min_samples_split': 10, 'criterion': 'entropy'}, random_state=1234)\n", (11746, 11890), False, 'from pymfe.mfe import MFE\n'), ((13881, 13895), 'tests.utils.load_xy', 'load_xy', (['dt_id'], {}), '(dt_id)\n', (13888, 13895), False, 'from tests.utils import load_xy\n'), ((13910, 13964), 'pymfe.mfe.MFE', 'MFE', ([], {'groups': '[GNAME]', 'summary': '"""mean"""', 'random_state': '(1234)'}), "(groups=[GNAME], summary='mean', random_state=1234)\n", (13913, 13964), False, 'from pymfe.mfe import MFE\n'), ((14082, 14127), 'numpy.allclose', 'np.allclose', (['value', 'exp_value'], {'equal_nan': '(True)'}), '(value, exp_value, equal_nan=True)\n', (14093, 14127), True, 'import numpy as np\n'), ((6162, 6191), 'numpy.allclose', 'np.allclose', (['value', 'exp_value'], {}), '(value, exp_value)\n', (6173, 6191), True, 'import numpy as np\n'), ((12492, 12521), 'numpy.allclose', 'np.allclose', (['value', 'exp_value'], {}), '(value, exp_value)\n', (12503, 12521), True, 'import numpy as np\n')] |
# pulse_sequence.py
# <NAME>
# <EMAIL>
# Last Edited: Mon 28 Feb 2022 11:17:58 GMT
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
# Legacy sizing constants (unused below; TAU_WIDTH is set in the horizontal
# dimensions block, so the earlier 0.05 value has been dropped).
PULSE_WIDTH = 1
PULSE_HEIGHT = 0.3  # fraction of height of figure
HORIZONTAL_PADS = (0.05, 0.01)
# --- horizontal dimensions---
LEFT_PAD = 3
NINETY_WIDTH = 3
TAU_WIDTH = 9
HALF_T1_WIDTH = 12
T2_WIDTH = 15
RIGHT_PAD = 3
# ---vertical dimensions---
CHANNEL_HEIGHT = 8
CHANNEL_GAP = 3
TOP_PAD = 2
BOTTOM_PAD = 1
def add_pulse(ax, channel, x0, flip_angle="90"):
if channel == "top":
y0 = BOTTOM_PAD + CHANNEL_HEIGHT + CHANNEL_GAP
elif channel == "bottom":
y0 = BOTTOM_PAD
if flip_angle == "90":
width = NINETY_WIDTH
fc = "k"
elif flip_angle == "180":
width = 2 * NINETY_WIDTH
fc = "w"
ax.add_patch(
Rectangle(
(x0, y0), width, CHANNEL_HEIGHT, facecolor=fc, edgecolor="k",
transform=ax.transAxes
),
)
def add_text(ax, x, y, txt):
ax.text(
x, y, txt, horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes, fontsize=10,
)
def dotted_line(ax, channel, x):
if channel == "top":
y0 = BOTTOM_PAD + CHANNEL_HEIGHT + CHANNEL_GAP
elif channel == "bottom":
y0 = BOTTOM_PAD
ax.plot(
[x, x], [y0, y0 + CHANNEL_HEIGHT], color="k", linestyle=":", linewidth=1.5,
transform=ax.transAxes
)
# ==============================
horizontal_total = (
LEFT_PAD + RIGHT_PAD + 4 * TAU_WIDTH + 2 * HALF_T1_WIDTH +
10 * NINETY_WIDTH + T2_WIDTH
)
LEFT_PAD, RIGHT_PAD, TAU_WIDTH, HALF_T1_WIDTH, NINETY_WIDTH, T2_WIDTH = [
x / horizontal_total for x in
[LEFT_PAD, RIGHT_PAD, TAU_WIDTH, HALF_T1_WIDTH, NINETY_WIDTH, T2_WIDTH]
]
vertical_total = BOTTOM_PAD + TOP_PAD + CHANNEL_GAP + 2 * CHANNEL_HEIGHT
BOTTOM_PAD, TOP_PAD, CHANNEL_GAP, CHANNEL_HEIGHT = [
x / vertical_total for x in
[BOTTOM_PAD, TOP_PAD, CHANNEL_GAP, CHANNEL_HEIGHT]
]
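# The two blocks above rescale the absolute layout sizes into axes-fraction
# units so everything can be drawn with ax.transAxes. A small sketch of the
# same idiom (illustrative only; the script keeps its explicit lists):
def _normalized(*sizes):
    """Rescale a set of sizes so that they sum to 1 (axes-fraction units)."""
    total = sum(sizes)
    return [s / total for s in sizes]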
fig = plt.figure(figsize=(6, 2))
ax = fig.add_axes([0, 0, 1, 1])
ax.axis("off")
# Lower channel line
ax.plot(
[LEFT_PAD, 1 - RIGHT_PAD], [BOTTOM_PAD, BOTTOM_PAD],
color="k", solid_capstyle="round",
transform=ax.transAxes,
)
# Higher channel line
ax.plot(
[LEFT_PAD, 1 - RIGHT_PAD], 2 * [BOTTOM_PAD + CHANNEL_HEIGHT + CHANNEL_GAP],
color="k", solid_capstyle="round",
transform=ax.transAxes,
)
# useful horizontal positions
end_of_inept = LEFT_PAD + 4 * NINETY_WIDTH + 2 * TAU_WIDTH
end_of_t1 = end_of_inept + 2 * HALF_T1_WIDTH + 3 * NINETY_WIDTH
acquisition = end_of_t1 + 3 * NINETY_WIDTH + 2 * TAU_WIDTH
add_pulse(ax, "top", LEFT_PAD)
add_pulse(ax, "top", LEFT_PAD + NINETY_WIDTH + TAU_WIDTH, "180")
add_pulse(ax, "bottom", LEFT_PAD + NINETY_WIDTH + TAU_WIDTH, "180")
add_pulse(ax, "top", LEFT_PAD + 3 * NINETY_WIDTH + 2 * TAU_WIDTH)
add_pulse(ax, "bottom", LEFT_PAD + 3 * NINETY_WIDTH + 2 * TAU_WIDTH)
add_pulse(ax, "top", end_of_inept + HALF_T1_WIDTH, "180")
add_pulse(ax, "top", end_of_inept + 2 * HALF_T1_WIDTH + 2 * NINETY_WIDTH)
add_pulse(ax, "bottom", end_of_inept + 2 * HALF_T1_WIDTH + 2 * NINETY_WIDTH)
add_pulse(ax, "top", end_of_t1 + TAU_WIDTH, "180")
add_pulse(ax, "bottom", end_of_t1 + TAU_WIDTH, "180")
add_pulse(ax, "top", end_of_t1 + 2 * TAU_WIDTH + 2 * NINETY_WIDTH)
ax.add_patch(
Rectangle(
(acquisition, BOTTOM_PAD), T2_WIDTH, 0.4 * CHANNEL_HEIGHT,
transform=ax.transAxes, facecolor="#a0a0a0", edgecolor="none",
)
)
dotted_line(ax, "bottom", LEFT_PAD + NINETY_WIDTH)
dotted_line(ax, "bottom", LEFT_PAD + 5 * NINETY_WIDTH + 2 * TAU_WIDTH + HALF_T1_WIDTH)
dotted_line(ax, "bottom", LEFT_PAD + 9 * NINETY_WIDTH + 4 * TAU_WIDTH + 2 * HALF_T1_WIDTH)
add_text(ax, LEFT_PAD + NINETY_WIDTH + (0.5 * TAU_WIDTH), BOTTOM_PAD + 0.5 * CHANNEL_HEIGHT, r"$\tau$")
add_text(ax, LEFT_PAD + 3 * NINETY_WIDTH + (1.5 * TAU_WIDTH), BOTTOM_PAD + 0.5 * CHANNEL_HEIGHT, r"$\tau$")
add_text(ax, LEFT_PAD + NINETY_WIDTH + (0.5 * TAU_WIDTH), BOTTOM_PAD + 1.5 * CHANNEL_HEIGHT + CHANNEL_GAP, r"$\tau$")
add_text(ax, LEFT_PAD + 3 * NINETY_WIDTH + (1.5 * TAU_WIDTH), BOTTOM_PAD + 1.5 * CHANNEL_HEIGHT + CHANNEL_GAP, r"$\tau$")
add_text(ax, end_of_inept + 0.5 * HALF_T1_WIDTH + 0.5 * NINETY_WIDTH, BOTTOM_PAD + 0.5 * CHANNEL_HEIGHT, r"$\frac{t_1}{2}$")
add_text(ax, end_of_inept + 1.5 * HALF_T1_WIDTH + 1.5 * NINETY_WIDTH, BOTTOM_PAD + 0.5 * CHANNEL_HEIGHT, r"$\frac{t_1}{2}$")
add_text(ax, end_of_t1 + 0.5 * TAU_WIDTH, BOTTOM_PAD + 0.5 * CHANNEL_HEIGHT, r"$\tau$")
add_text(ax, end_of_t1 + 1.5 * TAU_WIDTH + 2 * NINETY_WIDTH, BOTTOM_PAD + 0.5 * CHANNEL_HEIGHT, r"$\tau$")
add_text(ax, end_of_t1 + 0.5 * TAU_WIDTH, BOTTOM_PAD + 1.5 * CHANNEL_HEIGHT + CHANNEL_GAP, r"$\tau$")
add_text(ax, end_of_t1 + 1.5 * TAU_WIDTH + 2 * NINETY_WIDTH, BOTTOM_PAD + 1.5 * CHANNEL_HEIGHT + CHANNEL_GAP, r"$\tau$")
add_text(ax, acquisition + 0.5 * T2_WIDTH, BOTTOM_PAD + 0.2 * CHANNEL_HEIGHT, "decouple")
add_text(ax, end_of_inept - 0.5 * NINETY_WIDTH, BOTTOM_PAD + 2 * CHANNEL_HEIGHT + CHANNEL_GAP + 0.04, "$y$")
add_text(ax, end_of_t1 - 0.5 * NINETY_WIDTH, BOTTOM_PAD + CHANNEL_HEIGHT + 0.04, r"$x,y$")
tp = np.linspace(acquisition, acquisition + T2_WIDTH, 256)
fid = (BOTTOM_PAD + CHANNEL_HEIGHT + CHANNEL_GAP) + (
0.5 * CHANNEL_HEIGHT * np.cos(300 * (tp - acquisition)) *
np.exp(np.linspace(0, -4, tp.size))
)
ax.plot(tp, fid, transform=ax.transAxes, color="k", lw=1.4)
fig.savefig("hsqc.pdf")
fig.savefig("hsqc.png")
| [
"matplotlib.pyplot.figure",
"matplotlib.patches.Rectangle",
"numpy.linspace",
"numpy.cos"
] | [((2047, 2073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 2)'}), '(figsize=(6, 2))\n', (2057, 2073), True, 'import matplotlib.pyplot as plt\n'), ((5177, 5230), 'numpy.linspace', 'np.linspace', (['acquisition', '(acquisition + T2_WIDTH)', '(256)'], {}), '(acquisition, acquisition + T2_WIDTH, 256)\n', (5188, 5230), True, 'import numpy as np\n'), ((3371, 3506), 'matplotlib.patches.Rectangle', 'Rectangle', (['(acquisition, BOTTOM_PAD)', 'T2_WIDTH', '(0.4 * CHANNEL_HEIGHT)'], {'transform': 'ax.transAxes', 'facecolor': '"""#a0a0a0"""', 'edgecolor': '"""none"""'}), "((acquisition, BOTTOM_PAD), T2_WIDTH, 0.4 * CHANNEL_HEIGHT,\n transform=ax.transAxes, facecolor='#a0a0a0', edgecolor='none')\n", (3380, 3506), False, 'from matplotlib.patches import Rectangle\n'), ((871, 970), 'matplotlib.patches.Rectangle', 'Rectangle', (['(x0, y0)', 'width', 'CHANNEL_HEIGHT'], {'facecolor': 'fc', 'edgecolor': '"""k"""', 'transform': 'ax.transAxes'}), "((x0, y0), width, CHANNEL_HEIGHT, facecolor=fc, edgecolor='k',\n transform=ax.transAxes)\n", (880, 970), False, 'from matplotlib.patches import Rectangle\n'), ((5312, 5344), 'numpy.cos', 'np.cos', (['(300 * (tp - acquisition))'], {}), '(300 * (tp - acquisition))\n', (5318, 5344), True, 'import numpy as np\n'), ((5358, 5385), 'numpy.linspace', 'np.linspace', (['(0)', '(-4)', 'tp.size'], {}), '(0, -4, tp.size)\n', (5369, 5385), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# test_numpy.py
# testing script for writing various numpy tensors to QG8 files; run with:
#     python -m pytest -rP tests/test_numpy.py
#
# Author : <NAME> <<EMAIL>>
# Date created : 18 July 2021
#
# Copyright 2021 University of Strasbourg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from qg8.core import *
from qg8.constants import *
from qg8 import from_numpy
graph = qg8_graph_create()
tests = []
# TEST 1
data = 324
dtype = 'int64'
test = {"description": "TEST1: Constant integer saved as rank 1 tensor " + dtype,
"first_val": np.array(data, dtype),
"last_val": np.array(data, dtype),
"type": QG8_TYPE_CONSTANT,
"packing": QG8_PACKING_SPARSE_COO,
"string_id": "constant",
"dtype": dtype,
"itype": 'uint8',
"dims": np.atleast_1d(data).shape,
"bytes": 16 + 8 + 2
}
chunk = qg8_chunk_create(test["type"], tensor=from_numpy(data, dtype=dtype, packing=test["packing"]), string_id=test["string_id"])
qg8_graph_add_chunk(graph, chunk)
tests.append(test)
# TEST 2
data = np.random.randint(0, 2, size=2**16).astype('bool')
dtype = 'uint8'
test = {"description": "TEST2: large 1D boolean array (2**16 elements) saved as sparse tensor " + dtype,
"first_val": np.array(data[data!=0].item(0), dtype),
"last_val": np.array(data[data!=0].item(-1), dtype),
"type": QG8_TYPE_CONSTANT,
"packing": QG8_PACKING_SPARSE_COO,
"string_id": "1D sparse array",
"dtype": dtype,
"itype": 'uint32',
"dims": data.shape,
"bytes": 16 + 1*np.count_nonzero(data) + 4*(np.count_nonzero(data)+1)*len(data.shape)
}
chunk = qg8_chunk_create(test["type"], tensor=from_numpy(data, packing=test["packing"]), string_id=test["string_id"])
qg8_graph_add_chunk(graph, chunk)
tests.append(test)
# TEST 3
data = np.zeros((2**8, 2**8))
dtype = 'float32'
test = {"description": "TEST3: large 2D array of zeros (256 x 256 elements) saved as dense tensor " + dtype,
"first_val": np.array(data.item(0), dtype),
"last_val": np.array(data.item(-1), dtype),
"type": QG8_TYPE_CONSTANT,
"packing": QG8_PACKING_FULL,
"string_id": "2D dense array",
"dtype": dtype,
"itype": 'uint16',
"dims": data.shape,
"bytes": 16 + 4*data.size + 2*(data.size+1)*len(data.shape)
}
chunk = qg8_chunk_create(test["type"], tensor=from_numpy(data, dtype=dtype, packing=test["packing"]), string_id=test["string_id"])
qg8_graph_add_chunk(graph, chunk)
tests.append(test)
# TEST 4
data = np.random.randint(0, 2**16-1, size=(1, 2, 3, 4, 5, 6))
dtype = 'uint16'
test = {"description": "TEST4: rank 6 tensor saved in sparse format " + dtype + ", without a label",
"first_val": np.array(data[data!=0].item(0), dtype),
"last_val": np.array(data[data!=0].item(-1), dtype),
"type": QG8_TYPE_CONSTANT,
"packing": QG8_PACKING_SPARSE_COO,
"string_id": None,
"dtype": dtype,
"itype": 'uint8',
"dims": data.shape,
"bytes": 16 + 2*np.count_nonzero(data) + 1*(np.count_nonzero(data)+1)*len(data.shape)
}
chunk = qg8_chunk_create(test["type"], tensor=from_numpy(data, dtype=dtype, packing=test["packing"]), string_id=test["string_id"])
qg8_graph_add_chunk(graph, chunk)
tests.append(test)
# TEST 5
data = np.random.randint(-1,1,size=(64, 64))+1j*np.random.randint(-1,1,size=(64, 64))
dtype = 'complex128'
test = {"description": "TEST5: complex tensor saved in sparse format " + dtype + ", with dangerous label",
"first_val": np.array(data[data!=0].item(0), dtype),
"last_val": np.array(data[data!=0].item(-1), dtype),
"type": QG8_TYPE_INPUT,
"packing": QG8_PACKING_SPARSE_COO,
"string_id": "~this\label%is!too long",
"dtype": dtype,
"itype": 'uint8',
"dims": data.shape,
"bytes": 16 + 16*np.count_nonzero(data) + 1*(np.count_nonzero(data)+1)*len(data.shape)
}
chunk = qg8_chunk_create(test["type"], tensor=from_numpy(data, dtype=dtype, packing=test["packing"]), string_id=test["string_id"])
qg8_graph_add_chunk(graph, chunk)
tests.append(test)
# write all chunks to file and reload it as a new graph
qg8_graph_write('tests/test_numpy.qg8', graph)
graph_new = qg8_graph_load('tests/test_numpy.qg8')
# run module
def run_test(t,chunk):
print("")
print(t["description"])
print("first value: ", end="")
if chunk.tensor.dtype_id in (QG8_DTYPE_COMPLEX64, QG8_DTYPE_COMPLEX128):
first_val = chunk.tensor.re[0]+1j*chunk.tensor.im[0]
else:
first_val = chunk.tensor.re[0]
print(t["first_val"], "-> ", first_val)
assert t["first_val"] == first_val
print("last value: ", end="")
if chunk.tensor.dtype_id in (QG8_DTYPE_COMPLEX64, QG8_DTYPE_COMPLEX128):
last_val = chunk.tensor.re[-1]+1j*chunk.tensor.im[-1]
else:
last_val = chunk.tensor.re[-1]
print(t["last_val"], "-> ", last_val)
assert t["last_val"] == last_val
print("type: ", end="")
print(t["type"], "-> ", chunk.type)
assert t["type"] == chunk.type
print("packing: ", end="")
print(t["packing"], "-> ", chunk.tensor.packing)
assert t["packing"] == chunk.tensor.packing
print("dtype: ", end="")
print(t["dtype"], "-> ", chunk.tensor.dtype)
assert t["dtype"] == chunk.tensor.dtype
print("itype: ", end="")
print(t["itype"], "-> ", chunk.tensor.itype)
assert t["itype"] == chunk.tensor.itype
print("string_id: ", end="")
print(t["string_id"], "->", chunk.string_id)
if t["string_id"] is not None:
assert t["string_id"][0:16] == chunk.string_id
else:
assert t["string_id"] == chunk.string_id
print("dims: ", end="")
print(t["dims"], "->", chunk.tensor.dims)
assert t["dims"] == chunk.tensor.dims
print("bytes: ", end="")
print(t["bytes"], "->", chunk.tensor.datasize())
assert t["bytes"] == chunk.tensor.datasize()
# run all tests
def test_1():
run_test(tests[0], graph_new.chunks[0])
def test_2():
run_test(tests[1], graph_new.chunks[1])
def test_3():
run_test(tests[2], graph_new.chunks[2])
def test_4():
run_test(tests[3], graph_new.chunks[3])
def test_5():
run_test(tests[4], graph_new.chunks[4])
test_1()
test_2()
test_3()
test_4()
test_5()
| [
"qg8.from_numpy",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.atleast_1d"
] | [((2377, 2403), 'numpy.zeros', 'np.zeros', (['(2 ** 8, 2 ** 8)'], {}), '((2 ** 8, 2 ** 8))\n', (2385, 2403), True, 'import numpy as np\n'), ((3101, 3159), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 16 - 1)'], {'size': '(1, 2, 3, 4, 5, 6)'}), '(0, 2 ** 16 - 1, size=(1, 2, 3, 4, 5, 6))\n', (3118, 3159), True, 'import numpy as np\n'), ((1081, 1102), 'numpy.array', 'np.array', (['data', 'dtype'], {}), '(data, dtype)\n', (1089, 1102), True, 'import numpy as np\n'), ((1124, 1145), 'numpy.array', 'np.array', (['data', 'dtype'], {}), '(data, dtype)\n', (1132, 1145), True, 'import numpy as np\n'), ((3885, 3924), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(1)'], {'size': '(64, 64)'}), '(-1, 1, size=(64, 64))\n', (3902, 3924), True, 'import numpy as np\n'), ((1324, 1343), 'numpy.atleast_1d', 'np.atleast_1d', (['data'], {}), '(data)\n', (1337, 1343), True, 'import numpy as np\n'), ((1436, 1490), 'qg8.from_numpy', 'from_numpy', (['data'], {'dtype': 'dtype', 'packing': "test['packing']"}), "(data, dtype=dtype, packing=test['packing'])\n", (1446, 1490), False, 'from qg8 import from_numpy\n'), ((1592, 1629), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(2 ** 16)'}), '(0, 2, size=2 ** 16)\n', (1609, 1629), True, 'import numpy as np\n'), ((2234, 2275), 'qg8.from_numpy', 'from_numpy', (['data'], {'packing': "test['packing']"}), "(data, packing=test['packing'])\n", (2244, 2275), False, 'from qg8 import from_numpy\n'), ((2945, 2999), 'qg8.from_numpy', 'from_numpy', (['data'], {'dtype': 'dtype', 'packing': "test['packing']"}), "(data, dtype=dtype, packing=test['packing'])\n", (2955, 2999), False, 'from qg8 import from_numpy\n'), ((3729, 3783), 'qg8.from_numpy', 'from_numpy', (['data'], {'dtype': 'dtype', 'packing': "test['packing']"}), "(data, dtype=dtype, packing=test['packing'])\n", (3739, 3783), False, 'from qg8 import from_numpy\n'), ((3926, 3965), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(1)'], {'size': '(64, 64)'}), '(-1, 1, size=(64, 64))\n', (3943, 3965), True, 'import numpy as np\n'), ((4566, 4620), 'qg8.from_numpy', 'from_numpy', (['data'], {'dtype': 'dtype', 'packing': "test['packing']"}), "(data, dtype=dtype, packing=test['packing'])\n", (4576, 4620), False, 'from qg8 import from_numpy\n'), ((2107, 2129), 'numpy.count_nonzero', 'np.count_nonzero', (['data'], {}), '(data)\n', (2123, 2129), True, 'import numpy as np\n'), ((3603, 3625), 'numpy.count_nonzero', 'np.count_nonzero', (['data'], {}), '(data)\n', (3619, 3625), True, 'import numpy as np\n'), ((4440, 4462), 'numpy.count_nonzero', 'np.count_nonzero', (['data'], {}), '(data)\n', (4456, 4462), True, 'import numpy as np\n'), ((2135, 2157), 'numpy.count_nonzero', 'np.count_nonzero', (['data'], {}), '(data)\n', (2151, 2157), True, 'import numpy as np\n'), ((3631, 3653), 'numpy.count_nonzero', 'np.count_nonzero', (['data'], {}), '(data)\n', (3647, 3653), True, 'import numpy as np\n'), ((4468, 4490), 'numpy.count_nonzero', 'np.count_nonzero', (['data'], {}), '(data)\n', (4484, 4490), True, 'import numpy as np\n')] |
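The "bytes" expectations in the five tests above all follow one pattern: a 16-byte tensor header, one value entry per stored element, and one index entry per stored element and per dimension (plus one extra index per dimension). The helper below makes that accounting explicit; it is an editorial sketch inferred from the test values, not part of the QG8 API:

import numpy as np

def expected_qg8_bytes(data, value_itemsize, index_itemsize, sparse=True):
    """Reproduce the tests' 'bytes' fields: 16-byte header + values + indices.
    Editorial reconstruction from the expectations above, not a QG8 call."""
    data = np.atleast_1d(np.asarray(data))
    n = np.count_nonzero(data) if sparse else data.size
    rank = len(data.shape)
    return 16 + value_itemsize * n + index_itemsize * (n + 1) * rank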
import nhpp
import math
import numpy as np
import pandas as pd
import pytest
@pytest.mark.parametrize("test_input,expected", [
({0: 1, 2: 1, 1: 0}, ([0, 1, 2], [1, 0, 1])),
({0: 1, 3: 1, 2: 2}, ([0, 2, 3], [1, 2, 1])),
])
def test_sorting(test_input, expected):
assert nhpp.nhpp._get_sorted_pairs(test_input) == expected
@pytest.mark.parametrize("test_input,expected", [
(0, 0),
(1, 5),
(0.5, 2.5),
(3.5, 2.5)
])
def test_piecewise_interp(test_input, expected):
knot_times = [0, 1, 2, 3, 5]
knot_vals = [0, 5, 1, 2, 4]
knots = dict(zip(knot_times, knot_vals))
assert nhpp.nhpp._get_piecewise_val(knots, test_input) == expected
def test_non_dict_error_catch():
knots = [0, 1, 2]
with pytest.raises(TypeError):
nhpp.get_arrivals(knots)
def test_negative_rate_error_catch():
knots = {0: 0, 1: -1, 2: 2, 3: 0}
with pytest.raises(ValueError):
nhpp.get_arrivals(knots)
def test_rate_slopes_error_catch():
knot_times = [0, 1, 2, 3, 4]
knot_vals = [0, 0, 1, 2, 3]
with pytest.raises(ValueError):
nhpp.nhpp._get_rate_slopes(knot_times, knot_vals)
def get_epsilon(knots, bins, func=None, *args, **kwargs):
knots = {0: 1, 1: 0, 2: 2}
bins = 10
data = []
max_knot_val = max(knots.values())
min_knot_dom = min(knots.keys())
max_knot_dom = max(knots.keys())
for i in range(100000):
arrivals = nhpp.get_arrivals(knots)
data.append(np.histogram(arrivals, bins, (min_knot_dom, max_knot_dom))[0])
_, bin_measure = np.histogram(arrivals, bins, (min_knot_dom, max_knot_dom))
df = pd.DataFrame(data)
if func:
check_against = [func(measure, *args, **kwargs) for measure in np.linspace(min_knot_dom, max_knot_dom, bins)]
else:
        check_against = [nhpp.nhpp._get_piecewise_val(knots, measure) for measure in np.linspace(0, 2, bins)]
max_val = max(check_against)
check = max_val*df.sum()/df.sum().max()
check, check_against = np.array(check), np.array(check_against)
return np.sum(check - check_against)
def test_eps_no_func_1():
knots = {0: 1, 1: 0, 2: 2}
bins = 10
assert(get_epsilon(knots, bins) < 1)
def test_eps_with_func_1():
knots = {0: 3, math.pi/2: 9, math.pi: 3, 3*math.pi/2: 0, 2*math.pi: 3}
bins = 10
def test_func(t):
return 3*np.sin(t) + 3
assert(get_epsilon(knots, bins, test_func) < 1)
def test_eps_with_func_2():
knots = {0: 0, 2.5: 8, 5: 0}
bins = 10
def test_func(t):
return t*(5-t)
assert(get_epsilon(knots, bins, test_func) < 1)
def test_non_dominating_piecewise():
knots = {0: 0, 2.5: 6.25, 5: 0}
bins = 10
def test_func(t):
return t*(5-t)
with pytest.raises(ValueError):
nhpp.get_arrivals(knots, test_func)
| [
"numpy.histogram",
"nhpp.nhpp._get_piecewise_val",
"nhpp.get_arrivals",
"numpy.sin",
"pytest.mark.parametrize",
"numpy.sum",
"nhpp.nhpp._get_rate_slopes",
"pytest.raises",
"numpy.array",
"numpy.linspace",
"pandas.DataFrame",
"nhpp.nhpp._get_sorted_pairs"
] | [((80, 241), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input,expected"""', '[({(0): 1, (2): 1, (1): 0}, ([0, 1, 2], [1, 0, 1])), ({(0): 1, (3): 1, (2): 2}, ([0, 2, 3], [1, 2, 1]))]'], {}), "('test_input,expected', [({(0): 1, (2): 1, (1): 0},\n ([0, 1, 2], [1, 0, 1])), ({(0): 1, (3): 1, (2): 2}, ([0, 2, 3], [1, 2, \n 1]))])\n", (103, 241), False, 'import pytest\n'), ((330, 422), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input,expected"""', '[(0, 0), (1, 5), (0.5, 2.5), (3.5, 2.5)]'], {}), "('test_input,expected', [(0, 0), (1, 5), (0.5, 2.5),\n (3.5, 2.5)])\n", (353, 422), False, 'import pytest\n'), ((1450, 1508), 'numpy.histogram', 'np.histogram', (['arrivals', 'bins', '(min_knot_dom, max_knot_dom)'], {}), '(arrivals, bins, (min_knot_dom, max_knot_dom))\n', (1462, 1508), True, 'import numpy as np\n'), ((1515, 1533), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1527, 1533), True, 'import pandas as pd\n'), ((1912, 1941), 'numpy.sum', 'np.sum', (['(check - check_against)'], {}), '(check - check_against)\n', (1918, 1941), True, 'import numpy as np\n'), ((275, 314), 'nhpp.nhpp._get_sorted_pairs', 'nhpp.nhpp._get_sorted_pairs', (['test_input'], {}), '(test_input)\n', (302, 314), False, 'import nhpp\n'), ((587, 634), 'nhpp.nhpp._get_piecewise_val', 'nhpp.nhpp._get_piecewise_val', (['knots', 'test_input'], {}), '(knots, test_input)\n', (615, 634), False, 'import nhpp\n'), ((707, 731), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (720, 731), False, 'import pytest\n'), ((735, 759), 'nhpp.get_arrivals', 'nhpp.get_arrivals', (['knots'], {}), '(knots)\n', (752, 759), False, 'import nhpp\n'), ((841, 866), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (854, 866), False, 'import pytest\n'), ((870, 894), 'nhpp.get_arrivals', 'nhpp.get_arrivals', (['knots'], {}), '(knots)\n', (887, 894), False, 'import nhpp\n'), ((999, 1024), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1012, 1024), False, 'import pytest\n'), ((1028, 1077), 'nhpp.nhpp._get_rate_slopes', 'nhpp.nhpp._get_rate_slopes', (['knot_times', 'knot_vals'], {}), '(knot_times, knot_vals)\n', (1054, 1077), False, 'import nhpp\n'), ((1330, 1354), 'nhpp.get_arrivals', 'nhpp.get_arrivals', (['knots'], {}), '(knots)\n', (1347, 1354), False, 'import nhpp\n'), ((1863, 1878), 'numpy.array', 'np.array', (['check'], {}), '(check)\n', (1871, 1878), True, 'import numpy as np\n'), ((1880, 1903), 'numpy.array', 'np.array', (['check_against'], {}), '(check_against)\n', (1888, 1903), True, 'import numpy as np\n'), ((2534, 2559), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2547, 2559), False, 'import pytest\n'), ((2563, 2598), 'nhpp.get_arrivals', 'nhpp.get_arrivals', (['knots', 'test_func'], {}), '(knots, test_func)\n', (2580, 2598), False, 'import nhpp\n'), ((1682, 1726), 'nhpp.nhpp._get_piecewise_val', 'nhpp.nhpp._get_piecewise_val', (['knots', 'measure'], {}), '(knots, measure)\n', (1710, 1726), False, 'import nhpp\n'), ((1369, 1427), 'numpy.histogram', 'np.histogram', (['arrivals', 'bins', '(min_knot_dom, max_knot_dom)'], {}), '(arrivals, bins, (min_knot_dom, max_knot_dom))\n', (1381, 1427), True, 'import numpy as np\n'), ((1609, 1654), 'numpy.linspace', 'np.linspace', (['min_knot_dom', 'max_knot_dom', 'bins'], {}), '(min_knot_dom, max_knot_dom, bins)\n', (1620, 1654), True, 'import numpy as np\n'), ((1743, 1766), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', 'bins'], {}), '(0, 2, bins)\n', (1754, 1766), True, 'import numpy as np\n'), ((2190, 2199), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2196, 2199), True, 'import numpy as np\n')]
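The tests above validate nhpp.get_arrivals against a piecewise-linear rate function, including the case where a proposed dominating function fails to dominate. For context, the standard way to sample such a non-homogeneous Poisson process is Lewis-Shedler thinning; the sketch below is an editorial illustration of that algorithm and does not show nhpp's actual internals:

import numpy as np

def thinning_arrivals(rate, rate_max, t_max, rng=None):
    # Draw candidate arrivals from a homogeneous process at the dominating
    # rate, then accept each candidate with probability rate(t) / rate_max.
    # `rate` is a callable and `rate_max` must dominate it on [0, t_max]
    # (compare test_non_dominating_piecewise above).
    rng = rng or np.random.default_rng()
    t, arrivals = 0.0, []
    while True:
        t += rng.exponential(1.0 / rate_max)
        if t > t_max:
            return arrivals
        if rng.uniform() < rate(t) / rate_max:
            arrivals.append(t)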
# -*-coding:utf-8 -*-
# Reference:**********************************************
# @Time : 2019-08-22 21:30
# @Author : <NAME>
# @File : cv2_test.py
# @User : liyihao
# @Software: PyCharm
# @Description: linear regression
# Reference:**********************************************
import numpy as np
import random
import torch
# hypothesis function
def inference(theta1, theta0, x):
pred_h = theta1 * x + theta0 # (theta1, theta0) = theta [theta0: bias, theta1: weight]
return pred_h
# cost function
def eval_loss(theta1, theta0, x_list, gt_y_list):
avg_loss = 0.0
for i in range(len(x_list)):
avg_loss += 0.5 * (theta1 * x_list[i] + theta0 - gt_y_list[i]) ** 2
avg_loss /= len(gt_y_list)
return avg_loss
def gradient(pred_h, gt_y, x):  # take the derivatives of the loss w.r.t. theta1 and theta0
diff = pred_h - gt_y
d_theta1 = diff * x
d_theta0 = diff
return d_theta1, d_theta0
def cal_step_gradient(batch_x_list, batch_gt_y_list, w, b, lr):
avg_dw, avg_db = 0, 0
batch_size = len(batch_x_list)
for i in range(batch_size):
pred_y = inference(w, b, batch_x_list[i])
dw, db = gradient(pred_y, batch_gt_y_list[i], batch_x_list[i])
avg_dw += dw
avg_db += db
avg_db /= batch_size
avg_dw /= batch_size
w -= lr * avg_dw
b -= lr * avg_db
return w, b
def train(x_list, gt_y_list, batch_size, lr, max_iter):
w = 0
b = 0
num_samples = len(x_list)
for i in range(max_iter):
batch_idxs = np.random.choice(num_samples, batch_size)
batch_x = [x_list[j] for j in batch_idxs]
batch_y = [gt_y_list[j] for j in batch_idxs]
w, b = cal_step_gradient(batch_x, batch_y, w, b, lr)
print("w: {0}, b: {1}".format(w, b))
print("loss is : {0}".format(eval_loss(w, b, x_list, gt_y_list)))
def gen_sample_data():
w = random.randint(0, 10) + random.random()
b = random.randint(0, 5) + random.random()
num_samples = 100
x_list = []
y_list = []
for i in range(num_samples):
x = random.randint(0, 100) * random.random()
y = w * x + b + random.random() * random.randint(-1, 1)
x_list.append(x)
y_list.append(y)
return x_list, y_list, w, b
def run():
x_list, y_list, w, b = gen_sample_data()
lr = 0.0009
max_iter = 10000
train(x_list, y_list, 50, lr, max_iter)
if __name__ == '__main__':
run()
| [
"numpy.random.choice",
"random.random",
"random.randint"
] | [((1482, 1523), 'numpy.random.choice', 'np.random.choice', (['num_samples', 'batch_size'], {}), '(num_samples, batch_size)\n', (1498, 1523), True, 'import numpy as np\n'), ((1840, 1861), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1854, 1861), False, 'import random\n'), ((1864, 1879), 'random.random', 'random.random', ([], {}), '()\n', (1877, 1879), False, 'import random\n'), ((1888, 1908), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (1902, 1908), False, 'import random\n'), ((1911, 1926), 'random.random', 'random.random', ([], {}), '()\n', (1924, 1926), False, 'import random\n'), ((2026, 2048), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (2040, 2048), False, 'import random\n'), ((2051, 2066), 'random.random', 'random.random', ([], {}), '()\n', (2064, 2066), False, 'import random\n'), ((2091, 2106), 'random.random', 'random.random', ([], {}), '()\n', (2104, 2106), False, 'import random\n'), ((2109, 2130), 'random.randint', 'random.randint', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2123, 2130), False, 'import random\n')] |
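Since the script above fits y = w*x + b by minimising a quadratic cost with mini-batch SGD, the converged (w, b) should agree with the ordinary least-squares solution. The one-line check below is an editorial sanity test; np.polyfit is not used by the script itself:

import numpy as np

def closed_form_check(x_list, gt_y_list):
    # degree-1 polyfit returns the least-squares (slope, intercept)
    w, b = np.polyfit(x_list, gt_y_list, deg=1)
    return w, b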
import numpy as np
#import pickle
#A = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = list, comments = '#', delimiter = ',', usecols = (0,1,2,3))
#A = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (2), ndmin = 1)
#B = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (3), ndmin = 1)
#A = np.genfromtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (2), ndmin = 1)
#with open('Rock_Paper_Scissors_Raw.pkl', 'wb') as output:
# pickle.dump(A, output, pickle.HIGHEST_PROTOCOL)
'''The data is now in a pkl file, stored as a list of lists. Each row looks like this:
[game_id,game_round_id,player_one_throw,player_two_throw]
We don't know what 1, 2, and 3 represent. Or even which one beats which.
But presumably 0 means the contestant didn't enter an input and thus walked away.
They can also both walk away at the same time.
We do know that game_rounds can't end in a tie, and a game lasts until there are 3 wins, or someone walks away.'''
def get_frequency(data, options):
length = len(data)
frequency = {}
for opt in options: #labeling each option
frequency[opt] = 0
for j in range(0,length): #Counting each option
frequency[data[j]] += 1
for opt in options:
frequency[opt] = frequency[opt]/length
return frequency
def process_data(data, n):
    '''input a 2-dim list of numbers. First column is gameID, second is the outputs for each round.
    returns a list of all singles, pairs, triples, quadruples, etc... up to n. '''
processed_data =[] #creates the empty data
for i in range(0,n):
processed_data.append([])
    options = [1,2,3] #the valid outputs not to skip
skip = 0 #the only value to skip
L = len(data) #don't want to calculate this a lot
#Setting up how the counters work
game_id = 0 #should reflect the actual game_id
round = 0 #should not be the actual round
for i in range(0,L):
if (data[i][0] != game_id):
game_id += 1
round = 0
if (data[i][1] != skip):
round += 1
#does the singleton first
processed_data[0].append(data[i][1])
for j in range(2,n+1):
if (round >= j):
#print(processed_data)
#print('j =', j)
processed_data[j-1].append(processed_data[0][-j:-1] + [processed_data[0][-1]])
return processed_data
from itertools import product
def prod(set,k):
    '''takes in a list of n elements. Returns a list of all ordered k-tuples (the Cartesian product with repetition) drawn from the set.'''
return list(product(set, repeat = k))
#return list(map(list, list(product(set, repeat = k))))
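# Editorial sanity check (not in the original script): product(set, repeat=k)
# yields n**k ordered k-tuples, so three throw symbols give 3**2 = 9 pairs,
# and repeated elements such as (1, 1) are included (unlike permutations).
assert len(prod([1, 2, 3], 2)) == 9
assert (1, 1) in prod([1, 2, 3], 2)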
def get_multi_frequency(data, n):
    '''input a 2-dim list of numbers. First column is gameID, second is the outputs for each round.
    returns the frequency of every single, pair, triple, quadruple, etc... up to n. '''
processed_data =[] #creates the empty data
for i in range(0,n):
processed_data.append([])
options = [1,2,3] #the possible options
multi_options = []
for i in range(1,n+1):
multi_options += [prod(options, i)]
    #create the frequency table
frequency = []
for i in range(0,n):
frequency.append({})
for opt in multi_options[i]:
#print(frequency)
#print(multi_options[i])
frequency[-1][opt] = 0
frequency[-1]['total'] = 0
#print(frequency)
skip = 0 #the only value to skip
L = len(data) #don't want to calculate this a lot
#Setting up how the counters work
game_id = 0 #should reflect the actual game_id
round = 0 #should not be the actual round
for i in range(0,L):
if (data[i][0] != game_id):
game_id = data[i][0]
round = 0
if (data[i][1] != skip):
round += 1
#do the singleton first
#print(frequency)
frequency[0][tuple([data[i][1]])] += 1
frequency[0]['total'] += 1
processed_data[0].append(data[i][1])
for j in range(2,n+1):
if (round >= j):
#print(processed_data)
#print('j =', j)
#print(processed_data[0][-j:-1] + [processed_data[0][-1]])
#print(tuple([processed_data[0][-j:-1] + [processed_data[0][-1]]]))
frequency[j-1][ tuple(processed_data[0][-j:-1] + [processed_data[0][-1]]) ] += 1
frequency[j-1]['total'] +=1
#processed_data[j-1].append(processed_data[0][-j:-1] + [processed_data[0][-1]])
return (processed_data,frequency)
#An = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (0,2), ndmin = 2)
An = np.genfromtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (0,2), max_rows = 5000)
#Am = An[0:100]
An = An.tolist()
#new_data = process_data(Am, 2)
#print(new_data)
(A,B) = get_multi_frequency(An, 4)
#print(A)
print(B)
'''
C = get_frequency(A, [1,2,3, 0])
print(C)
D = get_frequency(B, [1,2,3,0])
print(D)
'''
#This shows that humans (or wherever this data came from) do not follow a uniform random distribution
| [
"itertools.product",
"numpy.genfromtxt"
] | [((4403, 4522), 'numpy.genfromtxt', 'np.genfromtxt', (['"""Rock_Paper_Scissors_Raw.txt"""'], {'dtype': 'int', 'comments': '"""#"""', 'delimiter': '""","""', 'usecols': '(0, 2)', 'max_rows': '(5000)'}), "('Rock_Paper_Scissors_Raw.txt', dtype=int, comments='#',\n delimiter=',', usecols=(0, 2), max_rows=5000)\n", (4416, 4522), True, 'import numpy as np\n'), ((2520, 2542), 'itertools.product', 'product', (['set'], {'repeat': 'k'}), '(set, repeat=k)\n', (2527, 2542), False, 'from itertools import product\n')] |
#!/usr/bin/env python3
# test_conv2d.py
#
# Copyright (c) 2010-2018 Wave Computing, Inc. and its applicable licensors.
# All rights reserved; provided, that any files identified as open source shall
# be governed by the specific open source license(s) applicable to such files.
#
# For any files associated with distributions under the Apache 2.0 license,
# full attribution to The Apache Software Foundation is given via the license
# below.
#
# PURPOSE
# Unit test for FP Conv2D
#
# Author : <NAME>
# Created On : 02/26/2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import progressbar as pb
import waveflow
def compare_tensor(z1, z2, msg):
''' Run a compare on 2 tensors for equality. Report failure details.
'''
# assert z1.shape == z2.shape, msg
if z1.shape != z2.shape:
print(msg)
print("z1 shape: %s, z2 shape: %s" % (str(z1.shape), str(z2.shape)))
return False
    tol = 1e-4  # absolute tolerance for the element-wise comparison below
    if not np.allclose(z1, z2, atol=tol):
        print("\n\n")
        d = ~np.isclose(z1, z2, atol=tol)
print("z1 mismatch: %s" % (z1[d]))
print("z2 mismatch: %s" % (z2[d]))
print("at: %s" % (str(np.where(d))))
print("Failure: %s" % (msg))
return False
return True
def conv2d_test(config, t_init, i, p, activations, c2d_wts, stride, padding):
with tf.Session('', config=config) as sess:
t_init.run()
# print("Wave Kernel (NN):\n-------------------------------------------------")
z_op = waveflow.wavecomp_ops_module.wave_conv2d(activations, c2d_wts, strides=[1, stride, stride, 1], padding=padding)
# Base tensorflow. Only supports NHWC.
z2_op = tf.nn.conv2d(activations, c2d_wts,
strides=[1, stride, stride, 1], padding=padding, data_format='NHWC', use_cudnn_on_gpu=False)
z, z2, act_val, wts_val = sess.run([z_op, z2_op, activations, c2d_wts])
# print("\nTF:\n-------------------------------------------------")
assert_str = "Failure on i: %d, mode: SAME, params: %s" % (i, str(p))
if not compare_tensor(z, z2, assert_str):
print("activations: %s" % (act_val))
print("c2d_wts: %s" % (wts_val))
print("\n\n")
assert False
def test_conv2d():
''' Run tests on the Wave custom conv2d operator.
'''
tf.reset_default_graph()
# Turn off graph-rewriting optimizations
config = tf.ConfigProto(graph_options=tf.GraphOptions(optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)))
iterations = 10
widgets = ["conv2d tests: ", pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
pbar = pb.ProgressBar(widgets=widgets, maxval=iterations)
pbar.start()
# Interesting kernel variants to cycle through
kernel_params = [
{'t_n':100, 't_ci':1, 't_co':32, 't_h':28, 't_w':28, 'w_k':5},
{'t_n':4, 't_ci':32, 't_co':32, 't_h':15, 't_w':15, 'w_k':3},
{'t_n':1, 't_ci':4, 't_co':64, 't_h':16, 't_w':16, 'w_k':3},
{'t_n':128, 't_ci':64, 't_co':128, 't_h':7, 't_w':7, 'w_k':5},
{'t_n':4, 't_ci':8, 't_co':4, 't_h':224, 't_w':224, 'w_k':7},
{'t_n':100, 't_ci':1, 't_co':32, 't_h':28, 't_w':28, 'w_k':1},
{'t_n':1, 't_ci':1, 't_co':2, 't_h':4, 't_w':4, 'w_k':1}
]
for i in range(iterations):
pbar.update(i)
tf.reset_default_graph()
# NCHW
p = kernel_params[i % len(kernel_params)]
t_n = p['t_n']
t_ci = p['t_ci']
t_co = p['t_co']
t_h = p['t_h']
t_w = p['t_w']
w_k = p['w_k']
# N H W C
activations = tf.get_variable("a", [t_n, t_h, t_w, t_ci], dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1))
# K K Ci Co
c2d_wts = tf.get_variable("b", [w_k, w_k, t_ci, t_co], dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1))
t_init = tf.global_variables_initializer()
# SAME variant, stride = 1
conv2d_test(config, t_init, i, p, activations, c2d_wts, stride=1, padding='SAME')
# Valid variant, stride = 1
conv2d_test(config, t_init, i, p, activations, c2d_wts, stride=1, padding='VALID')
# SAME variant, stride = 2
# conv2d_test(config, t_init, i, p, activations, c2d_wts, stride=2, padding='SAME')
# Valid variant, stride = 2
conv2d_test(config, t_init, i, p, activations, c2d_wts, stride=2, padding='VALID')
pbar.finish()
return True
if __name__ == "__main__":
test_conv2d()
| [
"tensorflow.nn.conv2d",
"progressbar.Bar",
"numpy.allclose",
"tensorflow.reset_default_graph",
"numpy.isclose",
"numpy.where",
"waveflow.wavecomp_ops_module.wave_conv2d",
"tensorflow.Session",
"tensorflow.truncated_normal_initializer",
"tensorflow.global_variables_initializer",
"progressbar.Perc... | [((2939, 2963), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2961, 2963), True, 'import tensorflow as tf\n'), ((3255, 3305), 'progressbar.ProgressBar', 'pb.ProgressBar', ([], {'widgets': 'widgets', 'maxval': 'iterations'}), '(widgets=widgets, maxval=iterations)\n', (3269, 3305), True, 'import progressbar as pb\n'), ((1532, 1562), 'numpy.allclose', 'np.allclose', (['z1', 'z2'], {'atol': 'rtol'}), '(z1, z2, atol=rtol)\n', (1543, 1562), True, 'import numpy as np\n'), ((1924, 1953), 'tensorflow.Session', 'tf.Session', (['""""""'], {'config': 'config'}), "('', config=config)\n", (1934, 1953), True, 'import tensorflow as tf\n'), ((2096, 2211), 'waveflow.wavecomp_ops_module.wave_conv2d', 'waveflow.wavecomp_ops_module.wave_conv2d', (['activations', 'c2d_wts'], {'strides': '[1, stride, stride, 1]', 'padding': 'padding'}), '(activations, c2d_wts, strides=[1,\n stride, stride, 1], padding=padding)\n', (2136, 2211), False, 'import waveflow\n'), ((2280, 2412), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['activations', 'c2d_wts'], {'strides': '[1, stride, stride, 1]', 'padding': 'padding', 'data_format': '"""NHWC"""', 'use_cudnn_on_gpu': '(False)'}), "(activations, c2d_wts, strides=[1, stride, stride, 1], padding=\n padding, data_format='NHWC', use_cudnn_on_gpu=False)\n", (2292, 2412), True, 'import tensorflow as tf\n'), ((3197, 3212), 'progressbar.Percentage', 'pb.Percentage', ([], {}), '()\n', (3210, 3212), True, 'import progressbar as pb\n'), ((3219, 3227), 'progressbar.Bar', 'pb.Bar', ([], {}), '()\n', (3225, 3227), True, 'import progressbar as pb\n'), ((3234, 3242), 'progressbar.ETA', 'pb.ETA', ([], {}), '()\n', (3240, 3242), True, 'import progressbar as pb\n'), ((3992, 4016), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4014, 4016), True, 'import tensorflow as tf\n'), ((4565, 4598), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4596, 4598), True, 'import tensorflow as tf\n'), ((1599, 1628), 'numpy.isclose', 'np.isclose', (['z1', 'z2'], {'atol': 'rtol'}), '(z1, z2, atol=rtol)\n', (1609, 1628), True, 'import numpy as np\n'), ((4340, 4383), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (4371, 4383), True, 'import tensorflow as tf\n'), ((4502, 4545), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (4533, 4545), True, 'import tensorflow as tf\n'), ((1745, 1756), 'numpy.where', 'np.where', (['d'], {}), '(d)\n', (1753, 1756), True, 'import numpy as np\n'), ((3086, 3139), 'tensorflow.OptimizerOptions', 'tf.OptimizerOptions', ([], {'opt_level': 'tf.OptimizerOptions.L0'}), '(opt_level=tf.OptimizerOptions.L0)\n', (3105, 3139), True, 'import tensorflow as tf\n')] |
# Copyright 2019 SAP SE
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import os
import random
import shutil
import time
import numpy as np
import torch.utils.data
from cfg.load_config import opt, cfg_from_file
ts = time.time()
# Arguments
parser = argparse.ArgumentParser(description='xxx')
parser.add_argument('--dataset', default='svhn', type=str, required=False,
choices=['mnist', 'svhn'], help='Dataset name')
parser.add_argument('--method', type=str, required=True,
choices=['dgmw1', 'dgmw2'], help='Method to run.')
# parser.add_argument('--cfg_file',default=None,type=str,required=False, help='Path to the configuration file')
cfg = parser.parse_args()
# if cfg.method == "DGMw":
# if cfg.dataset == "mnist":
# cfg_file = 'cfg/cfg_mnist_dgmw.yml'
# cfg_from_file(cfg_file)
# elif cfg.dataset == "svhn":
# cfg_file = 'cfg/cfg_svhn_dgmw.yml'
# cfg_from_file(cfg_file)
# elif cfg.method == "DGMa":
# raise NotImplementedError
# if cfg.dataset == "mnist":
# cfg_file = 'cfg/cfg_mnist_dgma.yml'
# cfg_from_file(cfg_file)
# elif cfg.dataset == "svhn":
# cfg_file = 'cfg/cfg_svhn_dgma.yml'
# cfg_from_file(cfg_file)
cfg_file = 'cfg/cfg_{}_{}.yml'.format(cfg.dataset, cfg.method)
cfg_from_file(cfg_file)
print(opt)
#######################################################################################################################
opt.device = torch.device(
"cuda:" + str(opt.device) if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.cuda.set_device(opt.device)
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
try:
os.makedirs(opt.outf_models)
except OSError:
pass
try:
os.makedirs(opt.outf + '/mask_histo')
except:
pass
if opt.dataset == "mnist":
from dataloaders import split_MNIST as dataloader
elif opt.dataset == "svhn":
from dataloaders import split_SVHN as dataloader
if opt.method == "DGMw":
from networks import net_DGMw as model
from approaches import DGMw as approach
elif opt.method == "DGMa":
from networks import net_DGMa as model
from approaches import DGMa as approach
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(opt.manualSeed)
print('Load data...')
data, taskcla, inputsize = dataloader.get(seed=opt.manualSeed,
data_root=opt.dataroot + str(
opt.imageSize), n_classes=1,
imageSize=opt.imageSize)
print('Input size =', inputsize, '\nTask info =', taskcla)
for t in range(10):
data[t]['train']['y'].data.fill_(t)
data[t]['test']['y'].data.fill_(t)
data[t]['valid']['y'].data.fill_(t)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nb_label = 10
if opt.dataset == 'mnist':
nc = 1
elif opt.dataset == 'svhn':
nc = 3
# classes are added one by one, we innitialize G with one head output
netG = model.netG(nz, ngf, nc, opt.smax_g, n_classes=1)
print(netG)
netD = model.netD(ndf, nc)
print(netD)
log_dir = opt.log_dir + datetime.datetime.fromtimestamp(ts).strftime(
'%Y_%m_%d_%H_%M_%S')
if os.path.exists(log_dir):
shutil.rmtree(log_dir)
os.makedirs(log_dir)
appr = approach.App(model, netG, netD, log_dir, opt.outf, niter=opt.niter,
batchSize=opt.batchSize,
imageSize=opt.imageSize, nz=int(opt.nz), nb_label=nb_label,
cuda=torch.cuda.is_available(), beta1=opt.beta1,
lr_D=opt.lr_D, lr_G=opt.lr_G, lamb_G=opt.lamb_G,
reinit_D=opt.reinit_D, lambda_adv=opt.lambda_adv,
lambda_wassersten=opt.lambda_wasserstein,
dataset=opt.dataset, store_model=opt.store_models)
def n_parameters(model):
from operator import mul
from functools import reduce
return sum([reduce(mul, p.shape) for p in model.parameters()])
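# Editorial note: reduce(mul, p.shape) multiplies the dimensions of each
# parameter tensor, so a (3, 4, 5) weight contributes 3*4*5 = 60 parameters
# to the total reported below.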
for t in range(10):
test_acc_task, conf_matrixes_task, mask_G = appr.train(data, t,
smax_g=opt.smax_g,
use_aux_G=opt.aux_G)
print('Task {}: {:,} parameters'.format(t, n_parameters(netG)))
| [
"os.path.exists",
"datetime.datetime.fromtimestamp",
"argparse.ArgumentParser",
"os.makedirs",
"functools.reduce",
"cfg.load_config.cfg_from_file",
"networks.net_DGMa.netD",
"random.seed",
"numpy.random.seed",
"shutil.rmtree",
"networks.net_DGMa.parameters",
"time.time",
"random.randint",
... | [((785, 796), 'time.time', 'time.time', ([], {}), '()\n', (794, 796), False, 'import time\n'), ((819, 861), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""xxx"""'}), "(description='xxx')\n", (842, 861), False, 'import argparse\n'), ((1874, 1897), 'cfg.load_config.cfg_from_file', 'cfg_from_file', (['cfg_file'], {}), '(cfg_file)\n', (1887, 1897), False, 'from cfg.load_config import opt, cfg_from_file\n'), ((2893, 2920), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2904, 2920), False, 'import random\n'), ((2955, 2985), 'numpy.random.seed', 'np.random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2969, 2985), True, 'import numpy as np\n'), ((3787, 3835), 'networks.net_DGMa.netG', 'model.netG', (['nz', 'ngf', 'nc', 'opt.smax_g'], {'n_classes': '(1)'}), '(nz, ngf, nc, opt.smax_g, n_classes=1)\n', (3797, 3835), True, 'from networks import net_DGMa as model\n'), ((3855, 3874), 'networks.net_DGMa.netD', 'model.netD', (['ndf', 'nc'], {}), '(ndf, nc)\n', (3865, 3874), True, 'from networks import net_DGMa as model\n'), ((3986, 4009), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (4000, 4009), False, 'import os\n'), ((4038, 4058), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (4049, 4058), False, 'import os\n'), ((2217, 2238), 'os.makedirs', 'os.makedirs', (['opt.outf'], {}), '(opt.outf)\n', (2228, 2238), False, 'import os\n'), ((2273, 2301), 'os.makedirs', 'os.makedirs', (['opt.outf_models'], {}), '(opt.outf_models)\n', (2284, 2301), False, 'import os\n'), ((2336, 2373), 'os.makedirs', 'os.makedirs', (["(opt.outf + '/mask_histo')"], {}), "(opt.outf + '/mask_histo')\n", (2347, 2373), False, 'import os\n'), ((2829, 2853), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2843, 2853), False, 'import random\n'), ((4015, 4037), 'shutil.rmtree', 'shutil.rmtree', (['log_dir'], {}), '(log_dir)\n', (4028, 4037), False, 'import shutil\n'), ((3912, 3947), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (3943, 3947), False, 'import datetime\n'), ((4707, 4727), 'functools.reduce', 'reduce', (['mul', 'p.shape'], {}), '(mul, p.shape)\n', (4713, 4727), False, 'from functools import reduce\n'), ((4737, 4755), 'networks.net_DGMa.parameters', 'model.parameters', ([], {}), '()\n', (4753, 4755), True, 'from networks import net_DGMa as model\n')] |
import torch
from scipy.misc import imread, imsave, imresize
import matplotlib.pyplot as plt
import numpy as np
from path import Path
import argparse
from tqdm import tqdm
from models import DispResNet6
from utils import tensor2array
parser = argparse.ArgumentParser(description='Inference script for DispNet learned with \
Structure from Motion Learner inference on KITTI and CityScapes Dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#parser.add_argument("--pretrained", type=str, help="pretrained DispNet path",default='/home/roit/models/cc/official/dispnet_k.pth.tar')
parser.add_argument("--pretrained", type=str, help="pretrained DispNet path",default='/home/roit/models/cc/300epc.all/dispnet_model_best.pth.tar')
parser.add_argument("--img-height", default=256, type=int, help="Image height")
parser.add_argument("--img-width", default=512, type=int, help="Image width")
parser.add_argument("--no-resize", action='store_true', help="no resizing is done")
parser.add_argument("--dataset-list", default=None, type=str, help="Dataset list file")
parser.add_argument("--dataset-dir",
#default='/home/roit/datasets/kitti_small/data', type=str,help="Dataset directory")
default='/home/roit/datasets/MC_256512/2019_09_26_10_58/imgs', type=str,help="Dataset directory")
parser.add_argument("--output-dir", default='output', type=str, help="Output directory")
parser.add_argument("--output-disp", action='store_true', help="save disparity img",default=True)
parser.add_argument("--output-depth", action='store_true', help="save depth img",default=True)
parser.add_argument("--img-exts", default=['png', 'jpg', 'bmp'], nargs='*', type=str, help="images extensions to glob")
def main():
args = parser.parse_args()
if not(args.output_disp or args.output_depth):
print('You must at least output one value !')
return
disp_net = DispResNet6().cuda()
weights = torch.load(args.pretrained)
disp_net.load_state_dict(weights['state_dict'])
disp_net.eval()
dataset_dir = Path(args.dataset_dir)
output_dir = Path(args.output_dir)
output_dir.makedirs_p()
disp_dir = output_dir/dataset_dir.stem+'disp'
depth_dir = output_dir/dataset_dir.stem+'depth'
disp_dir.makedirs_p()
depth_dir.makedirs_p()
if args.dataset_list is not None:
with open(args.dataset_list, 'r') as f:
test_files = [dataset_dir/file for file in f.read().splitlines()]
else:
test_files = sum([dataset_dir.files('*.{}'.format(ext)) for ext in args.img_exts], [])
print('{} files to test'.format(len(test_files)))
for file in tqdm(test_files):
img = imread(file).astype(np.float32)
h,w,_ = img.shape
if (not args.no_resize) and (h != args.img_height or w != args.img_width):
img = imresize(img, (args.img_height, args.img_width)).astype(np.float32)
img = np.transpose(img, (2, 0, 1))
tensor_img = torch.from_numpy(img).unsqueeze(0)
tensor_img = ((tensor_img/255 - 0.5)/0.2).cuda()
        disp = disp_net(tensor_img)  # network outputs a single-channel disparity map
        depth = 1/disp  # invert disparity to get depth (first element of the batch is used)
'''
if args.output_disp:
disp = disp.cpu().data.numpy()
disp=disp[0][0]*255
plt.imsave(disp_dir/'{}.{}'.format(file.stem,'png'), disp,cmap='bone')
if args.output_depth:
depth=depth.cpu().data.numpy()
depth=depth[0][0]*255
plt.imsave(depth_dir/'{}.{}'.format(file.stem,'png'), depth,cmap='bone')
'''
if args.output_disp:
disp=tensor2array(disp[0],colormap='bone')
disp=np.transpose(disp,[1,2,0])
plt.imsave(disp_dir/'{}.{}'.format(file.stem,'png'), disp,cmap='bone')
if args.output_depth:
depth=tensor2array(depth[0],colormap='bone')
depth=np.transpose(depth,[1,2,0])
plt.imsave(depth_dir/'{}.{}'.format(file.stem,'png'), depth,cmap='bone')
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"utils.tensor2array",
"torch.load",
"tqdm.tqdm",
"torch.from_numpy",
"path.Path",
"scipy.misc.imread",
"scipy.misc.imresize",
"models.DispResNet6",
"numpy.transpose"
] | [((246, 497), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Inference script for DispNet learned with Structure from Motion Learner inference on KITTI and CityScapes Dataset"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Inference script for DispNet learned with Structure from Motion Learner inference on KITTI and CityScapes Dataset'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (269, 497), False, 'import argparse\n'), ((2020, 2047), 'torch.load', 'torch.load', (['args.pretrained'], {}), '(args.pretrained)\n', (2030, 2047), False, 'import torch\n'), ((2139, 2161), 'path.Path', 'Path', (['args.dataset_dir'], {}), '(args.dataset_dir)\n', (2143, 2161), False, 'from path import Path\n'), ((2179, 2200), 'path.Path', 'Path', (['args.output_dir'], {}), '(args.output_dir)\n', (2183, 2200), False, 'from path import Path\n'), ((2729, 2745), 'tqdm.tqdm', 'tqdm', (['test_files'], {}), '(test_files)\n', (2733, 2745), False, 'from tqdm import tqdm\n'), ((3004, 3032), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (3016, 3032), True, 'import numpy as np\n'), ((1985, 1998), 'models.DispResNet6', 'DispResNet6', ([], {}), '()\n', (1996, 1998), False, 'from models import DispResNet6\n'), ((3675, 3713), 'utils.tensor2array', 'tensor2array', (['disp[0]'], {'colormap': '"""bone"""'}), "(disp[0], colormap='bone')\n", (3687, 3713), False, 'from utils import tensor2array\n'), ((3730, 3759), 'numpy.transpose', 'np.transpose', (['disp', '[1, 2, 0]'], {}), '(disp, [1, 2, 0])\n', (3742, 3759), True, 'import numpy as np\n'), ((3888, 3927), 'utils.tensor2array', 'tensor2array', (['depth[0]'], {'colormap': '"""bone"""'}), "(depth[0], colormap='bone')\n", (3900, 3927), False, 'from utils import tensor2array\n'), ((3945, 3975), 'numpy.transpose', 'np.transpose', (['depth', '[1, 2, 0]'], {}), '(depth, [1, 2, 0])\n', (3957, 3975), True, 'import numpy as np\n'), ((2762, 2774), 'scipy.misc.imread', 'imread', (['file'], {}), '(file)\n', (2768, 2774), False, 'from scipy.misc import imread, imsave, imresize\n'), ((3055, 3076), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (3071, 3076), False, 'import torch\n'), ((2922, 2970), 'scipy.misc.imresize', 'imresize', (['img', '(args.img_height, args.img_width)'], {}), '(img, (args.img_height, args.img_width))\n', (2930, 2970), False, 'from scipy.misc import imread, imsave, imresize\n')] |
import pickle
from PIL import Image
import numpy as np
from dlib import cnn_face_detection_model_v1
from controller import Camera
from flockai.PyCatascopia.Metrics import *
from flockai.interfaces.flockai_ml import FlockAIClassifier
from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, \
ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric
from flockai.webots_controllers.mavic2dji import KeyboardMavic2DJI
from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, \
Relative2DPosition, Devices
"""""""""""""""""""""
DECLARE DEVICES HERE
"""""""""""""""""""""
enableable_devices = [
(EnableableDevice.RECEIVER, "receiver"),
(EnableableDevice.CAMERA, "camera"),
(EnableableDevice.KEYBOARD, None),
(EnableableDevice.BATTERY_SENSOR, None),
(EnableableDevice.INERTIAL_UNIT, "inertial unit"),
(EnableableDevice.GPS, "gps"),
(EnableableDevice.COMPASS, "compass"),
(EnableableDevice.GYRO, "gyro")
]
non_enableable_devices = [
(NonEnableableDevice.EMITTER, "emitter"),
(NonEnableableDevice.LED, "front left led"),
(NonEnableableDevice.LED, "front right led"),
(NonEnableableDevice.DISTANCE_SENSOR, "ds0")
]
"""""""""""""""""""""
DECLARE MOTORS HERE
"""""""""""""""""""""
motor_devices = [
(MotorDevice.CAMERA, "camera roll", AircraftAxis.ROLL),
(MotorDevice.CAMERA, "camera pitch", AircraftAxis.PITCH),
(MotorDevice.CAMERA, "camera yaw", AircraftAxis.YAW),
(MotorDevice.PROPELLER, "front left propeller", Relative2DPosition(1, -1)),
(MotorDevice.PROPELLER, "front right propeller", Relative2DPosition(1, 1)),
(MotorDevice.PROPELLER, "rear left propeller", Relative2DPosition(-1, -1)),
(MotorDevice.PROPELLER, "rear right propeller", Relative2DPosition(-1, 1)),
]
devices = Devices(enableable_devices, non_enableable_devices, motor_devices)
"""""""""""""""""""""""""""
CREATE MONITORING PROBES
"""""""""""""""""""""""""""
metrics = [
ProcessCpuUtilizationMetric(name='cpu_pct', units='%', desc='process-level cpu utilization', minVal=0, higherIsBetter=False),
ProcessCpuTimeMetric('cpu_time', 's', 'process-level cpu time', minVal=0, higherIsBetter=False),
ProcessIOTimeMetric('io_time', 's', 'process-level io time (linux-only)', minVal=0, higherIsBetter=False),
ProcessAliveTimeMetric('alive_time', 's', 'time process is alive', minVal=0, higherIsBetter=False),
ProbeAliveTimeMetric('probe_alive_time', 's', 'time probe is alive', minVal=0, higherIsBetter=False),
ProcessMemoryMetric('mem_pct', '%', 'process-level memory utilization', minVal=0, higherIsBetter=False),
]
probe = FlockAIProbe(metrics, name='Example Probe', periodicity=1)
"""""""""""""""""""""""""""""
INITIALIZE THE CONTROLLER
"""""""""""""""""""""""""""""
controller = KeyboardMavic2DJI(devices=devices, probe=probe)
"""""""""""""""""""""""""""""""""""
IMPLEMENT THE FLOCKAI CLASSIFIER
"""""""""""""""""""""""""""""""""""
class FaceDetectionClassifier(FlockAIClassifier):
def __init__(self):
super().__init__()
# REQUIRED ATTRIBUTES
self.periodicity = 5 # defines the periodicity of the prediction
self.onboard = True # defines if the classifier is run on the drone, if False, the drone transmits the input data via its emitter device
self._load_model()
""" IMPLEMENT ABSTRACT METHODS"""
def _load_model(self):
"""
Custom method that implements the way a model is loaded
:return:
"""
filename = 'cnnFaceRecognition.bin'
self.model = pickle.load(open(filename, 'rb'))
self.cnn_face_detector = cnn_face_detection_model_v1(self.model)
def _get_model_input(self):
"""
Custom method that access the camera on the controller and captures images
:return:
"""
filename = f'logs/Images/image_{str(int(time.time()))}.jpg'
camera: Camera = controller.devices['camera']['device'] # get access to controller devices
camera.saveImage(filename, 20)
return filename
def predict(self):
"""
Main pipeline method used by FlockAI during the simulation to make predictions
:return:
"""
if controller.getTime() % self.periodicity != 0.0: # get access to controller functions
return None
image_filename = self._get_model_input()
# return image_filename
image = self._load_image_file(image_filename)
return [self._trim_css_to_bounds(self._rect_to_css(face.rect), image.shape) for face in self.cnn_face_detector(image, 1)]
""" IMPLEMENT CUSTOM METHODS """
def _get_foo_unused_input(self):
"""
Unused method showcasing a different input method that the user needs
:return:
"""
camera: Camera = controller.devices['camera']['device']
image = camera.getImage()
width = camera.getWidth()
height = camera.getHeight()
image_vector = [[[camera.imageGetRed(image, width, x, y),
camera.imageGetGreen(image, width, x, y),
camera.imageGetBlue(image, width, x, y)] for y in range(height)] for x in range(width)]
return image_vector
def _trim_css_to_bounds(self, css, image_shape):
return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
def _rect_to_css(self, rect):
return rect.top(), rect.right(), rect.bottom(), rect.left()
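    # Editorial note: the "css" tuple is ordered (top, right, bottom, left),
    # and _trim_css_to_bounds clamps it to the image, e.g. (-5, 700, 500, -3)
    # on a 480x640 frame becomes (0, 640, 480, 0).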
def _load_image_file(self, file, mode='RGB'):
im = Image.open(file)
if mode:
im = im.convert(mode)
return np.array(im)
"""""""""""""""""""""""""""""""""""""""""""""
SET THE ML MODEL ON THE CONTROLLER AND RUN IT
"""""""""""""""""""""""""""""""""""""""""""""
controller.model = FaceDetectionClassifier()
controller.run()
| [
"flockai.models.devices.device_enums.Devices",
"PIL.Image.open",
"flockai.models.probes.flockai_probe.ProcessCpuTimeMetric",
"flockai.models.probes.flockai_probe.FlockAIProbe",
"flockai.models.probes.flockai_probe.ProbeAliveTimeMetric",
"flockai.models.devices.device_enums.Relative2DPosition",
"dlib.cnn... | [((1903, 1969), 'flockai.models.devices.device_enums.Devices', 'Devices', (['enableable_devices', 'non_enableable_devices', 'motor_devices'], {}), '(enableable_devices, non_enableable_devices, motor_devices)\n', (1910, 1969), False, 'from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, Relative2DPosition, Devices\n'), ((2735, 2793), 'flockai.models.probes.flockai_probe.FlockAIProbe', 'FlockAIProbe', (['metrics'], {'name': '"""Example Probe"""', 'periodicity': '(1)'}), "(metrics, name='Example Probe', periodicity=1)\n", (2747, 2793), False, 'from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric\n'), ((2894, 2941), 'flockai.webots_controllers.mavic2dji.KeyboardMavic2DJI', 'KeyboardMavic2DJI', ([], {'devices': 'devices', 'probe': 'probe'}), '(devices=devices, probe=probe)\n', (2911, 2941), False, 'from flockai.webots_controllers.mavic2dji import KeyboardMavic2DJI\n'), ((2068, 2197), 'flockai.models.probes.flockai_probe.ProcessCpuUtilizationMetric', 'ProcessCpuUtilizationMetric', ([], {'name': '"""cpu_pct"""', 'units': '"""%"""', 'desc': '"""process-level cpu utilization"""', 'minVal': '(0)', 'higherIsBetter': '(False)'}), "(name='cpu_pct', units='%', desc=\n 'process-level cpu utilization', minVal=0, higherIsBetter=False)\n", (2095, 2197), False, 'from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric\n'), ((2198, 2297), 'flockai.models.probes.flockai_probe.ProcessCpuTimeMetric', 'ProcessCpuTimeMetric', (['"""cpu_time"""', '"""s"""', '"""process-level cpu time"""'], {'minVal': '(0)', 'higherIsBetter': '(False)'}), "('cpu_time', 's', 'process-level cpu time', minVal=0,\n higherIsBetter=False)\n", (2218, 2297), False, 'from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric\n'), ((2299, 2408), 'flockai.models.probes.flockai_probe.ProcessIOTimeMetric', 'ProcessIOTimeMetric', (['"""io_time"""', '"""s"""', '"""process-level io time (linux-only)"""'], {'minVal': '(0)', 'higherIsBetter': '(False)'}), "('io_time', 's', 'process-level io time (linux-only)',\n minVal=0, higherIsBetter=False)\n", (2318, 2408), False, 'from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric\n'), ((2410, 2512), 'flockai.models.probes.flockai_probe.ProcessAliveTimeMetric', 'ProcessAliveTimeMetric', (['"""alive_time"""', '"""s"""', '"""time process is alive"""'], {'minVal': '(0)', 'higherIsBetter': '(False)'}), "('alive_time', 's', 'time process is alive', minVal=0,\n higherIsBetter=False)\n", (2432, 2512), False, 'from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric\n'), ((2514, 2619), 'flockai.models.probes.flockai_probe.ProbeAliveTimeMetric', 'ProbeAliveTimeMetric', (['"""probe_alive_time"""', '"""s"""', '"""time probe is alive"""'], {'minVal': '(0)', 'higherIsBetter': '(False)'}), "('probe_alive_time', 's', 'time probe is alive', 
minVal\n =0, higherIsBetter=False)\n", (2534, 2619), False, 'from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric\n'), ((2620, 2727), 'flockai.models.probes.flockai_probe.ProcessMemoryMetric', 'ProcessMemoryMetric', (['"""mem_pct"""', '"""%"""', '"""process-level memory utilization"""'], {'minVal': '(0)', 'higherIsBetter': '(False)'}), "('mem_pct', '%', 'process-level memory utilization',\n minVal=0, higherIsBetter=False)\n", (2639, 2727), False, 'from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric\n'), ((1623, 1648), 'flockai.models.devices.device_enums.Relative2DPosition', 'Relative2DPosition', (['(1)', '(-1)'], {}), '(1, -1)\n', (1641, 1648), False, 'from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, Relative2DPosition, Devices\n'), ((1704, 1728), 'flockai.models.devices.device_enums.Relative2DPosition', 'Relative2DPosition', (['(1)', '(1)'], {}), '(1, 1)\n', (1722, 1728), False, 'from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, Relative2DPosition, Devices\n'), ((1782, 1808), 'flockai.models.devices.device_enums.Relative2DPosition', 'Relative2DPosition', (['(-1)', '(-1)'], {}), '(-1, -1)\n', (1800, 1808), False, 'from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, Relative2DPosition, Devices\n'), ((1863, 1888), 'flockai.models.devices.device_enums.Relative2DPosition', 'Relative2DPosition', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1881, 1888), False, 'from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, Relative2DPosition, Devices\n'), ((3729, 3768), 'dlib.cnn_face_detection_model_v1', 'cnn_face_detection_model_v1', (['self.model'], {}), '(self.model)\n', (3756, 3768), False, 'from dlib import cnn_face_detection_model_v1\n'), ((5658, 5674), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (5668, 5674), False, 'from PIL import Image\n'), ((5741, 5753), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (5749, 5753), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
"""Demo of different plot API styles: procedural test_widget and OO test_plot
"""
from __future__ import print_function
import logging
import sys
import numpy
from PyQt4 import QtGui
logging.basicConfig()
logger = logging.getLogger(__name__)
app = QtGui.QApplication([])
def test_widget():
"""Code with PlotWidget API"""
from plot.PlotWidget import PlotWidget
plt = PlotWidget()
plt.addCurve(x=None, y=(1, 2, 2))
plt.addImage(data=numpy.arange(100).reshape(10, -1), xScale=(0, 1), yScale=(10, 1))
plt.setGraphTitle("Procedural Plot API")
plt.setGraphXLabel("x label")
plt.setGraphYLabel("y label")
plt.setGraphXLimits(0, 10)
plt.setGraphYLimits(0, 20)
plt.invertYAxis(True)
plt.show()
return plt
def test_plot():
"""Code with OO API"""
from plot import BackendMPL, Plot
class MyPlotWidget(Plot):
"""Glue class, should be provided by plot"""
def __init__(self, title=""):
super(MyPlotWidget, self).__init__(title=title)
self.backend = BackendMPL(self)
def show(self):
self.backend.show()
#####################
plt = MyPlotWidget()
plt.addImage(data=numpy.arange(100).reshape(10, -1), origin=(0, 10))
curve = plt.addCurve(y=(1, 2, 2))
plt.title = "OO Plot API"
plt.xlabel = "x left"
plt.ylabel = "y left"
plt.xlimits = 0, 10
plt.ylimits = 20, 0
plt.show()
# Update
curve.linewidth = 2
plt.grid = "both"
plt.axes.right.ylabel = "y right"
return plt
plot = test_widget()
plotOO = test_plot()
sys.exit(app.exec_())
| [
"logging.basicConfig",
"PyQt4.QtGui.QApplication",
"logging.getLogger",
"plot.PlotWidget.PlotWidget",
"plot.BackendMPL",
"numpy.arange"
] | [((225, 246), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (244, 246), False, 'import logging\n'), ((256, 283), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'import logging\n'), ((291, 313), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (309, 313), False, 'from PyQt4 import QtGui\n'), ((424, 436), 'plot.PlotWidget.PlotWidget', 'PlotWidget', ([], {}), '()\n', (434, 436), False, 'from plot.PlotWidget import PlotWidget\n'), ((1088, 1104), 'plot.BackendMPL', 'BackendMPL', (['self'], {}), '(self)\n', (1098, 1104), False, 'from plot import BackendMPL, Plot\n'), ((497, 514), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (509, 514), False, 'import numpy\n'), ((1237, 1254), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (1249, 1254), False, 'import numpy\n')] |
import cv2
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
# imagenet
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
class CustomDataset(Dataset):
def __init__(self, all_img_path_list, transform, ):
self.all_img_paths = all_img_path_list
self.transform = transform
def __len__(self):
return len(self.all_img_paths)
def __getitem__(self, idx):
img_path = self.all_img_paths[idx]
# solve chinese file name problem
img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_COLOR)
img = Image.fromarray(img)
img = self.transform(img)
return img, img_path
| [
"PIL.Image.fromarray",
"numpy.fromfile"
] | [((625, 645), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (640, 645), False, 'from PIL import Image\n'), ((554, 591), 'numpy.fromfile', 'np.fromfile', (['img_path'], {'dtype': 'np.uint8'}), '(img_path, dtype=np.uint8)\n', (565, 591), True, 'import numpy as np\n')] |
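The np.fromfile + cv2.imdecode pair above works around cv2.imread failing on non-ASCII (e.g. Chinese) file paths. The symmetric trick for writing, an editorial sketch using only standard OpenCV calls, looks like this:

import cv2

def imwrite_unicode(path, img, ext='.jpg'):
    # encode to an in-memory buffer, then let numpy write the raw bytes,
    # since ndarray.tofile handles non-ASCII paths that cv2.imwrite cannot
    ok, buf = cv2.imencode(ext, img)
    if ok:
        buf.tofile(path)
    return ok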
import warnings
import pickle
import pandas as pd
import numpy as np
import random
from math import ceil, floor
from copy import deepcopy
from functions import *
warnings.filterwarnings('ignore')
minicolumns = 10
hypercolumns = 15
sequence_length = 2
number_of_sequences = 20
desired_root = 0.9
verbose = True
# Do the calculations
minicolumns_set = [5, 8, 10, 15]
for minicolumns in minicolumns_set:
    print('hypercolumns', hypercolumns)
    print('minicolumns', minicolumns)
    pattern_seed = np.random.randint(0, 20)
    aux = find_root_empirical(desired_root, hypercolumns, minicolumns, sequence_length, pattern_seed, tolerance=0.01, verbose=verbose)
    capacity, p_root, trials = aux
    # Read
    data_frame = pd.read_csv('../storage_capacity_data.csv', index_col=0)
    # Write
    data_frame = data_frame.append({'hypercolumns':hypercolumns, 'minicolumns':minicolumns, 'sequence_length':sequence_length,
                      'capacity':capacity, 'p_critical':p_root, 'trials':trials }, ignore_index=True)
    # Store the data base
    data_frame.to_csv('../storage_capacity_data.csv')
    print('Stored')
    print('================')
"numpy.random.randint",
"warnings.filterwarnings",
"pandas.read_csv"
] | [((164, 197), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (187, 197), False, 'import warnings\n'), ((503, 527), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (520, 527), True, 'import numpy as np\n'), ((729, 785), 'pandas.read_csv', 'pd.read_csv', (['"""../storage_capacity_data.csv"""'], {'index_col': '(0)'}), "('../storage_capacity_data.csv', index_col=0)\n", (740, 785), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
author: <NAME> (github Boyne272)
Last updated on Wed Aug 28 08:46:31 2019
"""
import sys
import time as tm
import random
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
def percent_print(i, i_max, interval=1, length=50):
"""
Print a progress bar or percentage value
Parameters
----------
i : int
The current iteration number
i_max : int
The total number of iterations to complete
interval : int, optional
The frequency of updating the progress bar (set high for long
simulations)
Returns
-------
updated : bool
Whether the update bar was updated
"""
if i % interval == 0:
# print percent
# sys.stdout.write("\r %.1f %% Done" % (100*i/i_max))
# print progress bar
_m = int(length * i/i_max) + 1
_n = length - _m
sys.stdout.write("\rProgress |" + "#"*_m + " "*_n + "|")
# update the string on screen
sys.stdout.flush()
return True
if i == i_max:
sys.stdout.write("\rProgress |" + "#" * length + "|")
sys.stdout.flush()
return True
return False
def get_img(path):
"Load an image as a numpy array"
img = Image.open(path)
arr = np.array(img.convert('RGB')).astype(float)/255.
return arr
def set_seed(seed):
"""
Use this to set ALL the random seeds to a fixed value and take out any
randomness from cuda kernels
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
return True
class progress_bar():
"""
A simple progress bar for quick and easy user output
"""
def __init__(self, imax, refresh=1, length=50):
# store parameters
        self.imax = max(imax - 1, 1) # ensure a value of 1 will not break it
self.length = length
self.refresh = refresh
# create the time and iteration stores
self.times = []
self.iterations = []
        self.start = tm.perf_counter()  # time.clock was removed in Python 3.8
def add_time(self, iteration):
"store the current time and iteration"
        self.times.append(tm.perf_counter() - self.start)
self.iterations.append(iteration)
def print_bar(self, i):
"update the progress bar"
_m = int(self.length * i/self.imax) + 1
_n = self.length - _m
sys.stdout.write("\rProgress |" + "#" * _m + " " * _n +
"| %.4f s" % self.times[-1])
def __call__(self, i):
"if on correct iteration update the progress bar and store the time"
if (i % self.refresh) == 0:
self.add_time(i)
self.print_bar(i)
return True
return False
def plot_time(self, axis=None):
"plot the time vs iterations on the axis if given"
if axis is None:
_fig, axis = plt.subplots()
axis.plot(self.iterations, self.times, '-o')
axis.set(xlabel="Iteration", ylabel="Time (s)")
return axis
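
# A minimal, self-contained usage sketch (the busy-work loop below is a
# made-up workload, not part of the original module):
if __name__ == '__main__':
    bar = progress_bar(10000, refresh=100)
    for i in range(10000):
        _ = sum(range(100))  # placeholder work
        bar(i)
    bar.plot_time()
    plt.show()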
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"PIL.Image.open",
"time.clock",
"random.seed",
"numpy.random.seed",
"sys.stdout.flush",
"matplotlib.pyplot.subplots",
"sys.stdout.write"
] | [((1336, 1352), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1346, 1352), False, 'from PIL import Image\n'), ((1586, 1603), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1597, 1603), False, 'import random\n'), ((1609, 1629), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1623, 1629), True, 'import numpy as np\n'), ((1635, 1658), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1652, 1658), False, 'import torch\n'), ((1664, 1696), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1690, 1696), False, 'import torch\n'), ((962, 1022), 'sys.stdout.write', 'sys.stdout.write', (["('\\rProgress |' + '#' * _m + ' ' * _n + '|')"], {}), "('\\rProgress |' + '#' * _m + ' ' * _n + '|')\n", (978, 1022), False, 'import sys\n'), ((1069, 1087), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1085, 1087), False, 'import sys\n'), ((1140, 1193), 'sys.stdout.write', 'sys.stdout.write', (["('\\rProgress |' + '#' * length + '|')"], {}), "('\\rProgress |' + '#' * length + '|')\n", (1156, 1193), False, 'import sys\n'), ((1203, 1221), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1219, 1221), False, 'import sys\n'), ((2258, 2268), 'time.clock', 'tm.clock', ([], {}), '()\n', (2266, 2268), True, 'import time as tm\n'), ((2605, 2694), 'sys.stdout.write', 'sys.stdout.write', (["('\\rProgress |' + '#' * _m + ' ' * _n + '| %.4f s' % self.times[-1])"], {}), "('\\rProgress |' + '#' * _m + ' ' * _n + '| %.4f s' % self.\n times[-1])\n", (2621, 2694), False, 'import sys\n'), ((3122, 3136), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3134, 3136), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2394), 'time.clock', 'tm.clock', ([], {}), '()\n', (2392, 2394), True, 'import time as tm\n')] |
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
from sklearn.neighbors import KernelDensity
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
# Estimate the conditional distribution P(A|Y)
def density_estimation(Y, A, Y_test=[]):
bandwidth = np.sqrt( max(np.median(np.abs(Y)), 0.01))
kde_0 = KernelDensity(kernel='linear', bandwidth=bandwidth).fit(Y[A==0][:, np.newaxis])
kde_1 = KernelDensity(kernel='linear', bandwidth=bandwidth).fit(Y[A==1][:, np.newaxis])
log_dens_0 = np.exp(np.squeeze(kde_0.score_samples(Y[:, np.newaxis])))
log_dens_1 = np.exp(np.squeeze(kde_1.score_samples(Y[:, np.newaxis])))
p_0 = np.sum(A==0) / A.shape[0]
p_1 = 1 - p_0
# p(A=1|y) = p(y|A=1)p(A=1) / (p(y|A=1)p(A=1) + p(y|A=0)p(A=0))
p_success = (log_dens_1*p_1) / (log_dens_1*p_1 + log_dens_0*p_0 + 1e-10)
p_success_test = []
if len(Y_test)>0:
log_dens_0_test = np.exp(np.squeeze(kde_0.score_samples(Y_test[:, np.newaxis])))
log_dens_1_test = np.exp(np.squeeze(kde_1.score_samples(Y_test[:, np.newaxis])))
p_success_test = (log_dens_1_test*p_1) / (log_dens_1_test*p_1 + log_dens_0_test*p_0 + 1e-10)
return p_success, p_success_test
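
# A hedged sanity check for the Bayes step above (synthetic data; the +1.0
# group shift is an arbitrary choice for illustration): when A=1 outcomes are
# larger on average, the estimated P(A=1|y) should grow with y.
def _density_estimation_sanity_check():
    rng = np.random.RandomState(0)
    Y = np.concatenate([rng.randn(500), rng.randn(500) + 1.0])
    A = np.concatenate([np.zeros(500), np.ones(500)])
    p, _ = density_estimation(Y, A)
    print("P(A=1|y) at min(Y) vs max(Y):", p[np.argmin(Y)], p[np.argmax(Y)])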
# Define linear model
class linear_model(torch.nn.Module):
def __init__(self,
in_shape=1,
out_shape=2):
super().__init__()
self.in_shape = in_shape
self.out_shape = out_shape
self.build_model()
def build_model(self):
self.base_model = nn.Sequential(
nn.Linear(self.in_shape, self.out_shape, bias=True),
)
def forward(self, x):
return torch.squeeze(self.base_model(x))
# Define deep neural net model for classification
class deep_model(torch.nn.Module):
def __init__(self,
in_shape=1,
out_shape=1):
super().__init__()
self.in_shape = in_shape
self.dim_h = 64
self.dropout = 0.5
self.out_shape = out_shape
self.build_model()
def build_model(self):
self.base_model = nn.Sequential(
nn.Linear(self.in_shape, self.dim_h, bias=True),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(self.dim_h, self.out_shape, bias=True),
)
def forward(self, x):
return torch.squeeze(self.base_model(x))
# Define deep model for regression
class deep_reg_model(torch.nn.Module):
def __init__(self,
in_shape=1,
out_shape=1):
super().__init__()
self.in_shape = in_shape
self.dim_h = 64 #in_shape*10
self.out_shape = out_shape
self.build_model()
def build_model(self):
self.base_model = nn.Sequential(
nn.Linear(self.in_shape, self.dim_h, bias=True),
nn.ReLU(),
nn.Linear(self.dim_h, self.out_shape, bias=True),
)
def forward(self, x):
return torch.squeeze(self.base_model(x))
# Define deep regression model, used by the fair dummies test
class deep_proba_model(torch.nn.Module):
def __init__(self,
in_shape=1):
super().__init__()
self.in_shape = in_shape
self.dim_h = 64 #in_shape*10
self.dropout = 0.5
self.out_shape = 1
self.build_model()
def build_model(self):
self.base_model = nn.Sequential(
nn.Linear(self.in_shape, self.dim_h, bias=True),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(self.dim_h, self.out_shape, bias=True),
nn.Sigmoid(),
)
def forward(self, x):
return torch.squeeze(self.base_model(x))
def calc_accuracy(outputs,Y): # note: outputs are expected to be 2-dimensional (batch, classes)
max_vals, max_indices = torch.max(outputs,1)
acc = (max_indices == Y).sum().detach().cpu().numpy()/max_indices.size()[0]
return acc
def compute_acc(Yhat,Y):
_, predicted = torch.max(Yhat, 1)
total = Y.size(0)
correct = (predicted == Y).sum().item()
acc = correct/total
return acc
def compute_acc_numpy(Yhat,Y):
Yhat = torch.from_numpy(Yhat)
Y = torch.from_numpy(Y)
return compute_acc(Yhat,Y)
def pytorch_standard_scaler(x):
m = x.mean(0, keepdim=True)
s = x.std(0, unbiased=False, keepdim=True)
x -= m
x /= s
return x
# fit a neural netwok on a given data, used by the fair dummies test
class GeneralLearner:
def __init__(self,
lr,
epochs,
cost_func,
in_shape,
batch_size,
model_type,
out_shape = 1):
# input dim
self.in_shape = in_shape
# output dim
self.out_shape = out_shape
# Data normalization
self.X_scaler = StandardScaler()
# learning rate
self.lr = lr
# number of epochs
self.epochs = epochs
# cost to minimize
self.cost_func = cost_func
# define a predictive model
self.model_type = model_type
if self.model_type == "deep_proba":
self.model = deep_proba_model(in_shape=in_shape)
elif self.model_type == "deep_regression":
self.model = deep_model(in_shape=in_shape, out_shape=self.out_shape)
        else:
            raise ValueError("unsupported model_type: %s" % self.model_type)
# optimizer
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
# minibatch size
self.batch_size = batch_size
# fit a model by sweeping over all data points
def internal_epoch(self,X_,Y_):
# shuffle data
shuffle_idx = np.arange(X_.shape[0])
np.random.shuffle(shuffle_idx)
X = X_.clone()[shuffle_idx]
Y = Y_.clone()[shuffle_idx]
# fit pred func
self.model.train()
batch_size = self.batch_size
epoch_losses = []
for idx in range(0, X.shape[0], batch_size):
self.optimizer.zero_grad()
batch_x = X[idx : min(idx + batch_size, X.shape[0]),:]
batch_y = Y[idx : min(idx + batch_size, Y.shape[0])]
# utility loss
batch_Yhat = self.model(batch_x)
loss = self.cost_func(batch_Yhat,batch_y)
loss.backward()
self.optimizer.step()
epoch_losses.append(loss.cpu().detach().numpy())
epoch_loss = np.mean(epoch_losses)
return epoch_loss
def run_epochs(self,X,Y):
for epoch in range(self.epochs):
epoch_loss = self.internal_epoch(X,Y)
# fit a model on training data
def fit(self,X,Y):
self.X_scaler.fit(X)
Xp = torch.from_numpy(self.X_scaler.transform(X)).float()
Yp = torch.from_numpy(Y).float()
# evaluate at init
self.model.eval()
Yhat = self.model(Xp)
print('Init Loss = ' + str(self.cost_func(Yhat, Yp).detach().numpy()))
self.model.train()
self.run_epochs(Xp,Yp)
# evaluate
self.model.eval()
Yhat = self.model(Xp)
print('Final Loss = ' + str(self.cost_func(Yhat, Yp).detach().numpy()))
def predict(self,X):
self.model.eval()
Xp = torch.from_numpy(self.X_scaler.transform(X)).float()
Yhat = self.model(Xp)
Yhat = Yhat.detach().numpy()
return Yhat
# run the fair dummies test
# Yhat_cal, A_cal, Y_cal: are used to fit a model that formulates the test statistics
# Yhat, A, Y: variables on which we test whether Yhat is independent of A given Y
def fair_dummies_test_regression(Yhat_cal,
A_cal,
Y_cal,
Yhat,
A,
Y,
num_reps=1,
num_p_val_rep=1000,
reg_func_name="Net",
lr=0.1,
return_vec=False):
p_success, dummy = density_estimation(np.concatenate((Y_cal,Y),0),np.concatenate((A_cal,A),0))
p_success = p_success[Y_cal.shape[0]:]
out_shape = 1
if len(Yhat.shape) > 1:
out_shape = Yhat.shape[1]
Y_cal = Y_cal[:,np.newaxis]
Y = Y[:,np.newaxis]
test_i = []
for i in range(num_reps):
# fit regressor
if reg_func_name == "RF":
regr = RandomForestRegressor(n_estimators = 10)
elif reg_func_name == "Net":
regr = GeneralLearner(lr=lr,
epochs=200,
cost_func=nn.MSELoss(),
in_shape=2,
batch_size=128,
model_type="deep_regression",
out_shape=out_shape)
features_cal = np.concatenate((A_cal[:,np.newaxis],Y_cal),1)
regr.fit(features_cal, Yhat_cal)
# compute error on holdout points
features_orig = np.concatenate((A[:,np.newaxis], Y),1)
output_orig = regr.predict(features_orig)
est_orig_err = np.mean((Yhat - output_orig)**2)
# generate A and compare
est_fake_err = np.zeros(num_p_val_rep)
for inter_p_value in range(num_p_val_rep):
random_array = np.random.uniform(low=0.0, high=1.0, size=A.shape)
A_tilde = (random_array < p_success).astype(float)
features_fake = np.concatenate((A_tilde[:,np.newaxis],Y),1)
output_fake = regr.predict(features_fake)
est_fake_err[inter_p_value] = np.mean((Yhat - output_fake)**2)
p_val = 1.0/(num_p_val_rep+1) * (1 + sum(est_orig_err >= est_fake_err))
test_i.append(p_val)
print("Fair dummies test (regression score), p-value:", np.mean(test_i)) # should be uniform under ind.
out = test_i[0]
if return_vec:
out = test_i
return out
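
# A minimal end-to-end sketch of the regression test above (all data below is
# synthetic; the two-column Yhat mimics interval-style predictions built from
# Y plus noise, so that given Y it carries no extra information about A and
# the returned p-value should be roughly uniform over repeated runs):
def _fair_dummies_regression_demo():
    rng = np.random.RandomState(0)
    n = 200
    A_cal = rng.binomial(1, 0.5, n).astype(float)
    A = rng.binomial(1, 0.5, n).astype(float)
    Y_cal = rng.randn(n) + A_cal
    Y = rng.randn(n) + A
    Yhat_cal = np.stack([Y_cal - 1.0, Y_cal + 1.0], axis=1) + 0.1 * rng.randn(n, 2)
    Yhat = np.stack([Y - 1.0, Y + 1.0], axis=1) + 0.1 * rng.randn(n, 2)
    return fair_dummies_test_regression(Yhat_cal, A_cal, Y_cal, Yhat, A, Y,
                                         num_reps=1, num_p_val_rep=100,
                                         reg_func_name="RF")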
def classification_score(y_hat,y):
    assert y < len(y_hat)  # y is used as an index into y_hat
prob = y_hat[int(y)]
return prob
# run the fair dummies test
# Yhat_cal, A_cal, Y_cal: are used to fit a model that formulates the test statistics
# Yhat, A, Y: variables on which we test whether Yhat is independent of A given Y
def fair_dummies_test_classification(Yhat_cal,
A_cal,
Y_cal,
Yhat,
A,
Y,
num_reps=10,
num_p_val_rep=1000,
reg_func_name="Net"):
p_success, dummy = density_estimation(np.concatenate((Y_cal,Y),0),np.concatenate((A_cal,A),0))
p_success = p_success[Y_cal.shape[0]:]
Yhat_cal_score = np.array([classification_score(Yhat_cal[i],Y_cal[i]) for i in range(Yhat_cal.shape[0])], dtype = float)
Yhat_score = np.array([classification_score(Yhat[i],Y[i]) for i in range(Y.shape[0])], dtype = float)
Y_cal = pd.get_dummies(Y_cal).values.astype(float)
Y = pd.get_dummies(Y).values.astype(float)
test_i = []
err_func = nn.BCELoss()
for i in range(num_reps):
features_cal = np.concatenate((A_cal[:,np.newaxis],Y_cal),1)
# fit regressor
if reg_func_name == "RF":
regr = RandomForestRegressor(n_estimators = 10)
elif reg_func_name == "Net":
regr = GeneralLearner(lr=0.1,
epochs=200,
cost_func=nn.BCELoss(),
in_shape=features_cal.shape[1],
batch_size=128,
model_type="deep_proba")
regr.fit(features_cal, Yhat_cal_score)
# compute error on holdout points
features_orig = np.concatenate((A[:,np.newaxis], Y),1)
output_orig = regr.predict(features_orig)
if reg_func_name == "RF":
est_orig_err = np.mean((Yhat_score - output_orig)**2)
elif reg_func_name == "Net":
est_orig_err = err_func(torch.from_numpy(output_orig).float(),
torch.from_numpy(Yhat_score).float()).detach().cpu().numpy()
# generate A and compare
est_fake_err = np.zeros(num_p_val_rep)
for inter_p_value in range(num_p_val_rep):
random_array = np.random.uniform(low=0.0, high=1.0, size=A.shape)
A_tilde = (random_array < p_success).astype(float)
features_fake = np.concatenate((A_tilde[:,np.newaxis],Y),1)
output_fake = regr.predict(features_fake)
if reg_func_name == "RF":
est_fake_err[inter_p_value] = np.mean((Yhat_score - output_fake)**2)
elif reg_func_name == "Net":
est_fake_err[inter_p_value] = err_func(torch.from_numpy(output_fake).float(),
torch.from_numpy(Yhat_score).float()).detach().cpu().numpy()
p_val = 1.0/(num_p_val_rep+1) * (1 + sum(est_orig_err >= est_fake_err))
test_i.append(p_val)
print("Fair dummies test (classification score), p-value:", np.mean(test_i)) # should be uniform under ind.
return test_i[0]
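
# The classification variant above is driven like the regression demo, except
# each row of Yhat holds per-class probabilities and Y holds integer labels
# (hedged, synthetic sketch; softmax_outputs_* are placeholder names):
#     Yhat_cal, Yhat = softmax_outputs_cal, softmax_outputs   # shape (n, n_classes)
#     fair_dummies_test_classification(Yhat_cal, A_cal, Y_cal, Yhat, A, Y,
#                                      num_reps=1, reg_func_name="RF")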
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.max",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.arange",
"numpy.mean",
"torch.nn.Sigmoid",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.neighbors.KernelDensity",
"numpy.concatenate",
"numpy.abs",
"pandas.get_dummies",
"sklearn.preproce... | [((3896, 3917), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (3905, 3917), False, 'import torch\n'), ((4057, 4075), 'torch.max', 'torch.max', (['Yhat', '(1)'], {}), '(Yhat, 1)\n', (4066, 4075), False, 'import torch\n'), ((4224, 4246), 'torch.from_numpy', 'torch.from_numpy', (['Yhat'], {}), '(Yhat)\n', (4240, 4246), False, 'import torch\n'), ((4255, 4274), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (4271, 4274), False, 'import torch\n'), ((11324, 11336), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (11334, 11336), True, 'import torch.nn as nn\n'), ((703, 717), 'numpy.sum', 'np.sum', (['(A == 0)'], {}), '(A == 0)\n', (709, 717), True, 'import numpy as np\n'), ((4927, 4943), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4941, 4943), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5763, 5785), 'numpy.arange', 'np.arange', (['X_.shape[0]'], {}), '(X_.shape[0])\n', (5772, 5785), True, 'import numpy as np\n'), ((5794, 5824), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_idx'], {}), '(shuffle_idx)\n', (5811, 5824), True, 'import numpy as np\n'), ((6513, 6534), 'numpy.mean', 'np.mean', (['epoch_losses'], {}), '(epoch_losses)\n', (6520, 6534), True, 'import numpy as np\n'), ((8190, 8219), 'numpy.concatenate', 'np.concatenate', (['(Y_cal, Y)', '(0)'], {}), '((Y_cal, Y), 0)\n', (8204, 8219), True, 'import numpy as np\n'), ((8218, 8247), 'numpy.concatenate', 'np.concatenate', (['(A_cal, A)', '(0)'], {}), '((A_cal, A), 0)\n', (8232, 8247), True, 'import numpy as np\n'), ((9016, 9064), 'numpy.concatenate', 'np.concatenate', (['(A_cal[:, np.newaxis], Y_cal)', '(1)'], {}), '((A_cal[:, np.newaxis], Y_cal), 1)\n', (9030, 9064), True, 'import numpy as np\n'), ((9170, 9210), 'numpy.concatenate', 'np.concatenate', (['(A[:, np.newaxis], Y)', '(1)'], {}), '((A[:, np.newaxis], Y), 1)\n', (9184, 9210), True, 'import numpy as np\n'), ((9282, 9316), 'numpy.mean', 'np.mean', (['((Yhat - output_orig) ** 2)'], {}), '((Yhat - output_orig) ** 2)\n', (9289, 9316), True, 'import numpy as np\n'), ((9372, 9395), 'numpy.zeros', 'np.zeros', (['num_p_val_rep'], {}), '(num_p_val_rep)\n', (9380, 9395), True, 'import numpy as np\n'), ((9963, 9978), 'numpy.mean', 'np.mean', (['test_i'], {}), '(test_i)\n', (9970, 9978), True, 'import numpy as np\n'), ((10857, 10886), 'numpy.concatenate', 'np.concatenate', (['(Y_cal, Y)', '(0)'], {}), '((Y_cal, Y), 0)\n', (10871, 10886), True, 'import numpy as np\n'), ((10885, 10914), 'numpy.concatenate', 'np.concatenate', (['(A_cal, A)', '(0)'], {}), '((A_cal, A), 0)\n', (10899, 10914), True, 'import numpy as np\n'), ((11391, 11439), 'numpy.concatenate', 'np.concatenate', (['(A_cal[:, np.newaxis], Y_cal)', '(1)'], {}), '((A_cal[:, np.newaxis], Y_cal), 1)\n', (11405, 11439), True, 'import numpy as np\n'), ((12029, 12069), 'numpy.concatenate', 'np.concatenate', (['(A[:, np.newaxis], Y)', '(1)'], {}), '((A[:, np.newaxis], Y), 1)\n', (12043, 12069), True, 'import numpy as np\n'), ((12485, 12508), 'numpy.zeros', 'np.zeros', (['num_p_val_rep'], {}), '(num_p_val_rep)\n', (12493, 12508), True, 'import numpy as np\n'), ((13381, 13396), 'numpy.mean', 'np.mean', (['test_i'], {}), '(test_i)\n', (13388, 13396), True, 'import numpy as np\n'), ((370, 421), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""linear"""', 'bandwidth': 'bandwidth'}), "(kernel='linear', bandwidth=bandwidth)\n", (383, 421), False, 'from sklearn.neighbors import KernelDensity\n'), ((462, 
513), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""linear"""', 'bandwidth': 'bandwidth'}), "(kernel='linear', bandwidth=bandwidth)\n", (475, 513), False, 'from sklearn.neighbors import KernelDensity\n'), ((1605, 1656), 'torch.nn.Linear', 'nn.Linear', (['self.in_shape', 'self.out_shape'], {'bias': '(True)'}), '(self.in_shape, self.out_shape, bias=True)\n', (1614, 1656), True, 'import torch.nn as nn\n'), ((2171, 2218), 'torch.nn.Linear', 'nn.Linear', (['self.in_shape', 'self.dim_h'], {'bias': '(True)'}), '(self.in_shape, self.dim_h, bias=True)\n', (2180, 2218), True, 'import torch.nn as nn\n'), ((2236, 2245), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2243, 2245), True, 'import torch.nn as nn\n'), ((2263, 2287), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (2273, 2287), True, 'import torch.nn as nn\n'), ((2305, 2353), 'torch.nn.Linear', 'nn.Linear', (['self.dim_h', 'self.out_shape'], {'bias': '(True)'}), '(self.dim_h, self.out_shape, bias=True)\n', (2314, 2353), True, 'import torch.nn as nn\n'), ((2844, 2891), 'torch.nn.Linear', 'nn.Linear', (['self.in_shape', 'self.dim_h'], {'bias': '(True)'}), '(self.in_shape, self.dim_h, bias=True)\n', (2853, 2891), True, 'import torch.nn as nn\n'), ((2909, 2918), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2916, 2918), True, 'import torch.nn as nn\n'), ((2936, 2984), 'torch.nn.Linear', 'nn.Linear', (['self.dim_h', 'self.out_shape'], {'bias': '(True)'}), '(self.dim_h, self.out_shape, bias=True)\n', (2945, 2984), True, 'import torch.nn as nn\n'), ((3492, 3539), 'torch.nn.Linear', 'nn.Linear', (['self.in_shape', 'self.dim_h'], {'bias': '(True)'}), '(self.in_shape, self.dim_h, bias=True)\n', (3501, 3539), True, 'import torch.nn as nn\n'), ((3557, 3566), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3564, 3566), True, 'import torch.nn as nn\n'), ((3584, 3608), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (3594, 3608), True, 'import torch.nn as nn\n'), ((3626, 3674), 'torch.nn.Linear', 'nn.Linear', (['self.dim_h', 'self.out_shape'], {'bias': '(True)'}), '(self.dim_h, self.out_shape, bias=True)\n', (3635, 3674), True, 'import torch.nn as nn\n'), ((3692, 3704), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3702, 3704), True, 'import torch.nn as nn\n'), ((8554, 8592), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (8575, 8592), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((9474, 9524), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': 'A.shape'}), '(low=0.0, high=1.0, size=A.shape)\n', (9491, 9524), True, 'import numpy as np\n'), ((9617, 9663), 'numpy.concatenate', 'np.concatenate', (['(A_tilde[:, np.newaxis], Y)', '(1)'], {}), '((A_tilde[:, np.newaxis], Y), 1)\n', (9631, 9663), True, 'import numpy as np\n'), ((9757, 9791), 'numpy.mean', 'np.mean', (['((Yhat - output_fake) ** 2)'], {}), '((Yhat - output_fake) ** 2)\n', (9764, 9791), True, 'import numpy as np\n'), ((11515, 11553), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (11536, 11553), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((12180, 12220), 'numpy.mean', 'np.mean', (['((Yhat_score - output_orig) ** 2)'], {}), '((Yhat_score - output_orig) ** 2)\n', (12187, 12220), True, 'import numpy as np\n'), ((12587, 12637), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 
'(0.0)', 'high': '(1.0)', 'size': 'A.shape'}), '(low=0.0, high=1.0, size=A.shape)\n', (12604, 12637), True, 'import numpy as np\n'), ((12730, 12776), 'numpy.concatenate', 'np.concatenate', (['(A_tilde[:, np.newaxis], Y)', '(1)'], {}), '((A_tilde[:, np.newaxis], Y), 1)\n', (12744, 12776), True, 'import numpy as np\n'), ((338, 347), 'numpy.abs', 'np.abs', (['Y'], {}), '(Y)\n', (344, 347), True, 'import numpy as np\n'), ((6853, 6872), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (6869, 6872), False, 'import torch\n'), ((11202, 11223), 'pandas.get_dummies', 'pd.get_dummies', (['Y_cal'], {}), '(Y_cal)\n', (11216, 11223), True, 'import pandas as pd\n'), ((11253, 11270), 'pandas.get_dummies', 'pd.get_dummies', (['Y'], {}), '(Y)\n', (11267, 11270), True, 'import pandas as pd\n'), ((12913, 12953), 'numpy.mean', 'np.mean', (['((Yhat_score - output_fake) ** 2)'], {}), '((Yhat_score - output_fake) ** 2)\n', (12920, 12953), True, 'import numpy as np\n'), ((8763, 8775), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (8773, 8775), True, 'import torch.nn as nn\n'), ((11725, 11737), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (11735, 11737), True, 'import torch.nn as nn\n'), ((12292, 12321), 'torch.from_numpy', 'torch.from_numpy', (['output_orig'], {}), '(output_orig)\n', (12308, 12321), False, 'import torch\n'), ((12367, 12395), 'torch.from_numpy', 'torch.from_numpy', (['Yhat_score'], {}), '(Yhat_score)\n', (12383, 12395), False, 'import torch\n'), ((13048, 13077), 'torch.from_numpy', 'torch.from_numpy', (['output_fake'], {}), '(output_fake)\n', (13064, 13077), False, 'import torch\n'), ((13142, 13170), 'torch.from_numpy', 'torch.from_numpy', (['Yhat_score'], {}), '(Yhat_score)\n', (13158, 13170), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Run with:
sudo python ./setup.py install
"""
import os
import sys
import warnings
import ez_setup
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
if sys.version_info[:2] < (2, 7) or (sys.version_info[0] == 3 and sys.version_info[:2] < (3, 5)):
raise Exception('This version of gensim needs Python 2.7, 3.5 or later.')
ez_setup.use_setuptools()
# the following code is adapted from tornado's setup.py:
# https://github.com/tornadoweb/tornado/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
class custom_build_ext(build_ext):
"""Allow C extension building to fail.
The C extension speeds up word2vec and doc2vec training, but is not essential.
"""
warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for gensim to run,
although they do result in significant speed improvements for some modules.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
RedHat, CentOS, and Fedora users should issue the following command:
$ sudo yum install gcc python-devel
If you are seeing this message on OSX please read the documentation
here:
http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""
def run(self):
try:
build_ext.run(self)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(
self.warning_message +
"Extension modules" +
"There was an issue with your platform configuration - see above.")
def build_extension(self, ext):
name = ext.name
try:
build_ext.build_extension(self, ext)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(
self.warning_message +
"The %s extension module" % (name,) +
"The output above this warning shows how the compilation failed.")
# the following is needed to be able to add numpy's include dirs... without
# importing numpy directly in this script, before it's actually installed!
# http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
def finalize_options(self):
build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
# https://docs.python.org/2/library/__builtin__.html#module-__builtin__
if isinstance(__builtins__, dict):
__builtins__["__NUMPY_SETUP__"] = False
else:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
model_dir = os.path.join(os.path.dirname(__file__), 'gensim', 'models')
gensim_dir = os.path.join(os.path.dirname(__file__), 'gensim')
cmdclass = {'build_ext': custom_build_ext}
WHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
LONG_DESCRIPTION = u"""
==============================================
gensim -- Topic Modelling in Python
==============================================
|Travis|_
|Wheel|_
.. |Travis| image:: https://img.shields.io/travis/RaRe-Technologies/gensim/develop.svg
.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg
.. _Travis: https://travis-ci.org/RaRe-Technologies/gensim
.. _Downloads: https://pypi.python.org/pypi/gensim
.. _License: http://radimrehurek.com/gensim/about.html
.. _Wheel: https://pypi.python.org/pypi/gensim
Gensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.
Target audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.
Features
---------
* All algorithms are **memory-independent** w.r.t. the corpus size (can process input larger than RAM, streamed, out-of-core),
* **Intuitive interfaces**
* easy to plug in your own input corpus/datastream (trivial streaming API)
* easy to extend with other Vector Space algorithms (trivial transformation API)
* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,
**Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.
* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.
* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.
If this feature list left you scratching your head, you can first read more about the `Vector
Space Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised
document analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.
Installation
------------
This software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.
You must have them installed prior to installing `gensim`.
It is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. On OS X, NumPy picks up the BLAS that comes with it automatically, so you don't need to do anything special.
The simple way to install `gensim` is::
pip install -U gensim
Or, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package,
you'd run::
python setup.py test
python setup.py install
For alternative modes of installation (without root privileges, development
installation, optional install features), see the `install documentation <http://radimrehurek.com/gensim/install.html>`_.
This version has been tested under Python 2.7, 3.5 and 3.6. Support for Python 2.6, 3.3 and 3.4 was dropped in gensim 1.0.0. Install gensim 0.13.4 if you *must* use Python 2.6, 3.3 or 3.4. Support for Python 2.5 was dropped in gensim 0.10.0; install gensim 0.9.1 if you *must* use Python 2.5). Gensim's github repo is hooked against `Travis CI for automated testing <https://travis-ci.org/RaRe-Technologies/gensim>`_ on every commit push and pull request.
How come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?
--------------------------------------------------------------------------------------------------------
Many scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).
Memory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.
Documentation
-------------
* `QuickStart`_
* `Tutorials`_
* `Tutorial Videos`_
* `Official Documentation and Walkthrough`_
Citing gensim
-------------
When `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::
@inproceedings{rehurek_lrec,
title = {{Software Framework for Topic Modelling with Large Corpora}},
author = {Radim {\\<NAME>{\\<NAME>}{\\v r}ek and <NAME>},
booktitle = {{Proceedings of the LREC 2010 Workshop on New
Challenges for NLP Frameworks}},
pages = {45--50},
year = 2010,
month = May,
day = 22,
publisher = {ELRA},
address = {Valletta, Malta},
language={English}
}
----------------
Gensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.
Copyright (c) 2009-now <NAME>
|Analytics|_
.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name
.. _Analytics: https://github.com/igrigorik/ga-beacon
.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/
.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials
.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos
.. _QuickStart: https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/gensim%20Quick%20Start.ipynb
"""
distributed_env = ['Pyro4 >= 4.27']
win_testenv = [
'pytest',
'pytest-rerunfailures',
'mock',
'cython',
'pyemd',
'testfixtures',
'scikit-learn',
'Morfessor==2.0.2a4',
]
linux_testenv = win_testenv + [
'annoy',
'tensorflow <= 1.3.0',
'keras >= 2.0.4',
]
setup(
name='gensim',
version='3.3.0',
description='Python framework for fast Vector Space Modelling',
long_description=LONG_DESCRIPTION,
ext_modules=[
Extension('gensim.models.word2vec_inner',
sources=['./gensim/models/word2vec_inner.c'],
include_dirs=[model_dir]),
Extension('gensim.models.doc2vec_inner',
sources=['./gensim/models/doc2vec_inner.c'],
include_dirs=[model_dir]),
Extension('gensim.models.fasttext_inner',
sources=['./gensim/models/fasttext_inner.c'],
include_dirs=[model_dir])
],
cmdclass=cmdclass,
packages=find_packages(),
author=u'<NAME>',
author_email='<EMAIL>',
url='http://radimrehurek.com/gensim',
download_url='http://pypi.python.org/pypi/gensim',
keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '
'LSA, LSI, Latent Dirichlet Allocation, LDA, '
'Hierarchical Dirichlet Process, HDP, Random Projections, '
'TFIDF, word2vec',
platforms='any',
zip_safe=False,
classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing :: Linguistic',
],
test_suite="gensim.test",
setup_requires=[
'numpy >= 1.11.3'
],
install_requires=[
'numpy >= 1.11.3',
'scipy >= 0.18.1',
'six >= 1.5.0',
'smart_open >= 1.2.1',
],
tests_require=linux_testenv,
extras_require={
'distributed': distributed_env,
'test-win': win_testenv,
'test': linux_testenv,
'docs': linux_testenv + distributed_env + ['sphinx', 'sphinxcontrib-napoleon', 'plotly', 'pattern', 'sphinxcontrib.programoutput'],
},
include_package_data=True,
)
| [
"ez_setup.use_setuptools",
"setuptools.find_packages",
"setuptools.Extension",
"os.path.dirname",
"sys.exc_info",
"setuptools.command.build_ext.build_ext.run",
"setuptools.command.build_ext.build_ext.build_extension",
"numpy.get_include",
"warnings.warn",
"setuptools.command.build_ext.build_ext.fi... | [((552, 577), 'ez_setup.use_setuptools', 'ez_setup.use_setuptools', ([], {}), '()\n', (575, 577), False, 'import ez_setup\n'), ((3384, 3409), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3399, 3409), False, 'import os\n'), ((3457, 3482), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3472, 3482), False, 'import os\n'), ((2938, 2970), 'setuptools.command.build_ext.build_ext.finalize_options', 'build_ext.finalize_options', (['self'], {}), '(self)\n', (2964, 2970), False, 'from setuptools.command.build_ext import build_ext\n'), ((10553, 10568), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (10566, 10568), False, 'from setuptools import setup, find_packages, Extension\n'), ((1894, 1913), 'setuptools.command.build_ext.build_ext.run', 'build_ext.run', (['self'], {}), '(self)\n', (1907, 1913), False, 'from setuptools.command.build_ext import build_ext\n'), ((2294, 2330), 'setuptools.command.build_ext.build_ext.build_extension', 'build_ext.build_extension', (['self', 'ext'], {}), '(self, ext)\n', (2319, 2330), False, 'from setuptools.command.build_ext import build_ext\n'), ((3336, 3355), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3353, 3355), False, 'import numpy\n'), ((10080, 10198), 'setuptools.Extension', 'Extension', (['"""gensim.models.word2vec_inner"""'], {'sources': "['./gensim/models/word2vec_inner.c']", 'include_dirs': '[model_dir]'}), "('gensim.models.word2vec_inner', sources=[\n './gensim/models/word2vec_inner.c'], include_dirs=[model_dir])\n", (10089, 10198), False, 'from setuptools import setup, find_packages, Extension\n'), ((10227, 10343), 'setuptools.Extension', 'Extension', (['"""gensim.models.doc2vec_inner"""'], {'sources': "['./gensim/models/doc2vec_inner.c']", 'include_dirs': '[model_dir]'}), "('gensim.models.doc2vec_inner', sources=[\n './gensim/models/doc2vec_inner.c'], include_dirs=[model_dir])\n", (10236, 10343), False, 'from setuptools import setup, find_packages, Extension\n'), ((10372, 10490), 'setuptools.Extension', 'Extension', (['"""gensim.models.fasttext_inner"""'], {'sources': "['./gensim/models/fasttext_inner.c']", 'include_dirs': '[model_dir]'}), "('gensim.models.fasttext_inner', sources=[\n './gensim/models/fasttext_inner.c'], include_dirs=[model_dir])\n", (10381, 10490), False, 'from setuptools import setup, find_packages, Extension\n'), ((2032, 2162), 'warnings.warn', 'warnings.warn', (["(self.warning_message + 'Extension modules' +\n 'There was an issue with your platform configuration - see above.')"], {}), "(self.warning_message + 'Extension modules' +\n 'There was an issue with your platform configuration - see above.')\n", (2045, 2162), False, 'import warnings\n'), ((2449, 2594), 'warnings.warn', 'warnings.warn', (["(self.warning_message + 'The %s extension module' % (name,) +\n 'The output above this warning shows how the compilation failed.')"], {}), "(self.warning_message + 'The %s extension module' % (name,) +\n 'The output above this warning shows how the compilation failed.')\n", (2462, 2594), False, 'import warnings\n'), ((1956, 1970), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1968, 1970), False, 'import sys\n'), ((2373, 2387), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2385, 2387), False, 'import sys\n')] |
import copy
import itertools
import os
import uuid
from typing import Callable, List, Tuple
import numpy as np
import ray
from gym import Env
from gym.spaces import Box
from interact.environments.vector_env import VectorEnv
from interact.experience.episode_batch import EpisodeBatch
from interact.experience.sample_batch import SampleBatch
from interact.policies.base import Policy
from interact.typing import TensorType
class Worker:
"""Executes a policy in an environment and collects experience.
Args:
env_fn: A function that returns a Gym environment when called. The returned
environment is used to collect experience.
policy_fn: A function that returns a Policy when called. The returned Policy is
used to collect experience.
num_envs: The number of environments to synchronously execute within this
worker.
        seed: Optional seed with which to seed this worker's environments.
"""
def __init__(
self,
env_fn: Callable[[], Env],
policy_fn: Callable[[], Policy],
num_envs: int = 1,
seed: int = None,
):
self.env = VectorEnv([env_fn] * num_envs)
if seed is None:
seed = int.from_bytes(os.urandom(4), byteorder="big")
self.env.seed(seed)
self.env.reset()
self.policy = policy_fn()
self.policy.build(self.env.observation_space.shape)
self.eps_ids = [uuid.uuid4().int for _ in range(self.env.num_envs)]
@classmethod
def as_remote(cls, **kwargs):
return ray.remote(**kwargs)(cls)
def collect(
self, num_steps: int = 1, **kwargs
) -> Tuple[List[SampleBatch], List[dict]]:
"""Executes the policy in the environment and returns the collected experience.
Args:
num_steps: The number of environment steps to execute in each synchronous
environment.
Returns:
episodes: A list of `SamplesBatch`s, where each batch contains experience
from a single episode (each of which may or may not be a complete
episode).
ep_infos: A list of dictionaries containing information about any episodes
which were completed during collection.
"""
batch = SampleBatch()
ep_infos = []
for _ in range(num_steps):
data = self.policy.step(self.env.observations, **kwargs)
data[SampleBatch.OBS] = self.env.observations.copy()
data[SampleBatch.EPS_ID] = copy.copy(self.eps_ids)
clipped_actions = np.asarray(data[SampleBatch.ACTIONS])
if isinstance(self.env.action_space, Box):
clipped_actions = np.clip(
clipped_actions,
self.env.action_space.low,
self.env.action_space.high,
)
next_obs, rewards, dones, infos = self.env.step(clipped_actions)
for i, done in enumerate(dones):
if done:
self.eps_ids[i] = uuid.uuid4().int
# This ensures that terminations which occurred due to the episode time
# limit are not interpreted as environment terminations. This effectively
# implements the partial bootstrapping method described in
# https://arxiv.org/abs/1712.00378
for i, info in enumerate(infos):
if info.get("TimeLimit.truncated", False):
dones[i] = False
next_obs[i] = info.pop("TimeLimit.next_obs")
data[SampleBatch.REWARDS] = rewards
data[SampleBatch.DONES] = dones
data[SampleBatch.NEXT_OBS] = next_obs
batch.add(data)
for info in infos:
maybe_ep_info = info.get("episode")
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
return batch.extract_episodes(), ep_infos
def update_policy(self, weights: List[TensorType]):
"""Updates the weights of this worker's policy.
Args:
weights: A list of weights to be applied to the policy.
Returns:
None
"""
self.policy.set_weights(weights)
class Runner:
"""Responsible for collecting experience from an environment.
    This class uses an arbitrary number of `Worker`s to execute a policy in an
environment and aggregate the collected experience.
Args:
env_fn: A function that returns a Gym environment when called. The returned
environment is used to collect experience.
policy_fn: A function that returns a Policy when called. The returned Policy is
used to collect experience.
num_envs_per_worker: The number of environments to synchronously execute within
each worker.
num_workers: The number of parallel workers to use for experience collection.
        seed: Optional seed with which to seed this runner's environments.
"""
def __init__(
self,
env_fn: Callable[[], Env],
policy_fn: Callable[[], Policy],
num_envs_per_worker: int = 1,
num_workers: int = 1,
seed: int = None,
):
if num_workers == 1:
self._workers = [Worker(env_fn, policy_fn, num_envs_per_worker, seed)]
else:
self._workers = [
Worker.as_remote(num_gpus=0).remote(
env_fn, policy_fn, num_envs_per_worker, seed
)
for _ in range(num_workers)
]
def run(self, num_steps: int = 1, **kwargs) -> Tuple[EpisodeBatch, List[dict]]:
"""Executes the policy in the environment and returns the collected experience.
Args:
num_steps: The number of steps to take in each environment.
Returns:
episodes: An `EpisodeBatch` containing the collected data.
ep_infos: A list of dictionaries containing information about any episodes
which were completed during collection.
"""
if len(self._workers) == 1:
episodes, ep_infos = self._workers[0].collect(num_steps, **kwargs)
return EpisodeBatch.from_episodes(episodes), ep_infos
episodes, ep_infos = zip(
*ray.get([w.collect.remote(num_steps, **kwargs) for w in self._workers])
)
ep_infos = list(itertools.chain.from_iterable(ep_infos))
episodes = list(itertools.chain.from_iterable(episodes))
return EpisodeBatch.from_episodes(episodes), ep_infos
def update_policies(self, weights: List[TensorType]):
"""Updates the weights of this runner's policy.
Args:
weights: A list of weights to be applied to the policy.
Returns:
None
"""
if len(self._workers) == 1:
self._workers[0].update_policy(weights)
else:
ray.get([w.update_policy.remote(weights) for w in self._workers])
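
# A minimal usage sketch (hedged: "CartPole-v1" and RandomPolicy are
# illustrative stand-ins; a real Policy subclass from this package is needed):
#     import gym
#     runner = Runner(env_fn=lambda: gym.make("CartPole-v1"),
#                     policy_fn=lambda: RandomPolicy(),   # hypothetical policy
#                     num_envs_per_worker=4,
#                     num_workers=2)
#     episodes, ep_infos = runner.run(num_steps=128)
#     runner.update_policies(new_weights)                 # after a training step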
| [
"numpy.clip",
"interact.experience.episode_batch.EpisodeBatch.from_episodes",
"interact.environments.vector_env.VectorEnv",
"os.urandom",
"interact.experience.sample_batch.SampleBatch",
"numpy.asarray",
"copy.copy",
"uuid.uuid4",
"itertools.chain.from_iterable",
"ray.remote"
] | [((1157, 1187), 'interact.environments.vector_env.VectorEnv', 'VectorEnv', (['([env_fn] * num_envs)'], {}), '([env_fn] * num_envs)\n', (1166, 1187), False, 'from interact.environments.vector_env import VectorEnv\n'), ((2307, 2320), 'interact.experience.sample_batch.SampleBatch', 'SampleBatch', ([], {}), '()\n', (2318, 2320), False, 'from interact.experience.sample_batch import SampleBatch\n'), ((1572, 1592), 'ray.remote', 'ray.remote', ([], {}), '(**kwargs)\n', (1582, 1592), False, 'import ray\n'), ((2553, 2576), 'copy.copy', 'copy.copy', (['self.eps_ids'], {}), '(self.eps_ids)\n', (2562, 2576), False, 'import copy\n'), ((2608, 2645), 'numpy.asarray', 'np.asarray', (['data[SampleBatch.ACTIONS]'], {}), '(data[SampleBatch.ACTIONS])\n', (2618, 2645), True, 'import numpy as np\n'), ((6459, 6498), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['ep_infos'], {}), '(ep_infos)\n', (6488, 6498), False, 'import itertools\n'), ((6524, 6563), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['episodes'], {}), '(episodes)\n', (6553, 6563), False, 'import itertools\n'), ((6581, 6617), 'interact.experience.episode_batch.EpisodeBatch.from_episodes', 'EpisodeBatch.from_episodes', (['episodes'], {}), '(episodes)\n', (6607, 6617), False, 'from interact.experience.episode_batch import EpisodeBatch\n'), ((1248, 1261), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (1258, 1261), False, 'import os\n'), ((1453, 1465), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1463, 1465), False, 'import uuid\n'), ((2735, 2814), 'numpy.clip', 'np.clip', (['clipped_actions', 'self.env.action_space.low', 'self.env.action_space.high'], {}), '(clipped_actions, self.env.action_space.low, self.env.action_space.high)\n', (2742, 2814), True, 'import numpy as np\n'), ((6258, 6294), 'interact.experience.episode_batch.EpisodeBatch.from_episodes', 'EpisodeBatch.from_episodes', (['episodes'], {}), '(episodes)\n', (6284, 6294), False, 'from interact.experience.episode_batch import EpisodeBatch\n'), ((3081, 3093), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3091, 3093), False, 'import uuid\n')] |
# Imports
from __future__ import print_function
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge, Lasso, SGDRegressor, ElasticNet, LinearRegression
from sklearn.multioutput import MultiOutputRegressor
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import math
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_predict, cross_validate
from sklearn.kernel_ridge import KernelRidge
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from sklearn.ensemble import AdaBoostRegressor
from numpy import genfromtxt
from matplotlib import collections as matcoll
##########################################################################################################################################
# Last set of masses and overlap integrals that'll be plotted on delta function
last_number = len(y_test)-1
#######################################################################################################################################
# Find x and y errors for each regression model (in this case randomised search KRR predictions)
masses_pred = []
amplitudes_pred = []
masses_Res = []
amplitudes_Res = []
for i in range(len(RS_Prediction)):
masses_pred.append(RS_Prediction[i][:n_masses])
amplitudes_pred.append(RS_Prediction[i][n_masses:])
masses_Res.append(y_test[i][:n_masses])
amplitudes_Res.append(y_test[i][n_masses:])
# np.concatenate returns a new array, so the results must be assigned
masses_pred = np.concatenate(masses_pred, axis=0)
amplitudes_pred = np.concatenate(amplitudes_pred, axis=0)
masses_Res = np.concatenate(masses_Res, axis=0)
amplitudes_Res = np.concatenate(amplitudes_Res, axis=0)
x_error = np.sqrt(mean_squared_error(masses_pred, masses_Res))
y_error = np.sqrt(mean_squared_error(amplitudes_pred, amplitudes_Res))
################################################################################################################################################
# Set out the test example that is plotted in the delta function
full_pred = RS_Prediction[last_number]
full_result = y_test[last_number]
mass_pred = full_pred[:n_masses]
amp_pred = full_pred[n_masses:]
mass_res = full_result[:n_masses]
amp_res = full_result[n_masses:]
##################################################################################################################################################
# Plot delta function using previously calculated
lines_pred = []
for i in range(len(mass_pred)):
pair=[(mass_pred[i],0), (mass_pred[i], amp_pred[i])]
lines_pred.append(pair)
linecoll_pred = matcoll.LineCollection(lines_pred)
fig, ax = plt.subplots()
ax.add_collection(linecoll_pred)
lines_res = []
for i in range(len(mass_res)):
pair=[(mass_res[i],0), (mass_res[i], amp_res[i])]
lines_res.append(pair)
linecoll_res = matcoll.LineCollection(lines_res, colors='r')
ax.add_collection(linecoll_res)
plt.errorbar(mass_pred,amp_pred,xerr=x_error,yerr=y_error, fmt='o', ecolor='black',
             capsize=4, barsabove=True, markersize=0.1, label='Predicted')
plt.scatter(mass_res,amp_res, s=0.1, label='True')
#plt.xticks(mass_pred)
plt.ylim(0,1)
plt.xlabel("Mass")
plt.ylabel("Overlap integral")
plt.legend(loc='upper right')
#plt.savefig("Delta_function_10000.png", dpi=1000)
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.collections.LineCollection",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.errorbar",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",... | [((1539, 1574), 'numpy.concatenate', 'np.concatenate', (['masses_pred'], {'axis': '(0)'}), '(masses_pred, axis=0)\n', (1553, 1574), True, 'import numpy as np\n'), ((1576, 1615), 'numpy.concatenate', 'np.concatenate', (['amplitudes_pred'], {'axis': '(0)'}), '(amplitudes_pred, axis=0)\n', (1590, 1615), True, 'import numpy as np\n'), ((1617, 1651), 'numpy.concatenate', 'np.concatenate', (['masses_Res'], {'axis': '(0)'}), '(masses_Res, axis=0)\n', (1631, 1651), True, 'import numpy as np\n'), ((1653, 1691), 'numpy.concatenate', 'np.concatenate', (['amplitudes_Res'], {'axis': '(0)'}), '(amplitudes_Res, axis=0)\n', (1667, 1691), True, 'import numpy as np\n'), ((2590, 2624), 'matplotlib.collections.LineCollection', 'matcoll.LineCollection', (['lines_pred'], {}), '(lines_pred)\n', (2612, 2624), True, 'from matplotlib import collections as matcoll\n'), ((2635, 2649), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2647, 2649), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2872), 'matplotlib.collections.LineCollection', 'matcoll.LineCollection', (['lines_res'], {'colors': '"""r"""'}), "(lines_res, colors='r')\n", (2849, 2872), True, 'from matplotlib import collections as matcoll\n'), ((2906, 3039), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['mass_pred', 'amp_pred'], {'xerr': 'x_error', 'yerr': 'y_error', 'fmt': '"""o"""', 'ecolor': '"""black"""', 'capsize': '(4)', 'barsabove': '(True)', 'markersize': '(0.1)'}), "(mass_pred, amp_pred, xerr=x_error, yerr=y_error, fmt='o',\n ecolor='black', capsize=4, barsabove=True, markersize=0.1)\n", (2918, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3045, 3082), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mass_res', 'amp_res'], {'s': '(0.1)'}), '(mass_res, amp_res, s=0.1)\n', (3056, 3082), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3120), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (3114, 3120), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3138), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mass"""'], {}), "('Mass')\n", (3130, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3139, 3169), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Overlap integral"""'], {}), "('Overlap integral')\n", (3149, 3169), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3199), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3180, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3261), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3259, 3261), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1755), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['masses_pred', 'masses_Res'], {}), '(masses_pred, masses_Res)\n', (1730, 1755), False, 'from sklearn.metrics import mean_squared_error\n'), ((1775, 1826), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['amplitudes_pred', 'amplitudes_Res'], {}), '(amplitudes_pred, amplitudes_Res)\n', (1793, 1826), False, 'from sklearn.metrics import mean_squared_error\n')] |
import time
import math
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, date2num
import numpy as np
import subprocess as sp
import time
import sys
import os
from itertools import groupby
from sklearn.neighbors import KernelDensity
from sklearn.grid_search import GridSearchCV
fdate = sp.check_output(['date','--date','-1 day','+%Y%m%d'])
fdate = fdate.replace('\n','')
yr = int(fdate[:4])
mo = int(fdate[4:6])
dy = int(fdate[6:])
secsInWeek = 604800
secsInDay = 86400
gpsEpoch = (1980, 1, 6, 0, 0, 0) # (year, month, day, hh, mm, ss)
#*****NOTICE*****: leapSecs is currently 18 (since Jan 1, 2017); update it when the next leap second is announced
def gpsFromUTC(year, month, day, hour, minute, sec, leapSecs=18):
"""converts UTC to GPS second
Original function can be found at: http://software.ligo.org/docs/glue/frames.html
GPS time is basically measured in (atomic) seconds since
January 6, 1980, 00:00:00.0 (the GPS Epoch)
The GPS week starts on Saturday midnight (Sunday morning), and runs
for 604800 seconds.
Currently, GPS time is 17 seconds ahead of UTC
While GPS SVs transmit this difference and the date when another leap
second takes effect, the use of leap seconds cannot be predicted. This
routine is precise until the next leap second is introduced and has to be
updated after that.
SOW = Seconds of Week
SOD = Seconds of Day
Note: Python represents time in integer seconds, fractions are lost!!!
"""
secFract = sec % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = time.mktime(epochTuple)
t = time.mktime((year, month, day, hour, minute, sec, -1, -1, 0))
# Note: time.mktime strictly works in localtime and to yield UTC, it should be
# corrected with time.timezone
# However, since we use the difference, this correction is unnecessary.
# Warning: trouble if daylight savings flag is set to -1 or 1 !!!
t = t + leapSecs
tdiff = t - t0
gpsSOW = (tdiff % secsInWeek) + secFract
gpsWeek = int(math.floor(tdiff/secsInWeek))
gpsDay = int(math.floor(gpsSOW/secsInDay))
gpsSOD = (gpsSOW % secsInDay)
gps_tuple = (gpsWeek, gpsSOW, gpsDay, gpsSOD)
return int(gps_tuple[0] * secsInWeek + gps_tuple[1])
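
# Usage sketch (illustrative; the numeric result is not asserted here):
#   total = gpsFromUTC(2017, 1, 1, 12, 0, 0)
#   week, sow = divmod(total, secsInWeek)  # recover GPS week number and seconds-of-week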
def get_calib(evt_utc_time,mu_list):
#Convert event utc time stamp to datetime
evt_ds = datetime.datetime.strptime(evt_utc_time,'%Y/%m/%d_%H:%M:%S')
evt_gps = gpsFromUTC(evt_ds.year,evt_ds.month,evt_ds.day,evt_ds.hour,
evt_ds.minute,evt_ds.second)
#A30 base line
a30_bl = 512
mu_file = ''
for i in range(len(mu_list)-1):
mu_ds_t1 = datetime.datetime.strptime(mu_list[i].split('.')[0],'%Y%m%d_%H%M%S')
mu_ds_t2 = datetime.datetime.strptime(mu_list[i+1].split('.')[0],'%Y%m%d_%H%M%S')
if evt_ds > mu_ds_t1 and evt_ds < mu_ds_t2:
mu_file = mu_list[i]
mu_gps_start = mu_ds_t1
elif i == len(mu_list)-2 and evt_ds > mu_ds_t2:
mu_file = mu_list[i+1]
mu_gps_start = mu_ds_t2
else:
continue
if len(mu_file) > 0:
mu_ds_t1 = mu_gps_start
mu_gps = gpsFromUTC(mu_ds_t1.year,mu_ds_t1.month,mu_ds_t1.day,mu_ds_t1.hour,
mu_ds_t1.minute,mu_ds_t1.second)
start_index = (evt_gps - mu_gps) - 60
mu_full_path = '/home/augta/data/north/Muons/%s/'%fdate+mu_file
out_file = '/home/augta/web_monitor/tmp/muon_tmp.txt'
sp.call(['./anamu','-i','%s' %mu_full_path,'-o','%s' %out_file,
'--first','%i' %start_index,'--last','%i' %(start_index + 60)])
mu_data = np.loadtxt(out_file,usecols=(1,))
#Construct pulse height and charge histograms
peaks = np.zeros(3840)
charge = np.zeros(3840)
for i in range(3840):
tmp_trace = mu_data[i*63:(i+1)*63] - 511.5
peak_loc = tmp_trace.argmax()
peaks[i] = peak_loc + 1
charge[i] = tmp_trace.sum()
return charge,peaks
fname = "%i%02d%02d" %(yr,mo,dy)
os.chdir('/home/augta/web_monitor')
muon_dir = '/home/augta/data/north/Muons/%s/' %fname
muon_list = os.listdir(muon_dir)
muon_list.sort()
evt_list = os.listdir('/home/augta/data/north/Events/%s/' %fname)
evt_list = [k for k in evt_list if '.evt' in k]
glob_evt_list = [k for k in evt_list if 'global' in k]
glob_evt_list.sort()
local_evt_list = [k for k in evt_list if 'local' in k]
local_evt_list.sort()
nloc_dir = '/var/www/html/monitor/data/local_north/%s/' %fname
nglob_dir = '/var/www/html/monitor/data/global_north/%s/' %fname
sp.call(['mkdir',nloc_dir])
sp.call(['mkdir',nglob_dir])
bl_evt = 514
if len(local_evt_list) > 0:
for m in local_evt_list:
curr_file = '/home/augta/data/north/Events/%s/%s' %(fname,m)
fout = nloc_dir + m[:-4] + '.txt'
with open(fout,'w') as f:
sp.call(['./testevt',curr_file],stdout=f)
with open(fout,'r') as f:
gps_line = f.readline() # Get GPS information
gps_ts = gps_line.split(' ')[-2].replace(',','') # Select time stamp
utc_line = f.readline()
utc_date = utc_line.split(' ')[-2]
utc_time = utc_line.split(' ')[-1].split('.')[0]
utc = utc_date+'_'+utc_time
vem_charge,vem_peak = get_calib(utc,muon_list)
plt.subplot(121)
plt.hist(vem_peak,bins=np.arange(0,50),histtype='step')
xx,yy = np.histogram(vem_peak,np.arange(0,50))
vem_pulse_peak = xx.argmax() + 1
plt.xlabel('ADC counts')
plt.title('A30 Pulse height')
plt.subplot(122)
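        # The KDE bandwidth is chosen by cross-validated grid search below; the
        # peak of the fitted density is taken as the single-muon (VEM) charge
        # used to calibrate the event trace further down.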
grid=GridSearchCV(KernelDensity(),{'bandwidth': np.linspace(5,80,30)},cv=10,n_jobs=2)
grid.fit(vem_charge[:,None])
kde=grid.best_estimator_
best_bw=grid.best_params_['bandwidth']
xgrid=np.linspace(0,vem_charge.max(),700)
pdf=np.exp(kde.score_samples(xgrid[:,None]))
        plt.hist(vem_charge,bins=50,histtype='stepfilled',fc='gray',alpha=0.2,density=True)  # 'normed' was removed from matplotlib
peak_pdf = pdf.argmax()
vem_charge_peak = xgrid[peak_pdf]
plt.plot(xgrid,pdf)
plt.vlines(vem_charge_peak,plt.ylim()[0],plt.ylim()[1])
plt.xlim(xmax=1000) #No need to go beyond 500 for current HV
plt.title('A30 Charge')
plt.tight_layout()
plt.savefig(nloc_dir+'calib_histogram_%s.png' %m[:-4])
data = np.loadtxt(fout,usecols=(4,),skiprows=3)
adc = data - bl_evt
ymax = adc.argmax()
signal = np.sum(adc[ymax-40:ymax+50]) / (vem_charge_peak / 30)
adc = adc / vem_charge_peak * (vem_pulse_peak+1)
x = np.arange(1024)
plt.step(x,adc)
plt.xlim(ymax-40,ymax+50)
plt.xlabel('Time [10 ns]')
plt.ylabel('Signal [VEM Peak]')
plt.title('Signal: %.3f VEM' %signal)
plt.savefig(nloc_dir + '%s_trace.png' %m[:-4])
plt.close('all')
locindex = m.split('_')[0]
np.savetxt(nloc_dir+'%s_pulse_height_hist.txt' %m[:-4],vem_peak)
np.savetxt(nloc_dir+'%s_charge_hist.txt' %m[:-4],vem_charge)
with open(nloc_dir + 'signal.txt','a') as f:
f.write('%s %s %.3f %.3f %.3f\n' %(locindex,gps_ts,signal,vem_charge_peak,vem_pulse_peak))
with open('north_local_signal.txt','a') as f:
f.write('%s %.3f\n' %(gps_ts,signal))
if len(glob_evt_list) > 0:
for m in glob_evt_list:
curr_file = '/home/augta/data/north/Events/%s/%s' %(fname,m)
fout = nglob_dir + m[:-4] + '.txt'
with open(fout,'w') as f:
sp.call(['./testevt',curr_file],stdout=f)
with open(fout,'r') as f:
gps_line = f.readline() # Get GPS information
gps_ts = gps_line.split(' ')[-2].replace(',','') # Select time stamp
utc_line = f.readline()
utc_date = utc_line.split(' ')[-2]
utc_time = utc_line.split(' ')[-1].split('.')[0]
utc = utc_date+'_'+utc_time
vem_charge,vem_peak = get_calib(utc,muon_list)
plt.subplot(121)
plt.hist(vem_peak,bins=np.arange(0,50),histtype='step')
xx,yy = np.histogram(vem_peak,np.arange(0,50))
vem_pulse_peak = xx.argmax() + 1
plt.xlabel('ADC counts')
plt.title('A30 Pulse height')
plt.subplot(122)
grid=GridSearchCV(KernelDensity(),{'bandwidth': np.linspace(5,80,30)},cv=10,n_jobs=2)
grid.fit(vem_charge[:,None])
kde=grid.best_estimator_
best_bw=grid.best_params_['bandwidth']
xgrid=np.linspace(0,vem_charge.max(),700)
pdf=np.exp(kde.score_samples(xgrid[:,None]))
        plt.hist(vem_charge,bins=50,histtype='stepfilled',fc='gray',alpha=0.2,density=True)  # 'normed' was removed from matplotlib
peak_pdf = pdf.argmax()
vem_charge_peak = xgrid[peak_pdf]
plt.plot(xgrid,pdf)
plt.vlines(vem_charge_peak,plt.ylim()[0],plt.ylim()[1])
plt.title('A30 Charge')
plt.xlim(xmax=1000)
plt.tight_layout()
plt.savefig(nglob_dir+'calib_histogram_%s.png' %m[:-4])
plt.close('all')
data = np.loadtxt(fout,usecols=(4,),skiprows=3)
adc = data - bl_evt
ymax = adc.argmax()
signal = np.sum(adc[ymax-40:ymax+50])/(vem_charge_peak / 30)
adc = adc / vem_charge_peak * (vem_pulse_peak+1)
x = np.arange(1024)
plt.step(x,adc)
plt.xlim(ymax-40,ymax+50)
plt.xlabel('Time [10 ns]')
plt.ylabel('Signal [VEM Peak]')
plt.title('Signal: %.3f VEM' %signal)
plt.savefig(nglob_dir + '%s_trace.png' %m[:-4])
plt.close('all')
locindex = m.split('_')[0]
np.savetxt(nglob_dir+'%s_pulse_height_hist.txt' %m[:-4],vem_peak)
np.savetxt(nglob_dir+'%s_charge_hist.txt' %m[:-4],vem_charge)
with open(nglob_dir + 'signal.txt','a') as f:
f.write('%s %s %.3f %.3f %.3f\n' %(locindex,gps_ts,signal,vem_charge_peak,vem_pulse_peak))
with open('north_global_signal.txt','a') as f:
f.write('%s %.3f\n' %(gps_ts,signal)) | [
"matplotlib.pyplot.hist",
"math.floor",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"os.listdir",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.neighbors.KernelDensity",
"matplotlib.pyplot.close",
"numpy.linspace",
"subprocess.call",
"matplotlib.pyplot.ylim",
"subprocess.che... | [((58, 79), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (72, 79), False, 'import matplotlib\n'), ((369, 425), 'subprocess.check_output', 'sp.check_output', (["['date', '--date', '-1 day', '+%Y%m%d']"], {}), "(['date', '--date', '-1 day', '+%Y%m%d'])\n", (384, 425), True, 'import subprocess as sp\n'), ((3755, 3790), 'os.chdir', 'os.chdir', (['"""/home/augta/web_monitor"""'], {}), "('/home/augta/web_monitor')\n", (3763, 3790), False, 'import os\n'), ((3856, 3876), 'os.listdir', 'os.listdir', (['muon_dir'], {}), '(muon_dir)\n', (3866, 3876), False, 'import os\n'), ((3906, 3961), 'os.listdir', 'os.listdir', (["('/home/augta/data/north/Events/%s/' % fname)"], {}), "('/home/augta/data/north/Events/%s/' % fname)\n", (3916, 3961), False, 'import os\n'), ((4291, 4319), 'subprocess.call', 'sp.call', (["['mkdir', nloc_dir]"], {}), "(['mkdir', nloc_dir])\n", (4298, 4319), True, 'import subprocess as sp\n'), ((4319, 4348), 'subprocess.call', 'sp.call', (["['mkdir', nglob_dir]"], {}), "(['mkdir', nglob_dir])\n", (4326, 4348), True, 'import subprocess as sp\n'), ((1557, 1580), 'time.mktime', 'time.mktime', (['epochTuple'], {}), '(epochTuple)\n', (1568, 1580), False, 'import time\n'), ((1587, 1648), 'time.mktime', 'time.mktime', (['(year, month, day, hour, minute, sec, -1, -1, 0)'], {}), '((year, month, day, hour, minute, sec, -1, -1, 0))\n', (1598, 1648), False, 'import time\n'), ((2309, 2370), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['evt_utc_time', '"""%Y/%m/%d_%H:%M:%S"""'], {}), "(evt_utc_time, '%Y/%m/%d_%H:%M:%S')\n", (2335, 2370), False, 'import datetime\n'), ((2012, 2042), 'math.floor', 'math.floor', (['(tdiff / secsInWeek)'], {}), '(tdiff / secsInWeek)\n', (2022, 2042), False, 'import math\n'), ((2056, 2086), 'math.floor', 'math.floor', (['(gpsSOW / secsInDay)'], {}), '(gpsSOW / secsInDay)\n', (2066, 2086), False, 'import math\n'), ((3263, 3405), 'subprocess.call', 'sp.call', (["['./anamu', '-i', '%s' % mu_full_path, '-o', '%s' % out_file, '--first', \n '%i' % start_index, '--last', '%i' % (start_index + 60)]"], {}), "(['./anamu', '-i', '%s' % mu_full_path, '-o', '%s' % out_file,\n '--first', '%i' % start_index, '--last', '%i' % (start_index + 60)])\n", (3270, 3405), True, 'import subprocess as sp\n'), ((3406, 3440), 'numpy.loadtxt', 'np.loadtxt', (['out_file'], {'usecols': '(1,)'}), '(out_file, usecols=(1,))\n', (3416, 3440), True, 'import numpy as np\n'), ((3498, 3512), 'numpy.zeros', 'np.zeros', (['(3840)'], {}), '(3840)\n', (3506, 3512), True, 'import numpy as np\n'), ((3524, 3538), 'numpy.zeros', 'np.zeros', (['(3840)'], {}), '(3840)\n', (3532, 3538), True, 'import numpy as np\n'), ((4935, 4951), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (4946, 4951), True, 'import matplotlib.pyplot as plt\n'), ((5096, 5120), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ADC counts"""'], {}), "('ADC counts')\n", (5106, 5120), True, 'import matplotlib.pyplot as plt\n'), ((5123, 5152), 'matplotlib.pyplot.title', 'plt.title', (['"""A30 Pulse height"""'], {}), "('A30 Pulse height')\n", (5132, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5155, 5171), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (5166, 5171), True, 'import matplotlib.pyplot as plt\n'), ((5452, 5543), 'matplotlib.pyplot.hist', 'plt.hist', (['vem_charge'], {'bins': '(50)', 'histtype': '"""stepfilled"""', 'fc': '"""gray"""', 'alpha': '(0.2)', 'normed': '(True)'}), "(vem_charge, bins=50, histtype='stepfilled', 
fc='gray', alpha=0.2,\n normed=True)\n", (5460, 5543), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5619), 'matplotlib.pyplot.plot', 'plt.plot', (['xgrid', 'pdf'], {}), '(xgrid, pdf)\n', (5607, 5619), True, 'import matplotlib.pyplot as plt\n'), ((5679, 5698), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmax': '(1000)'}), '(xmax=1000)\n', (5687, 5698), True, 'import matplotlib.pyplot as plt\n'), ((5742, 5765), 'matplotlib.pyplot.title', 'plt.title', (['"""A30 Charge"""'], {}), "('A30 Charge')\n", (5751, 5765), True, 'import matplotlib.pyplot as plt\n'), ((5768, 5786), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5784, 5786), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5846), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(nloc_dir + 'calib_histogram_%s.png' % m[:-4])"], {}), "(nloc_dir + 'calib_histogram_%s.png' % m[:-4])\n", (5800, 5846), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5895), 'numpy.loadtxt', 'np.loadtxt', (['fout'], {'usecols': '(4,)', 'skiprows': '(3)'}), '(fout, usecols=(4,), skiprows=3)\n', (5863, 5895), True, 'import numpy as np\n'), ((6060, 6075), 'numpy.arange', 'np.arange', (['(1024)'], {}), '(1024)\n', (6069, 6075), True, 'import numpy as np\n'), ((6078, 6094), 'matplotlib.pyplot.step', 'plt.step', (['x', 'adc'], {}), '(x, adc)\n', (6086, 6094), True, 'import matplotlib.pyplot as plt\n'), ((6096, 6126), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(ymax - 40)', '(ymax + 50)'], {}), '(ymax - 40, ymax + 50)\n', (6104, 6126), True, 'import matplotlib.pyplot as plt\n'), ((6124, 6150), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [10 ns]"""'], {}), "('Time [10 ns]')\n", (6134, 6150), True, 'import matplotlib.pyplot as plt\n'), ((6153, 6184), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Signal [VEM Peak]"""'], {}), "('Signal [VEM Peak]')\n", (6163, 6184), True, 'import matplotlib.pyplot as plt\n'), ((6187, 6225), 'matplotlib.pyplot.title', 'plt.title', (["('Signal: %.3f VEM' % signal)"], {}), "('Signal: %.3f VEM' % signal)\n", (6196, 6225), True, 'import matplotlib.pyplot as plt\n'), ((6227, 6274), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(nloc_dir + '%s_trace.png' % m[:-4])"], {}), "(nloc_dir + '%s_trace.png' % m[:-4])\n", (6238, 6274), True, 'import matplotlib.pyplot as plt\n'), ((6276, 6292), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6285, 6292), True, 'import matplotlib.pyplot as plt\n'), ((6324, 6392), 'numpy.savetxt', 'np.savetxt', (["(nloc_dir + '%s_pulse_height_hist.txt' % m[:-4])", 'vem_peak'], {}), "(nloc_dir + '%s_pulse_height_hist.txt' % m[:-4], vem_peak)\n", (6334, 6392), True, 'import numpy as np\n'), ((6391, 6455), 'numpy.savetxt', 'np.savetxt', (["(nloc_dir + '%s_charge_hist.txt' % m[:-4])", 'vem_charge'], {}), "(nloc_dir + '%s_charge_hist.txt' % m[:-4], vem_charge)\n", (6401, 6455), True, 'import numpy as np\n'), ((7255, 7271), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (7266, 7271), True, 'import matplotlib.pyplot as plt\n'), ((7416, 7440), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ADC counts"""'], {}), "('ADC counts')\n", (7426, 7440), True, 'import matplotlib.pyplot as plt\n'), ((7443, 7472), 'matplotlib.pyplot.title', 'plt.title', (['"""A30 Pulse height"""'], {}), "('A30 Pulse height')\n", (7452, 7472), True, 'import matplotlib.pyplot as plt\n'), ((7475, 7491), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (7486, 7491), True, 'import matplotlib.pyplot as plt\n'), ((7772, 7863), 
'matplotlib.pyplot.hist', 'plt.hist', (['vem_charge'], {'bins': '(50)', 'histtype': '"""stepfilled"""', 'fc': '"""gray"""', 'alpha': '(0.2)', 'normed': '(True)'}), "(vem_charge, bins=50, histtype='stepfilled', fc='gray', alpha=0.2,\n normed=True)\n", (7780, 7863), True, 'import matplotlib.pyplot as plt\n'), ((7919, 7939), 'matplotlib.pyplot.plot', 'plt.plot', (['xgrid', 'pdf'], {}), '(xgrid, pdf)\n', (7927, 7939), True, 'import matplotlib.pyplot as plt\n'), ((7999, 8022), 'matplotlib.pyplot.title', 'plt.title', (['"""A30 Charge"""'], {}), "('A30 Charge')\n", (8008, 8022), True, 'import matplotlib.pyplot as plt\n'), ((8025, 8044), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmax': '(1000)'}), '(xmax=1000)\n', (8033, 8044), True, 'import matplotlib.pyplot as plt\n'), ((8047, 8065), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8063, 8065), True, 'import matplotlib.pyplot as plt\n'), ((8068, 8126), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(nglob_dir + 'calib_histogram_%s.png' % m[:-4])"], {}), "(nglob_dir + 'calib_histogram_%s.png' % m[:-4])\n", (8079, 8126), True, 'import matplotlib.pyplot as plt\n'), ((8126, 8142), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8135, 8142), True, 'import matplotlib.pyplot as plt\n'), ((8152, 8194), 'numpy.loadtxt', 'np.loadtxt', (['fout'], {'usecols': '(4,)', 'skiprows': '(3)'}), '(fout, usecols=(4,), skiprows=3)\n', (8162, 8194), True, 'import numpy as np\n'), ((8357, 8372), 'numpy.arange', 'np.arange', (['(1024)'], {}), '(1024)\n', (8366, 8372), True, 'import numpy as np\n'), ((8375, 8391), 'matplotlib.pyplot.step', 'plt.step', (['x', 'adc'], {}), '(x, adc)\n', (8383, 8391), True, 'import matplotlib.pyplot as plt\n'), ((8393, 8423), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(ymax - 40)', '(ymax + 50)'], {}), '(ymax - 40, ymax + 50)\n', (8401, 8423), True, 'import matplotlib.pyplot as plt\n'), ((8421, 8447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [10 ns]"""'], {}), "('Time [10 ns]')\n", (8431, 8447), True, 'import matplotlib.pyplot as plt\n'), ((8450, 8481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Signal [VEM Peak]"""'], {}), "('Signal [VEM Peak]')\n", (8460, 8481), True, 'import matplotlib.pyplot as plt\n'), ((8484, 8522), 'matplotlib.pyplot.title', 'plt.title', (["('Signal: %.3f VEM' % signal)"], {}), "('Signal: %.3f VEM' % signal)\n", (8493, 8522), True, 'import matplotlib.pyplot as plt\n'), ((8524, 8572), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(nglob_dir + '%s_trace.png' % m[:-4])"], {}), "(nglob_dir + '%s_trace.png' % m[:-4])\n", (8535, 8572), True, 'import matplotlib.pyplot as plt\n'), ((8574, 8590), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8583, 8590), True, 'import matplotlib.pyplot as plt\n'), ((8622, 8691), 'numpy.savetxt', 'np.savetxt', (["(nglob_dir + '%s_pulse_height_hist.txt' % m[:-4])", 'vem_peak'], {}), "(nglob_dir + '%s_pulse_height_hist.txt' % m[:-4], vem_peak)\n", (8632, 8691), True, 'import numpy as np\n'), ((8690, 8755), 'numpy.savetxt', 'np.savetxt', (["(nglob_dir + '%s_charge_hist.txt' % m[:-4])", 'vem_charge'], {}), "(nglob_dir + '%s_charge_hist.txt' % m[:-4], vem_charge)\n", (8700, 8755), True, 'import numpy as np\n'), ((4546, 4589), 'subprocess.call', 'sp.call', (["['./testevt', curr_file]"], {'stdout': 'f'}), "(['./testevt', curr_file], stdout=f)\n", (4553, 4589), True, 'import subprocess as sp\n'), ((5042, 5058), 'numpy.arange', 'np.arange', (['(0)', '(50)'], {}), '(0, 50)\n', (5051, 5058), True, 'import 
numpy as np\n'), ((5192, 5207), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (5205, 5207), False, 'from sklearn.neighbors import KernelDensity\n'), ((5949, 5981), 'numpy.sum', 'np.sum', (['adc[ymax - 40:ymax + 50]'], {}), '(adc[ymax - 40:ymax + 50])\n', (5955, 5981), True, 'import numpy as np\n'), ((6866, 6909), 'subprocess.call', 'sp.call', (["['./testevt', curr_file]"], {'stdout': 'f'}), "(['./testevt', curr_file], stdout=f)\n", (6873, 6909), True, 'import subprocess as sp\n'), ((7362, 7378), 'numpy.arange', 'np.arange', (['(0)', '(50)'], {}), '(0, 50)\n', (7371, 7378), True, 'import numpy as np\n'), ((7512, 7527), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (7525, 7527), False, 'from sklearn.neighbors import KernelDensity\n'), ((8248, 8280), 'numpy.sum', 'np.sum', (['adc[ymax - 40:ymax + 50]'], {}), '(adc[ymax - 40:ymax + 50])\n', (8254, 8280), True, 'import numpy as np\n'), ((4977, 4993), 'numpy.arange', 'np.arange', (['(0)', '(50)'], {}), '(0, 50)\n', (4986, 4993), True, 'import numpy as np\n'), ((5222, 5244), 'numpy.linspace', 'np.linspace', (['(5)', '(80)', '(30)'], {}), '(5, 80, 30)\n', (5233, 5244), True, 'import numpy as np\n'), ((5648, 5658), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (5656, 5658), True, 'import matplotlib.pyplot as plt\n'), ((5662, 5672), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (5670, 5672), True, 'import matplotlib.pyplot as plt\n'), ((7297, 7313), 'numpy.arange', 'np.arange', (['(0)', '(50)'], {}), '(0, 50)\n', (7306, 7313), True, 'import numpy as np\n'), ((7542, 7564), 'numpy.linspace', 'np.linspace', (['(5)', '(80)', '(30)'], {}), '(5, 80, 30)\n', (7553, 7564), True, 'import numpy as np\n'), ((7968, 7978), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (7976, 7978), True, 'import matplotlib.pyplot as plt\n'), ((7982, 7992), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (7990, 7992), True, 'import matplotlib.pyplot as plt\n')] |
import json
import sys
import numpy
import copy
#extractors = {0: {"precision": 0.5, "recall": 0.5}}
#sources = {0: {"KBT": 0.5, "triples": [[0,0,0], [0,1,None]]}}
#Triples are generated as [i, i, i] for i >= 1; a corrupted source field is set
#to 0 and a corrupted extracted element is set to -1
#format = {0: {0: [], 1: [], 2: []} }
def generateTriples(quantity):
triples = []
for i in range(1, quantity + 1):
triples.append([i, i, i])
return triples
#Randomly shuffles triples
def generateSource(allTriples, accuracy = 0.7):
triples = copy.deepcopy(allTriples)
numpy.random.default_rng().shuffle(triples)
for triple in triples:
if numpy.random.default_rng().integers(0, 100)/100 > accuracy:
            tmp = numpy.random.default_rng().integers(0, 3)  # exclusive upper bound: pick one of the three fields
if tmp == 0:
triple[0] = 0
elif tmp == 1:
triple[1] = 0
else:
triple[2] = 0
return {"KBT": 0.7, "triples": triples}
#Generates an extractor; precision and recall are currently fixed at 0.5
def generateExtractor():
return {"precision": 0.5, "recall": 0.5}
def extract(extractor, source):
extractedTriples = []
for triple in source["triples"]:
        if numpy.random.default_rng().integers(0, 100)/100 < extractor["recall"]:  # extract a triple with probability recall
extractedTriples.append(copy.deepcopy(triple))
for i in range(len(extractedTriples[-1])):
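                # corrupting each element independently with probability
                # 1 - cbrt(precision) leaves the whole triple intact with
                # probability precision (cbrt(p)**3 == p)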
if numpy.random.default_rng().integers(0, 100)/100 > numpy.cbrt(extractor["precision"]):
extractedTriples[-1][i] = -1
numpy.random.default_rng().shuffle(extractedTriples)
return extractedTriples
def main():
if len(sys.argv) != 4:
print("Usage:", sys.argv[0], "[number of triples] [number of sources] [number of extractors]")
exit()
triples = []
sources = {}
extractors = {}
multilayerinput = {}
print("Generating triples...")
triples = generateTriples(int(sys.argv[1]))
print("Completed!\n")
print("Generating sources...")
for i in range(int(sys.argv[2])):
sources[i] = generateSource(triples)
print("Completed!\n")
print("Generating extractors...")
for i in range(int(sys.argv[3])):
extractors[i] = generateExtractor()
print("Completed!\n")
print("Extracting triples from sources...")
for extractorID in range(int(sys.argv[3])):
tmp = {}
for sourceID in range(int(sys.argv[2])):
if numpy.random.default_rng().integers(0, 100)/100 > 0.5:
tmp[sourceID] = extract(extractors[extractorID], sources[sourceID])
multilayerinput[extractorID] = tmp
print("Completed!\n")
print("Writing to files...")
with open("triples.json", "w") as triplesFile, open("sources.json", "w") as sourcesFile, open("extractors.json", "w") as extractorsFile, open("multilayerinput.json", "w") as multilayerInputFile:
json.dump(triples, triplesFile, indent = 2)
json.dump(sources, sourcesFile, indent = 2)
json.dump(extractors, extractorsFile, indent = 2)
json.dump(multilayerinput, multilayerInputFile, indent = 2)
print("Completed!")
if __name__ == "__main__":
main()
| [
"numpy.cbrt",
"numpy.random.default_rng",
"json.dump",
"copy.deepcopy"
] | [((516, 541), 'copy.deepcopy', 'copy.deepcopy', (['allTriples'], {}), '(allTriples)\n', (529, 541), False, 'import copy\n'), ((2898, 2939), 'json.dump', 'json.dump', (['triples', 'triplesFile'], {'indent': '(2)'}), '(triples, triplesFile, indent=2)\n', (2907, 2939), False, 'import json\n'), ((2950, 2991), 'json.dump', 'json.dump', (['sources', 'sourcesFile'], {'indent': '(2)'}), '(sources, sourcesFile, indent=2)\n', (2959, 2991), False, 'import json\n'), ((3002, 3049), 'json.dump', 'json.dump', (['extractors', 'extractorsFile'], {'indent': '(2)'}), '(extractors, extractorsFile, indent=2)\n', (3011, 3049), False, 'import json\n'), ((3060, 3117), 'json.dump', 'json.dump', (['multilayerinput', 'multilayerInputFile'], {'indent': '(2)'}), '(multilayerinput, multilayerInputFile, indent=2)\n', (3069, 3117), False, 'import json\n'), ((546, 572), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (570, 572), False, 'import numpy\n'), ((1550, 1576), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (1574, 1576), False, 'import numpy\n'), ((1314, 1335), 'copy.deepcopy', 'copy.deepcopy', (['triple'], {}), '(triple)\n', (1327, 1335), False, 'import copy\n'), ((706, 732), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (730, 732), False, 'import numpy\n'), ((1461, 1495), 'numpy.cbrt', 'numpy.cbrt', (["extractor['precision']"], {}), "(extractor['precision'])\n", (1471, 1495), False, 'import numpy\n'), ((628, 654), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (652, 654), False, 'import numpy\n'), ((1207, 1233), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (1231, 1233), False, 'import numpy\n'), ((2449, 2475), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (2473, 2475), False, 'import numpy\n'), ((1411, 1437), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (1435, 1437), False, 'import numpy\n')] |
# Add this project to the path
import os
import sys
currDir = os.path.dirname(os.path.realpath("__file__"))  # string literal keeps this working in notebooks
rootDir = os.path.abspath(os.path.join(currDir, '..'))
sys.path.insert(1, rootDir)
# Warnings
import warnings
warnings.filterwarnings("ignore")
# My modules
from features.build_features import *
# Public modules
from sklearn.model_selection import GridSearchCV
import numpy as np
from pandas import read_csv
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, learning_curve
from sklearn.metrics import precision_recall_curve, confusion_matrix, \
precision_score, recall_score
from sklearn.model_selection import cross_val_predict
from numpy.random import seed
import lightgbm as lgb
# Inputs
SHOW_ERROR_ANALYSIS = True
# Extract
seed(40)
train = read_csv("../../data/interim/train.csv")
train_y = train[["y"]].values
dev = read_csv("../../data/interim/dev.csv")
dev_y = dev[["y"]].values
# Data parameters
features_pipeline = data_preparation()
# Model parameters
full_pipeline = Pipeline([
("features", features_pipeline),
("clf", lgb.LGBMClassifier(class_weight='balanced')),
])
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.model_selection import train_test_split
# Set seed
np.random.seed(40)
# Initialize
max_score = 0.0
initial_n_estimators = 50
best_params = {'n_estimators':initial_n_estimators, 'learning_rate':0.1, 'max_depth':6, 'subsample':1.00, 'colsample_bytree':1.0, 'reg_lambda':1}
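
# The loops below tune one or two hyperparameters at a time (coordinate-descent
# style), keeping any setting that improves the best recall achievable at
# >= 20% precision on the dev set (see get_score).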
def get_score(params):
model = Pipeline([
("features", features_pipeline),
("clf", lgb.LGBMClassifier(class_weight='balanced', **params)),
])
print("Fitting model")
print(params)
model.fit(train, train_y)
prob_y = model.predict_proba(dev)[:, 1]
precision, recall, _ = precision_recall_curve(dev_y, prob_y, pos_label=1)
score = max([y for (x,y) in zip(precision, recall) if x >= 0.20])
return score
# 1) Tune max depth
params = best_params.copy()
for max_depth in [1, 2, 4, 6, 8]:
params['max_depth'] = max_depth
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned max depth and have a new max score: %.3f" % score)
# 2) Tune subsample
params = best_params.copy()
for subsample in [0.40, 0.60, 0.80, 0.90]:
params["subsample"] = subsample
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned subsample and have a new max score: %.3f" % score)
# 3) Tune n_estimators
params = best_params.copy()
for n_estimators in [1.1, 1.3]:
params["n_estimators"] = int(initial_n_estimators*n_estimators)
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned n_estimators and have a new max score: %.3f" % score)
# 4) Tune learning rate
params = best_params.copy()
for n_estimators, learning_rate in zip([initial_n_estimators*1.4, int(initial_n_estimators/1.4)], [0.07, 0.15]):
params["n_estimators"] = int(n_estimators)
params["learning_rate"] = learning_rate
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned learning rate and have a new max score: %.3f" % score)
# 5) Tune n_estimators again
params = best_params.copy()
for n_estimators in [int(1.1*initial_n_estimators), int(1.2*initial_n_estimators)]:
params["n_estimators"] = n_estimators
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned n_estimators again and have a new max score: %.3f" % score)
# 6) Tune sampling by tree
params = best_params.copy()
for colsample_bytree in [0.6, 0.8, 0.9]:
params["colsample_bytree"] = colsample_bytree
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned colsample_bytree and have a new max score: %.3f" % score)
# 7) Tune subsample again
params = best_params.copy()
for subsample in [0.6, 0.75, 0.9]:
params["subsample"] = subsample
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned subsample again and have a new max score: %.3f" % score)
# 8) Tune sampling fields
params = best_params.copy()
subsample_ = 0.9 if best_params["subsample"] == 1.0 else best_params["subsample"]
colsample_bytree_ = 0.6 if best_params["colsample_bytree"] == 0.5 else best_params["colsample_bytree"]
for subsample in [subsample_, subsample_ + 0.1]:
for colsample_bytree in [colsample_bytree_ - 0.1, colsample_bytree_]:
params["subsample"] = subsample
params["colsample_bytree"] = colsample_bytree
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned sampling fields and have a new max score: %.3f" % score)
# 9) Tune reg_lambda
params = best_params.copy()
for reg_lambda in [3, 10, 33, 100, 300]:
params["reg_lambda"] = reg_lambda
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned alpha and have a new max score: %.3f" % score)
# 10) Tune trees
params = best_params.copy()
up = 1
for i in range(5):
params["n_estimators"] = int(params["n_estimators"] * (1.4 - 0.05*i) if up == 1 else params["n_estimators"] / (1.4 - 0.05*i))
score = get_score(params)
if score > max_score:
max_score = score
best_params = params.copy()
print("Tuned n_estimators and have a new max score: %.3f" % score)
else:
up = 0 if up == 1 else 1
print("Max score: %.3f" % max_score)
print("Best params: %s" % best_params)
full_pipeline = Pipeline([
("features", features_pipeline),
("clf", lgb.LGBMClassifier(class_weight='balanced', **best_params)),
])
full_pipeline = Pipeline([
("features", features_pipeline),
("clf", lgb.LGBMClassifier(class_weight='balanced')),
])
# Tune features pipeline parameters
parameters = {
'features__num_pipeline__new_numeric_attribs_adder__add_age_booleans': [True, False],
'features__hybrid_pipeline__new_hybrid_attribs_adder__add_income': [True, False],
'features__cat_pipeline__drop_unimportant_category_values__drop': [True, False],
'features__cat_pipeline__cat_encoder__drop': ['first', None]
}
gs = GridSearchCV(full_pipeline, parameters, cv=5)
gs.fit(train, train_y)
print("Best params: %s" % gs.best_params_)
full_pipeline = Pipeline([
("features", features_pipeline),
("clf", lgb.LGBMClassifier(class_weight='balanced', **best_params, **gs.best_params_)),
])
# Fit
full_pipeline.fit(train, train_y)
# Predict
precision_threshold = 0.20
prob_y = full_pipeline.predict_proba(dev)[:, 1]
precision, recall, thresholds = precision_recall_curve(dev_y, prob_y, pos_label=1)
print(precision, recall, thresholds)
score = max([y for (x,y) in zip(precision, recall) if x >= precision_threshold])
print('Recall score: %.3f' % score)
# Error analysis
if SHOW_ERROR_ANALYSIS:
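    # pick the first precision-recall point with precision >= 20% and use its
    # probability threshold to binarize predictions for the confusion matrix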
precision_threshold_index = min([i for (x,i) in zip(precision, range(len(precision))) if x >= precision_threshold])
dev["prob_y"] = prob_y
prob_y_threshold = (list(thresholds) + [1.1])[precision_threshold_index]
pred_y = (prob_y >= prob_y_threshold).astype(bool)
print("Prob y Threshold: %.1f" % (prob_y_threshold*100))
print(confusion_matrix(dev_y, pred_y))
print("Recall: %.1f" % (recall_score(dev_y, pred_y)*100))
print("Precision: %.1f" % (precision_score(dev_y, pred_y)*100))
| [
"sklearn.model_selection.GridSearchCV",
"sys.path.insert",
"pandas.read_csv",
"sklearn.metrics.precision_recall_curve",
"os.path.join",
"lightgbm.LGBMClassifier",
"sklearn.metrics.precision_score",
"os.path.realpath",
"sklearn.metrics.recall_score",
"numpy.random.seed",
"warnings.simplefilter",
... | [((166, 193), 'sys.path.insert', 'sys.path.insert', (['(1)', 'rootDir'], {}), '(1, rootDir)\n', (181, 193), False, 'import sys\n'), ((222, 255), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (245, 255), False, 'import warnings\n'), ((897, 905), 'numpy.random.seed', 'seed', (['(40)'], {}), '(40)\n', (901, 905), False, 'from numpy.random import seed\n'), ((914, 954), 'pandas.read_csv', 'read_csv', (['"""../../data/interim/train.csv"""'], {}), "('../../data/interim/train.csv')\n", (922, 954), False, 'from pandas import read_csv\n'), ((991, 1029), 'pandas.read_csv', 'read_csv', (['"""../../data/interim/dev.csv"""'], {}), "('../../data/interim/dev.csv')\n", (999, 1029), False, 'from pandas import read_csv\n'), ((1285, 1347), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (1306, 1347), False, 'import warnings\n'), ((1413, 1431), 'numpy.random.seed', 'np.random.seed', (['(40)'], {}), '(40)\n', (1427, 1431), True, 'import numpy as np\n'), ((6708, 6753), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['full_pipeline', 'parameters'], {'cv': '(5)'}), '(full_pipeline, parameters, cv=5)\n', (6720, 6753), False, 'from sklearn.model_selection import GridSearchCV\n'), ((7144, 7194), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['dev_y', 'prob_y'], {'pos_label': '(1)'}), '(dev_y, prob_y, pos_label=1)\n', (7166, 7194), False, 'from sklearn.metrics import precision_recall_curve, confusion_matrix, precision_score, recall_score\n'), ((80, 108), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (96, 108), False, 'import os\n'), ((136, 163), 'os.path.join', 'os.path.join', (['currDir', '""".."""'], {}), "(currDir, '..')\n", (148, 163), False, 'import os\n'), ((1947, 1997), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['dev_y', 'prob_y'], {'pos_label': '(1)'}), '(dev_y, prob_y, pos_label=1)\n', (1969, 1997), False, 'from sklearn.metrics import precision_recall_curve, confusion_matrix, precision_score, recall_score\n'), ((7741, 7772), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['dev_y', 'pred_y'], {}), '(dev_y, pred_y)\n', (7757, 7772), False, 'from sklearn.metrics import precision_recall_curve, confusion_matrix, precision_score, recall_score\n'), ((1210, 1253), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced')\n", (1228, 1253), True, 'import lightgbm as lgb\n'), ((6132, 6190), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced', **best_params)\n", (6150, 6190), True, 'import lightgbm as lgb\n'), ((6273, 6316), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced')\n", (6291, 6316), True, 'import lightgbm as lgb\n'), ((6897, 6974), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced', **best_params, **gs.best_params_)\n", (6915, 6974), True, 'import lightgbm as lgb\n'), ((1738, 1791), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced', **params)\n", (1756, 1791), True, 'import lightgbm as lgb\n'), ((7802, 7829), 'sklearn.metrics.recall_score', 'recall_score', (['dev_y', 'pred_y'], {}), '(dev_y, pred_y)\n', (7814, 7829), False, 
'from sklearn.metrics import precision_recall_curve, confusion_matrix, precision_score, recall_score\n'), ((7867, 7897), 'sklearn.metrics.precision_score', 'precision_score', (['dev_y', 'pred_y'], {}), '(dev_y, pred_y)\n', (7882, 7897), False, 'from sklearn.metrics import precision_recall_curve, confusion_matrix, precision_score, recall_score\n')] |
# coding: utf-8
# # Broadcasting a spectrum - Two spectral Components model
# In[ ]:
from astropy.io import fits
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
from scipy.stats import chisquare
from PyAstronomy.pyasl import dopplerShift
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib')
# In[ ]:
def two_comp_model(wav, model1, model2, alphas, rvs, gammas):
# Make 2 component simulations, broadcasting over alpha, rv, gamma values.
# Enable single scalar inputs (turn to 1d np.array)
if not hasattr(alphas, "__len__"):
alphas = np.asarray(alphas)[np.newaxis]
if not hasattr(rvs, "__len__"):
rvs = np.asarray(rvs)[np.newaxis]
if not hasattr(gammas, "__len__"):
gammas = np.asarray(gammas)[np.newaxis]
# print(len(gammas))
am2 = model2[:,np.newaxis] * alphas # alpha * Model2 (am2)
# print(am2.shape)
am2rv = np.empty(am2.shape + (len(rvs),)) # am2rv = am2 with rv doppler-shift
# print(am2rv.shape)
for i, rv in enumerate(rvs):
#nflux, wlprime = dopplerShift(wav, am2, rv)
#am2rv[:, :, i] = nflux
        wav_i = (1 + rv / 299792.458) * wav  # speed of light in km/s; the global name c holds the companion flux array
am2rv[:, :, i] = interp1d(wav_i, am2, axis=0, bounds_error=False)(wav)
# Normalize by (1 / 1 + alpha)
am2rv = am2rv / (1 + alphas)[np.newaxis, :, np.newaxis]
    am2rvm1 = model1[:, np.newaxis, np.newaxis] + am2rv  # am2rvm1 = am2rv + model_1 (use the parameter, not the global h)
# print(am2rvm1.shape)
am2rvm1g = np.empty(am2rvm1.shape + (len(gammas),)) # am2rvm1g = am2rvm1 with gamma doppler-shift
for j, gamma in enumerate(gammas):
wav_j = (1 + gamma / 299792.458) * wav
am2rvm1g[:, :, :, j] = interp1d(wav_j, am2rvm1, axis=0, bounds_error=False)(wav)
    return interp1d(wav, am2rvm1g, axis=0)  # callable on this function's own wavelength grid, not the global w
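
# NOTE: one_comp_model is called below but never defined in this notebook; the
# following is a minimal sketch of the assumed intent (a single host spectrum
# doppler-shifted over gamma), not the original implementation.
def one_comp_model(wav, model1, gammas):
    """Broadcast a one-component model over gamma doppler shifts."""
    if not hasattr(gammas, "__len__"):
        gammas = np.asarray(gammas)[np.newaxis]
    m1g = np.empty(model1.shape + (len(gammas),))
    for j, gamma in enumerate(gammas):
        wav_j = (1 + gamma / 299792.458) * wav
        m1g[:, j] = interp1d(wav_j, model1, axis=0, bounds_error=False)(wav)
    return interp1d(wav, m1g, axis=0)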
# In[ ]:
wav = "/home/jneal/Phd/data/phoenixmodels/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits"
host = "/home/jneal/Phd/data/phoenixmodels/HD30501-lte05200-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
comp = "/home/jneal/Phd/data/phoenixmodels/HD30501b-lte02500-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
w = fits.getdata(wav) / 10
h = fits.getdata(host)
c = fits.getdata(comp)
# In[ ]:
mask = (2111 < w) & (w < 2117)
w = w[mask]
h = h[mask]
c = c[mask]
# crude normalization
h = h/np.max(h)
c = c/np.max(c)
# In[ ]:
# Create a simulated spectrum
# Parameters
c_kms = 299792.458 # km/s
s_alpha = np.array([0.1])
s_rv = np.array([1.5])
s_gamma = np.array([0.5])
answers = (s_alpha, s_rv, s_gamma)
# COMPACT SIMULATION
comp = interp1d((1 + s_rv / c_kms) * w, s_alpha * c, bounds_error=False)(w)
Sim_func = interp1d((1 + s_gamma / c_kms) * w, (h + comp) / (1 + s_alpha), bounds_error=False, axis=0)
sim_f_orgw = Sim_func(w)
sim_w = np.linspace(2114, 2115, 1024)
sim_f = Sim_func(sim_w)
# In[ ]:
# Compare output to tcm
tcm_sim_f = two_comp_model(w, h, c, s_alpha, s_rv, s_gamma)(sim_w)
ocm_sim_f = one_comp_model(w, h, s_gamma)(sim_w)
# In[ ]:
plt.close()
plt.plot(w, sim_f_orgw, label="org_w")
plt.plot(sim_w, sim_f, label="sim")
plt.plot(sim_w, np.squeeze(tcm_sim_f), label="tcm sim")
plt.plot(sim_w, np.squeeze(ocm_sim_f), label="ocm sim")
plt.legend()
plt.show()
sim_f.shape
# sim_w, sim_f are the observations to perform chisquared against!
# In[ ]:
alphas = np.linspace(0.1, 0.3, 40)
rvs = np.arange(1.1, 2, 0.05)
gammas = np.arange(-0.9, 1, 0.015)
print(len(alphas), len(rvs), len(gammas))
# In[ ]:
tcm = two_comp_model(w, h, c, alphas=alphas, rvs=rvs, gammas=gammas)
# In[ ]:
# Two component model
tcm_obs = tcm(sim_w)
tcm_obs.shape
# In[ ]:
chi2 = chisquare(sim_f[:, np.newaxis, np.newaxis, np.newaxis], tcm_obs).statistic
print(chi2.shape)
min_indx = np.unravel_index(chi2.argmin(), chi2.shape)
print("sim results", alphas[min_indx[0]], min_rvs[indx[1]], gammas[min_indx[2]])
print("answer", answers)
# In[ ]:
# Putting resulted sim min values back into tcm model
res = two_comp_model(w, h, c, alphas[min_indx[0]], rvs[min_indx[1]], gammas[min_indx[2]])
res_f = res(sim_w) # flux of the minimum-chisquare model evaluated at the obs points.
# In[ ]:
# Compare to tcm generated simulation
chi2_tcm = chisquare(tcm_sim_f, tcm_obs).statistic
min_indx_tcm = np.unravel_index(chi2_tcm.argmin(), chi2_tcm.shape)
print("tcm results", alphas[min_indx_tcm[0]], rvs[min_indx_tcm[1]], gammas[min_indx_tcm[2]])
print("answer", answers)
# In[ ]:
# Putting resulted tcm sim min values back into tcm model
res_tcm = two_comp_model(w, h, c, alphas[min_indx_tcm[0]], rvs[min_indx_tcm[1]], gammas[min_indx_tcm[2]])
res_tcm_f = res_tcm(sim_w) # flux of the minimum-chisquare model evaluated at the obs points.
# In[ ]:
plt.plot(sim_w, sim_f, "--", label="org")
plt.plot(sim_w, np.squeeze(res_f), label= "2 comp")
plt.plot(sim_w, np.squeeze(res_tcm_f), label="fit to tcm sim")
plt.title("Comparison to Simulation")
plt.legend()
plt.show()
# In[ ]:
plt.close()
plt.figure()
# In[ ]:
plt.figure()
plt.contourf(chi2[:,:,0])
plt.figure()
plt.contourf(chi2[0,:,:])
# In[ ]:
plt.figure()
plt.contourf(chi2[:,1,:])
plt.figure()
# In[ ]:
# Slice arrays to make contour maps
xslice = np.arange(0, chi2.shape[0], 5)
yslice = np.arange(0, chi2.shape[1], 5)
zslice = np.arange(0, chi2.shape[2], 5)
for xs in xslice:
plt.figure()
plt.contourf(chi2[xs, :, :])
plt.colorbar()
plt.title("x alpha = {}".format(alphas[xs]))
plt.show()
# In[ ]:
for ys in yslice:
plt.figure()
plt.contourf(chi2[:, ys, :])
plt.colorbar()
plt.title("y rvs = {}".format(rvs[ys]))
plt.show()
# In[ ]:
for zs in zslice:
plt.figure()
plt.contourf(chi2[:, :, zs])
plt.colorbar()
plt.title("z gammas = {}".format(gammas[zs]))
plt.show()
# In[ ]:
for xs in np.concatenate([xslice, yslice, zslice]):
plt.close()
# In[ ]:
# In[ ]:
| [
"matplotlib.pyplot.contourf",
"numpy.arange",
"scipy.stats.chisquare",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.close",
"numpy.array",
"astropy.io.fits.getdata",
"numpy.linspace",
"matplotlib.pyplot.figure",
"nu... | [((2235, 2253), 'astropy.io.fits.getdata', 'fits.getdata', (['host'], {}), '(host)\n', (2247, 2253), False, 'from astropy.io import fits\n'), ((2258, 2276), 'astropy.io.fits.getdata', 'fits.getdata', (['comp'], {}), '(comp)\n', (2270, 2276), False, 'from astropy.io import fits\n'), ((2508, 2523), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (2516, 2523), True, 'import numpy as np\n'), ((2534, 2549), 'numpy.array', 'np.array', (['[1.5]'], {}), '([1.5])\n', (2542, 2549), True, 'import numpy as np\n'), ((2560, 2575), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (2568, 2575), True, 'import numpy as np\n'), ((2720, 2815), 'scipy.interpolate.interp1d', 'interp1d', (['((1 + s_gamma / c_kms) * w)', '((h + comp) / (1 + s_alpha))'], {'bounds_error': '(False)', 'axis': '(0)'}), '((1 + s_gamma / c_kms) * w, (h + comp) / (1 + s_alpha),\n bounds_error=False, axis=0)\n', (2728, 2815), False, 'from scipy.interpolate import interp1d\n'), ((2846, 2875), 'numpy.linspace', 'np.linspace', (['(2114)', '(2115)', '(1024)'], {}), '(2114, 2115, 1024)\n', (2857, 2875), True, 'import numpy as np\n'), ((3066, 3077), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3075, 3077), True, 'import matplotlib.pyplot as plt\n'), ((3078, 3116), 'matplotlib.pyplot.plot', 'plt.plot', (['w', 'sim_f_orgw'], {'label': '"""org_w"""'}), "(w, sim_f_orgw, label='org_w')\n", (3086, 3116), True, 'import matplotlib.pyplot as plt\n'), ((3117, 3152), 'matplotlib.pyplot.plot', 'plt.plot', (['sim_w', 'sim_f'], {'label': '"""sim"""'}), "(sim_w, sim_f, label='sim')\n", (3125, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3265, 3277), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3275, 3277), True, 'import matplotlib.pyplot as plt\n'), ((3278, 3288), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3286, 3288), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3417), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(40)'], {}), '(0.1, 0.3, 40)\n', (3403, 3417), True, 'import numpy as np\n'), ((3424, 3447), 'numpy.arange', 'np.arange', (['(1.1)', '(2)', '(0.05)'], {}), '(1.1, 2, 0.05)\n', (3433, 3447), True, 'import numpy as np\n'), ((3457, 3482), 'numpy.arange', 'np.arange', (['(-0.9)', '(1)', '(0.015)'], {}), '(-0.9, 1, 0.015)\n', (3466, 3482), True, 'import numpy as np\n'), ((4769, 4810), 'matplotlib.pyplot.plot', 'plt.plot', (['sim_w', 'sim_f', '"""--"""'], {'label': '"""org"""'}), "(sim_w, sim_f, '--', label='org')\n", (4777, 4810), True, 'import matplotlib.pyplot as plt\n'), ((4926, 4963), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparison to Simulation"""'], {}), "('Comparison to Simulation')\n", (4935, 4963), True, 'import matplotlib.pyplot as plt\n'), ((4964, 4976), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4974, 4976), True, 'import matplotlib.pyplot as plt\n'), ((4977, 4987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4985, 4987), True, 'import matplotlib.pyplot as plt\n'), ((5001, 5012), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5010, 5012), True, 'import matplotlib.pyplot as plt\n'), ((5013, 5025), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5023, 5025), True, 'import matplotlib.pyplot as plt\n'), ((5039, 5051), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5049, 5051), True, 'import matplotlib.pyplot as plt\n'), ((5052, 5079), 'matplotlib.pyplot.contourf', 'plt.contourf', (['chi2[:, :, 0]'], {}), '(chi2[:, :, 0])\n', (5064, 5079), True, 'import matplotlib.pyplot as 
plt\n'), ((5078, 5090), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5088, 5090), True, 'import matplotlib.pyplot as plt\n'), ((5091, 5118), 'matplotlib.pyplot.contourf', 'plt.contourf', (['chi2[0, :, :]'], {}), '(chi2[0, :, :])\n', (5103, 5118), True, 'import matplotlib.pyplot as plt\n'), ((5130, 5142), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5140, 5142), True, 'import matplotlib.pyplot as plt\n'), ((5143, 5170), 'matplotlib.pyplot.contourf', 'plt.contourf', (['chi2[:, 1, :]'], {}), '(chi2[:, 1, :])\n', (5155, 5170), True, 'import matplotlib.pyplot as plt\n'), ((5169, 5181), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5179, 5181), True, 'import matplotlib.pyplot as plt\n'), ((5242, 5272), 'numpy.arange', 'np.arange', (['(0)', 'chi2.shape[0]', '(5)'], {}), '(0, chi2.shape[0], 5)\n', (5251, 5272), True, 'import numpy as np\n'), ((5282, 5312), 'numpy.arange', 'np.arange', (['(0)', 'chi2.shape[1]', '(5)'], {}), '(0, chi2.shape[1], 5)\n', (5291, 5312), True, 'import numpy as np\n'), ((5322, 5352), 'numpy.arange', 'np.arange', (['(0)', 'chi2.shape[2]', '(5)'], {}), '(0, chi2.shape[2], 5)\n', (5331, 5352), True, 'import numpy as np\n'), ((5856, 5896), 'numpy.concatenate', 'np.concatenate', (['[xslice, yslice, zslice]'], {}), '([xslice, yslice, zslice])\n', (5870, 5896), True, 'import numpy as np\n'), ((1809, 1838), 'scipy.interpolate.interp1d', 'interp1d', (['w', 'am2rvm1g'], {'axis': '(0)'}), '(w, am2rvm1g, axis=0)\n', (1817, 1838), False, 'from scipy.interpolate import interp1d\n'), ((2208, 2225), 'astropy.io.fits.getdata', 'fits.getdata', (['wav'], {}), '(wav)\n', (2220, 2225), False, 'from astropy.io import fits\n'), ((2388, 2397), 'numpy.max', 'np.max', (['h'], {}), '(h)\n', (2394, 2397), True, 'import numpy as np\n'), ((2404, 2413), 'numpy.max', 'np.max', (['c'], {}), '(c)\n', (2410, 2413), True, 'import numpy as np\n'), ((2640, 2705), 'scipy.interpolate.interp1d', 'interp1d', (['((1 + s_rv / c_kms) * w)', '(s_alpha * c)'], {'bounds_error': '(False)'}), '((1 + s_rv / c_kms) * w, s_alpha * c, bounds_error=False)\n', (2648, 2705), False, 'from scipy.interpolate import interp1d\n'), ((3169, 3190), 'numpy.squeeze', 'np.squeeze', (['tcm_sim_f'], {}), '(tcm_sim_f)\n', (3179, 3190), True, 'import numpy as np\n'), ((3225, 3246), 'numpy.squeeze', 'np.squeeze', (['ocm_sim_f'], {}), '(ocm_sim_f)\n', (3235, 3246), True, 'import numpy as np\n'), ((3698, 3762), 'scipy.stats.chisquare', 'chisquare', (['sim_f[:, np.newaxis, np.newaxis, np.newaxis]', 'tcm_obs'], {}), '(sim_f[:, np.newaxis, np.newaxis, np.newaxis], tcm_obs)\n', (3707, 3762), False, 'from scipy.stats import chisquare\n'), ((4274, 4303), 'scipy.stats.chisquare', 'chisquare', (['tcm_sim_f', 'tcm_obs'], {}), '(tcm_sim_f, tcm_obs)\n', (4283, 4303), False, 'from scipy.stats import chisquare\n'), ((4827, 4844), 'numpy.squeeze', 'np.squeeze', (['res_f'], {}), '(res_f)\n', (4837, 4844), True, 'import numpy as np\n'), ((4879, 4900), 'numpy.squeeze', 'np.squeeze', (['res_tcm_f'], {}), '(res_tcm_f)\n', (4889, 4900), True, 'import numpy as np\n'), ((5376, 5388), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5386, 5388), True, 'import matplotlib.pyplot as plt\n'), ((5393, 5421), 'matplotlib.pyplot.contourf', 'plt.contourf', (['chi2[xs, :, :]'], {}), '(chi2[xs, :, :])\n', (5405, 5421), True, 'import matplotlib.pyplot as plt\n'), ((5426, 5440), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5438, 5440), True, 'import matplotlib.pyplot as plt\n'), ((5494, 5504), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5502, 5504), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5555), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5553, 5555), True, 'import matplotlib.pyplot as plt\n'), ((5560, 5588), 'matplotlib.pyplot.contourf', 'plt.contourf', (['chi2[:, ys, :]'], {}), '(chi2[:, ys, :])\n', (5572, 5588), True, 'import matplotlib.pyplot as plt\n'), ((5593, 5607), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5605, 5607), True, 'import matplotlib.pyplot as plt\n'), ((5656, 5666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5664, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5715), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5713, 5715), True, 'import matplotlib.pyplot as plt\n'), ((5720, 5748), 'matplotlib.pyplot.contourf', 'plt.contourf', (['chi2[:, :, zs]'], {}), '(chi2[:, :, zs])\n', (5732, 5748), True, 'import matplotlib.pyplot as plt\n'), ((5753, 5767), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5765, 5767), True, 'import matplotlib.pyplot as plt\n'), ((5822, 5832), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5830, 5832), True, 'import matplotlib.pyplot as plt\n'), ((5902, 5913), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5911, 5913), True, 'import matplotlib.pyplot as plt\n'), ((605, 623), 'numpy.asarray', 'np.asarray', (['alphas'], {}), '(alphas)\n', (615, 623), True, 'import numpy as np\n'), ((686, 701), 'numpy.asarray', 'np.asarray', (['rvs'], {}), '(rvs)\n', (696, 701), True, 'import numpy as np\n'), ((770, 788), 'numpy.asarray', 'np.asarray', (['gammas'], {}), '(gammas)\n', (780, 788), True, 'import numpy as np\n'), ((1221, 1269), 'scipy.interpolate.interp1d', 'interp1d', (['wav_i', 'am2'], {'axis': '(0)', 'bounds_error': '(False)'}), '(wav_i, am2, axis=0, bounds_error=False)\n', (1229, 1269), False, 'from scipy.interpolate import interp1d\n'), ((1735, 1787), 'scipy.interpolate.interp1d', 'interp1d', (['wav_j', 'am2rvm1'], {'axis': '(0)', 'bounds_error': '(False)'}), '(wav_j, am2rvm1, axis=0, bounds_error=False)\n', (1743, 1787), False, 'from scipy.interpolate import interp1d\n')] |
"""
MOST OF THIS CODE IS NOT USED
IT'S COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
thisPath=os.path.abspath('../../../')
print(thisPath)
if thisPath not in sys.path:
    sys.path.append(thisPath)
import swhlab
import matplotlib.pyplot as plt
import numpy as np
def kernel_gaussian(size=100, sigma=None, forwardOnly=False):
"""
    return a 1d gaussian array of a given size and sigma.
If sigma isn't given, it will be 1/10 of the size, which is usually good.
"""
if sigma is None:sigma=size/10
points=np.exp(-np.power(np.arange(size)-size/2,2)/(2*np.power(sigma,2)))
if forwardOnly:
points[:int(len(points)/2)]=0
return points/sum(points)
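
# Example of the moving-baseline use in analyzeSweep below: a one-sided
# ("forwardOnly") kernel convolved with the sweep gives a baseline estimate
# that can be subtracted to flatten slow drift before event detection:
#   Kmb = kernel_gaussian(size=abf.pointsPerMs*10, forwardOnly=True)
#   Ymb = np.convolve(Y, Kmb, mode='same')  # then detect events on Y - Ymb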
def inspectKernel(abf,Kmb):
plt.figure(figsize=(5,5))
plt.plot(np.arange(len(Kmb))/abf.pointsPerMs,Kmb)
plt.xlabel("time (ms)")
plt.grid()
plt.title("kernel")
plt.margins(0,.1)
plt.show()
def inspectMovingBaseline(abf,X,Y,Ymb):
plt.figure(figsize=(10,5))
ax1=plt.subplot(211)
plt.grid()
plt.plot(X,Y,alpha=.5)
plt.plot(X,Ymb,color='k',alpha=1)
plt.subplot(212,sharex=ax1)
plt.grid()
plt.axhline(0,color='k')
plt.plot(X,Y-Ymb,color='r',alpha=.5)
plt.margins(0,.1)
plt.axis([.70,1,None,None])
plt.tight_layout()
plt.show()
def inspectFirstDeriv(abf,X,Y,dTms=1):
dT=int(dTms*abf.pointsPerMs)
dY=Y[dT:]-Y[:-dT]
plt.figure(figsize=(10,5))
plt.grid()
plt.margins(0,.1)
plt.plot(X[:len(dY)],dY)
plt.axis([.70,1,None,None])
plt.tight_layout()
plt.show()
return
def inspectLPF(abf,X,Y,Ylpf):
plt.figure(figsize=(10,5))
ax1=plt.subplot(211)
plt.ylabel("original data")
plt.grid()
plt.plot(X,Y,alpha=.5)
plt.subplot(212,sharex=ax1)
plt.ylabel("lowpass filtered")
plt.grid()
plt.plot(X,Ylpf,color='b',alpha=.5)
plt.margins(0,.1)
plt.axis([.70,1,None,None])
plt.tight_layout()
plt.show()
def inspectTrace(abf,X,Y):
plt.figure(figsize=(10,5))
plt.grid()
plt.plot(X,Y,color='b',alpha=.5)
plt.margins(0,.1)
plt.axis([.70,1,None,None])
plt.tight_layout()
plt.show()
def inspectTraces(abf,X,Y1,Y2):
plt.figure(figsize=(10,5))
ax1=plt.subplot(211)
plt.grid()
plt.plot(X,Y1,color='b',alpha=.5)
plt.subplot(212,sharex=ax1)
plt.grid()
plt.plot(X,Y2,color='b',alpha=.5)
plt.margins(0,.1)
plt.axis([.70,1,None,None])
plt.tight_layout()
plt.show()
def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False):
"""
m1 and m2, if given, are in seconds.
returns [# EPSCs, # IPSCs]
"""
abf.setsweep(sweep)
if m1 is None: m1=0
else: m1=m1*abf.pointsPerSec
if m2 is None: m2=-1
else: m2=m2*abf.pointsPerSec
# obtain X and Y
Yorig=abf.sweepY[int(m1):int(m2)]
X=np.arange(len(Yorig))/abf.pointsPerSec
# start by lowpass filtering (1 direction)
# Klpf=kernel_gaussian(size=abf.pointsPerMs*10,forwardOnly=True)
# Ylpf=np.convolve(Yorig,Klpf,mode='same')
# Y=Ylpf # commit
Kmb=kernel_gaussian(size=abf.pointsPerMs*10,forwardOnly=True)
Ymb=np.convolve(Yorig,Kmb,mode='same')
Y=Yorig-Ymb # commit
#Y1=np.copy(Y)
#Y[np.where(Y>0)[0]]=np.power(Y,2)
#Y[np.where(Y<0)[0]]=-np.power(Y,2)
# event detection
thresh=5 # threshold for an event
hitPos=np.where(Y>thresh)[0] # area above the threshold
hitNeg=np.where(Y<-thresh)[0] # area below the threshold
hitPos=np.concatenate((hitPos,[len(Y)-1])) # helps with the diff() coming up
hitNeg=np.concatenate((hitNeg,[len(Y)-1])) # helps with the diff() coming up
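    # np.diff finds the gaps between consecutive threshold crossings; a gap of
    # more than 10 samples separates distinct events, so one index is kept per event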
hitsPos=hitPos[np.where(np.abs(np.diff(hitPos))>10)[0]] # time point of EPSC
hitsNeg=hitNeg[np.where(np.abs(np.diff(hitNeg))>10)[0]] # time point of IPSC
hitsNeg=hitsNeg[1:] # often the first one is in error
#print(hitsNeg[0])
if plotToo:
plt.figure(figsize=(10,5))
ax1=plt.subplot(211)
plt.title("sweep %d: detected %d IPSCs (red) and %d EPSCs (blue)"%(sweep,len(hitsPos),len(hitsNeg)))
plt.ylabel("delta pA")
plt.grid()
plt.plot(X,Yorig,color='k',alpha=.5)
for hit in hitsPos:
plt.plot(X[hit],Yorig[hit]+20,'r.',ms=20,alpha=.5)
for hit in hitsNeg:
plt.plot(X[hit],Yorig[hit]-20,'b.',ms=20,alpha=.5)
plt.margins(0,.1)
plt.subplot(212,sharex=ax1)
plt.title("moving gaussian baseline subtraction used for threshold detection")
plt.ylabel("delta pA")
plt.grid()
plt.axhline(thresh,color='r',ls='--',alpha=.5,lw=3)
plt.axhline(-thresh,color='r',ls='--',alpha=.5,lw=3)
plt.plot(X,Y,color='b',alpha=.5)
plt.axis([X[0],X[-1],-thresh*1.5,thresh*1.5])
plt.tight_layout()
if type(plotToo) is str and os.path.isdir(plotToo):
print('saving %s/%05d.jpg'%(plotToo,sweep))
plt.savefig(plotToo+"/%05d.jpg"%sweep)
else:
plt.show()
plt.close('all')
return [len(hitsPos),len(hitsNeg)]
def indexPics(folder):
pics=[x for x in os.listdir(folder) if x.endswith(".png") or x.endswith(".jpg")]
pics=['<a href="%s"><img src="%s"></a>'%(x,x) for x in sorted(pics)]
with open(folder+"/index.html",'w') as f:
f.write("<html><body>"+"<br><br><br><br>".join(pics)+"</body></html>")
def analyzeABF(abf):
abf=swhlab.ABF(abf)
EPSCs=[]
IPSCs=[]
Xs=np.arange(abf.sweeps)*float(abf.sweepLength)/60.0
for sweep in range(abf.sweeps):
print("analyzing sweep %d of %d"%(sweep+1,abf.sweeps))
plotToo=False
if 0<Xs[sweep]<120:
plotToo=r'C:\Users\swharden\Documents\temp'
[hitsPos,hitsNeg]=analyzeSweep(abf,sweep=sweep,m1=.3,plotToo=plotToo)
EPSCs.append(hitsPos/(float(abf.sweepLength)-.3))
IPSCs.append(hitsNeg/(float(abf.sweepLength)-.3))
EPSCsmooth=np.convolve(EPSCs,kernel_gaussian(20),mode='same')
IPSCsmooth=np.convolve(IPSCs,kernel_gaussian(20),mode='same')
plt.figure(figsize=(10,5))
plt.grid()
plt.plot(Xs,EPSCsmooth,'.',color='r',label="EPSCs",ms=10,alpha=.5)
plt.plot(Xs,IPSCsmooth,'.',color='b',label="IPSCs",ms=10,alpha=.5)
plt.axhline(0,color='k',lw=2)
plt.legend()
for t in abf.comment_times:
plt.axvline(t/60,color='k',lw=2,ls='--',alpha=.5)
plt.margins(0,.1)
plt.ylabel("event frequency (Hz)")
plt.xlabel("time (minutes)")
plt.show()
indexPics(r'C:\Users\swharden\Documents\temp')
if __name__=="__main__":
analyzeABF(r"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf")
print("DONE")
| [
"matplotlib.pyplot.grid",
"numpy.convolve",
"matplotlib.pyplot.ylabel",
"sys.path.append",
"matplotlib.pyplot.margins",
"numpy.arange",
"os.listdir",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diff",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.close",
"... | [((180, 208), 'os.path.abspath', 'os.path.abspath', (['"""../../../"""'], {}), "('../../../')\n", (195, 208), False, 'import os\n'), ((258, 283), 'sys.path.append', 'sys.path.append', (['thisPath'], {}), '(thisPath)\n', (273, 283), False, 'import sys\n'), ((797, 823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (807, 823), True, 'import matplotlib.pyplot as plt\n'), ((881, 904), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (ms)"""'], {}), "('time (ms)')\n", (891, 904), True, 'import matplotlib.pyplot as plt\n'), ((909, 919), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (917, 919), True, 'import matplotlib.pyplot as plt\n'), ((924, 943), 'matplotlib.pyplot.title', 'plt.title', (['"""kernel"""'], {}), "('kernel')\n", (933, 943), True, 'import matplotlib.pyplot as plt\n'), ((948, 967), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (959, 967), True, 'import matplotlib.pyplot as plt\n'), ((970, 980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (978, 980), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1053), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1036, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1072, 1077), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1092), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1090, 1092), True, 'import matplotlib.pyplot as plt\n'), ((1097, 1122), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'alpha': '(0.5)'}), '(X, Y, alpha=0.5)\n', (1105, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1160), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Ymb'], {'color': '"""k"""', 'alpha': '(1)'}), "(X, Ymb, color='k', alpha=1)\n", (1132, 1160), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1190), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {'sharex': 'ax1'}), '(212, sharex=ax1)\n', (1173, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1204), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1202, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1234), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""k"""'}), "(0, color='k')\n", (1220, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1238, 1280), 'matplotlib.pyplot.plot', 'plt.plot', (['X', '(Y - Ymb)'], {'color': '"""r"""', 'alpha': '(0.5)'}), "(X, Y - Ymb, color='r', alpha=0.5)\n", (1246, 1280), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1298), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (1290, 1298), True, 'import matplotlib.pyplot as plt\n'), ((1301, 1331), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.7, 1, None, None]'], {}), '([0.7, 1, None, None])\n', (1309, 1331), True, 'import matplotlib.pyplot as plt\n'), ((1333, 1351), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1349, 1351), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1366), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1364, 1366), True, 'import matplotlib.pyplot as plt\n'), ((1467, 1494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1477, 1494), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1508), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1506, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1532), 
'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (1524, 1532), True, 'import matplotlib.pyplot as plt\n'), ((1564, 1594), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.7, 1, None, None]'], {}), '([0.7, 1, None, None])\n', (1572, 1594), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1614), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1612, 1614), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1629), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1627, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1703), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1686, 1703), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1727), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1722, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""original data"""'], {}), "('original data')\n", (1742, 1759), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1774), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1772, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1804), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'alpha': '(0.5)'}), '(X, Y, alpha=0.5)\n', (1787, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1834), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {'sharex': 'ax1'}), '(212, sharex=ax1)\n', (1817, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1868), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""lowpass filtered"""'], {}), "('lowpass filtered')\n", (1848, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1873, 1883), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1881, 1883), True, 'import matplotlib.pyplot as plt\n'), ((1888, 1927), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Ylpf'], {'color': '"""b"""', 'alpha': '(0.5)'}), "(X, Ylpf, color='b', alpha=0.5)\n", (1896, 1927), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1947), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (1939, 1947), True, 'import matplotlib.pyplot as plt\n'), ((1950, 1980), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.7, 1, None, None]'], {}), '([0.7, 1, None, None])\n', (1958, 1980), True, 'import matplotlib.pyplot as plt\n'), ((1982, 2000), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1998, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2005, 2015), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2013, 2015), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (2058, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2089), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2087, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2130), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'color': '"""b"""', 'alpha': '(0.5)'}), "(X, Y, color='b', alpha=0.5)\n", (2102, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2131, 2150), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (2142, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2183), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.7, 1, None, None]'], {}), '([0.7, 1, None, None])\n', (2161, 2183), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2203), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', 
(2201, 2203), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2218), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2216, 2218), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2283), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (2266, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2291, 2307), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2302, 2307), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2322), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2320, 2322), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2364), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y1'], {'color': '"""b"""', 'alpha': '(0.5)'}), "(X, Y1, color='b', alpha=0.5)\n", (2335, 2364), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2393), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {'sharex': 'ax1'}), '(212, sharex=ax1)\n', (2376, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2407), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2405, 2407), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2449), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y2'], {'color': '"""b"""', 'alpha': '(0.5)'}), "(X, Y2, color='b', alpha=0.5)\n", (2420, 2449), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2469), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (2461, 2469), True, 'import matplotlib.pyplot as plt\n'), ((2472, 2502), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.7, 1, None, None]'], {}), '([0.7, 1, None, None])\n', (2480, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2504, 2522), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2520, 2522), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2537), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2535, 2537), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3225), 'numpy.convolve', 'np.convolve', (['Yorig', 'Kmb'], {'mode': '"""same"""'}), "(Yorig, Kmb, mode='same')\n", (3200, 3225), True, 'import numpy as np\n'), ((5453, 5468), 'swhlab.ABF', 'swhlab.ABF', (['abf'], {}), '(abf)\n', (5463, 5468), False, 'import swhlab\n'), ((6087, 6114), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (6097, 6114), True, 'import matplotlib.pyplot as plt\n'), ((6118, 6128), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6126, 6128), True, 'import matplotlib.pyplot as plt\n'), ((6133, 6206), 'matplotlib.pyplot.plot', 'plt.plot', (['Xs', 'EPSCsmooth', '"""."""'], {'color': '"""r"""', 'label': '"""EPSCs"""', 'ms': '(10)', 'alpha': '(0.5)'}), "(Xs, EPSCsmooth, '.', color='r', label='EPSCs', ms=10, alpha=0.5)\n", (6141, 6206), True, 'import matplotlib.pyplot as plt\n'), ((6204, 6277), 'matplotlib.pyplot.plot', 'plt.plot', (['Xs', 'IPSCsmooth', '"""."""'], {'color': '"""b"""', 'label': '"""IPSCs"""', 'ms': '(10)', 'alpha': '(0.5)'}), "(Xs, IPSCsmooth, '.', color='b', label='IPSCs', ms=10, alpha=0.5)\n", (6212, 6277), True, 'import matplotlib.pyplot as plt\n'), ((6275, 6306), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""k"""', 'lw': '(2)'}), "(0, color='k', lw=2)\n", (6286, 6306), True, 'import matplotlib.pyplot as plt\n'), ((6309, 6321), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6319, 6321), True, 'import matplotlib.pyplot as plt\n'), ((6416, 6435), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (6427, 6435), True, 'import 
matplotlib.pyplot as plt\n'), ((6438, 6472), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""event frequency (Hz)"""'], {}), "('event frequency (Hz)')\n", (6448, 6472), True, 'import matplotlib.pyplot as plt\n'), ((6477, 6505), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (minutes)"""'], {}), "('time (minutes)')\n", (6487, 6505), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6518, 6520), True, 'import matplotlib.pyplot as plt\n'), ((3420, 3440), 'numpy.where', 'np.where', (['(Y > thresh)'], {}), '(Y > thresh)\n', (3428, 3440), True, 'import numpy as np\n'), ((3480, 3501), 'numpy.where', 'np.where', (['(Y < -thresh)'], {}), '(Y < -thresh)\n', (3488, 3501), True, 'import numpy as np\n'), ((3960, 3987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (3970, 3987), True, 'import matplotlib.pyplot as plt\n'), ((3999, 4015), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (4010, 4015), True, 'import matplotlib.pyplot as plt\n'), ((4133, 4155), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""delta pA"""'], {}), "('delta pA')\n", (4143, 4155), True, 'import matplotlib.pyplot as plt\n'), ((4164, 4174), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4172, 4174), True, 'import matplotlib.pyplot as plt\n'), ((4184, 4224), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Yorig'], {'color': '"""k"""', 'alpha': '(0.5)'}), "(X, Yorig, color='k', alpha=0.5)\n", (4192, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4411, 4430), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (4422, 4430), True, 'import matplotlib.pyplot as plt\n'), ((4438, 4466), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {'sharex': 'ax1'}), '(212, sharex=ax1)\n', (4449, 4466), True, 'import matplotlib.pyplot as plt\n'), ((4474, 4552), 'matplotlib.pyplot.title', 'plt.title', (['"""moving gaussian baseline subtraction used for threshold detection"""'], {}), "('moving gaussian baseline subtraction used for threshold detection')\n", (4483, 4552), True, 'import matplotlib.pyplot as plt\n'), ((4561, 4583), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""delta pA"""'], {}), "('delta pA')\n", (4571, 4583), True, 'import matplotlib.pyplot as plt\n'), ((4592, 4602), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4600, 4602), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4667), 'matplotlib.pyplot.axhline', 'plt.axhline', (['thresh'], {'color': '"""r"""', 'ls': '"""--"""', 'alpha': '(0.5)', 'lw': '(3)'}), "(thresh, color='r', ls='--', alpha=0.5, lw=3)\n", (4622, 4667), True, 'import matplotlib.pyplot as plt\n'), ((4671, 4728), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(-thresh)'], {'color': '"""r"""', 'ls': '"""--"""', 'alpha': '(0.5)', 'lw': '(3)'}), "(-thresh, color='r', ls='--', alpha=0.5, lw=3)\n", (4682, 4728), True, 'import matplotlib.pyplot as plt\n'), ((4732, 4768), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'color': '"""b"""', 'alpha': '(0.5)'}), "(X, Y, color='b', alpha=0.5)\n", (4740, 4768), True, 'import matplotlib.pyplot as plt\n'), ((4774, 4826), 'matplotlib.pyplot.axis', 'plt.axis', (['[X[0], X[-1], -thresh * 1.5, thresh * 1.5]'], {}), '([X[0], X[-1], -thresh * 1.5, thresh * 1.5])\n', (4782, 4826), True, 'import matplotlib.pyplot as plt\n'), ((4828, 4846), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4844, 4846), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5075), 
'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5068, 5075), True, 'import matplotlib.pyplot as plt\n'), ((6362, 6418), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(t / 60)'], {'color': '"""k"""', 'lw': '(2)', 'ls': '"""--"""', 'alpha': '(0.5)'}), "(t / 60, color='k', lw=2, ls='--', alpha=0.5)\n", (6373, 6418), True, 'import matplotlib.pyplot as plt\n'), ((4261, 4318), 'matplotlib.pyplot.plot', 'plt.plot', (['X[hit]', '(Yorig[hit] + 20)', '"""r."""'], {'ms': '(20)', 'alpha': '(0.5)'}), "(X[hit], Yorig[hit] + 20, 'r.', ms=20, alpha=0.5)\n", (4269, 4318), True, 'import matplotlib.pyplot as plt\n'), ((4352, 4409), 'matplotlib.pyplot.plot', 'plt.plot', (['X[hit]', '(Yorig[hit] - 20)', '"""b."""'], {'ms': '(20)', 'alpha': '(0.5)'}), "(X[hit], Yorig[hit] - 20, 'b.', ms=20, alpha=0.5)\n", (4360, 4409), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4905), 'os.path.isdir', 'os.path.isdir', (['plotToo'], {}), '(plotToo)\n', (4896, 4905), False, 'import os\n'), ((4975, 5017), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plotToo + '/%05d.jpg' % sweep)"], {}), "(plotToo + '/%05d.jpg' % sweep)\n", (4986, 5017), True, 'import matplotlib.pyplot as plt\n'), ((5040, 5050), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5048, 5050), True, 'import matplotlib.pyplot as plt\n'), ((5161, 5179), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (5171, 5179), False, 'import os\n'), ((5502, 5523), 'numpy.arange', 'np.arange', (['abf.sweeps'], {}), '(abf.sweeps)\n', (5511, 5523), True, 'import numpy as np\n'), ((656, 674), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (664, 674), True, 'import numpy as np\n'), ((627, 642), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (636, 642), True, 'import numpy as np\n'), ((3727, 3742), 'numpy.diff', 'np.diff', (['hitPos'], {}), '(hitPos)\n', (3734, 3742), True, 'import numpy as np\n'), ((3808, 3823), 'numpy.diff', 'np.diff', (['hitNeg'], {}), '(hitNeg)\n', (3815, 3823), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 16:15:32 2021
@author: furqanafzal
"""
#%%modules
import os
path='/Users/furqanafzal/Documents/furqan/MountSinai/Research/Code/trakr'
os.chdir(path)
import numpy as np
import matplotlib.pylab as plt
import modules
import importlib
importlib.reload(modules)
from modules import add_noise,standardize_data,cross_val_metrics_naiveB
#%% load data
# path='/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/erin_collab/variabledata'
# os.chdir(path)
path='/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/trakr/neurips2022/data_results'
os.chdir(path)
X=np.load('permutedseqMNIST_alldigits.npy')
# X=np.load('mnist_trakr_X_alldigits.npy')
# X=standardize_data(X)
y=np.load('mnist_trakr_labels_alldigits.npy')
#%% performance and evaluation - metrics
accuracy,aucvec=cross_val_metrics_naiveB(X,y,n_classes=10,splits=10)
#%%
performance_metrics=dict()
performance_metrics['accuracy']=accuracy
performance_metrics['auc']=aucvec
#%%
import pickle
with open('/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/trakr/neurips2022/data_results/noisyinput_metrics_nb_permutedmnist_noiselimitupto_5', 'wb') as f:
    pickle.dump(performance_metrics, f)
#%% Noisy Inputs
path='/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/trakr/neurips2022/data_results'
os.chdir(path)
y=np.load('mnist_trakr_labels_alldigits.npy')
level=np.linspace(1,6,50)
metrics=dict()
for loop in range(len(level)):
X=np.load('permutedseqMNIST_alldigits.npy')
sigma=level[loop]
X=add_noise(X,sigma)
accuracy,aucvec=cross_val_metrics_naiveB(X,y,n_classes=10,splits=10)
metrics[f'Noiselevel {level[loop]} - accuracy']=accuracy
metrics[f'Noiselevel {level[loop]} - auc']=aucvec
print(f'On Noiselevel {level[loop]}')
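#%% save noisy-input metrics (hedged sketch -- filename is illustrative, mirroring the save above)
# with open('noisyinput_metrics_nb_permutedmnist.pkl', 'wb') as f:
#     pickle.dump(metrics, f)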
| [
"pickle.dump",
"modules.cross_val_metrics_naiveB",
"modules.add_noise",
"os.chdir",
"numpy.linspace",
"importlib.reload",
"numpy.load"
] | [((208, 222), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (216, 222), False, 'import os\n'), ((305, 330), 'importlib.reload', 'importlib.reload', (['modules'], {}), '(modules)\n', (321, 330), False, 'import importlib\n'), ((661, 675), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (669, 675), False, 'import os\n'), ((679, 720), 'numpy.load', 'np.load', (['"""permutedseqMNIST_alldigits.npy"""'], {}), "('permutedseqMNIST_alldigits.npy')\n", (686, 720), True, 'import numpy as np\n'), ((790, 833), 'numpy.load', 'np.load', (['"""mnist_trakr_labels_alldigits.npy"""'], {}), "('mnist_trakr_labels_alldigits.npy')\n", (797, 833), True, 'import numpy as np\n'), ((892, 947), 'modules.cross_val_metrics_naiveB', 'cross_val_metrics_naiveB', (['X', 'y'], {'n_classes': '(10)', 'splits': '(10)'}), '(X, y, n_classes=10, splits=10)\n', (916, 947), False, 'from modules import add_noise, standardize_data, cross_val_metrics_naiveB\n'), ((1422, 1436), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1430, 1436), False, 'import os\n'), ((1439, 1482), 'numpy.load', 'np.load', (['"""mnist_trakr_labels_alldigits.npy"""'], {}), "('mnist_trakr_labels_alldigits.npy')\n", (1446, 1482), True, 'import numpy as np\n'), ((1489, 1510), 'numpy.linspace', 'np.linspace', (['(1)', '(6)', '(50)'], {}), '(1, 6, 50)\n', (1500, 1510), True, 'import numpy as np\n'), ((1263, 1286), 'pickle.dump', 'pickle.dump', (['metrics', 'f'], {}), '(metrics, f)\n', (1274, 1286), False, 'import pickle\n'), ((1562, 1603), 'numpy.load', 'np.load', (['"""permutedseqMNIST_alldigits.npy"""'], {}), "('permutedseqMNIST_alldigits.npy')\n", (1569, 1603), True, 'import numpy as np\n'), ((1632, 1651), 'modules.add_noise', 'add_noise', (['X', 'sigma'], {}), '(X, sigma)\n', (1641, 1651), False, 'from modules import add_noise, standardize_data, cross_val_metrics_naiveB\n'), ((1672, 1727), 'modules.cross_val_metrics_naiveB', 'cross_val_metrics_naiveB', (['X', 'y'], {'n_classes': '(10)', 'splits': '(10)'}), '(X, y, n_classes=10, splits=10)\n', (1696, 1727), False, 'from modules import add_noise, standardize_data, cross_val_metrics_naiveB\n')] |
import os
import numpy as np
from .main import DataLoader
from .timeseriesDLs import GridDataGenPyTorch
class TimeSeriesDataLoader(DataLoader):
def __init__(
self,
path: str,
file_ext: str,
recursive: bool,
iw_params: dict,
ow_params=None,
) -> None:
super().__init__(path, file_ext, recursive)
# list of files or directories
if file_ext == "dir":
self.ilist = [path + "/" + f + "/" for f in os.listdir(path)]
# for windowing the data
self.x_indexer = self.get_indexer(
iw_params["n_rows"],
iw_params["window_size"],
iw_params["shift_size"],
iw_params["start_at"],
iw_params["n_signals"]
)
        if ow_params is not None:
            self.y_indexer = self.get_indexer(
                ow_params["n_rows"],
                ow_params["window_size"],
                ow_params["shift_size"],
                ow_params["start_at"],
                ow_params["n_signals"]
            )
        else:
            self.y_indexer = None  # load_data expects this attribute to exist
def load_data(
self,
repeat_cols,
d_type
):
super().load_data()
dataset = GridDataGenPyTorch(
self.ilist,
self.x_indexer.shape[0],
self.x_indexer.shape[1],
repeat_cols,
self.x_indexer,
self.y_indexer,
d_type=d_type
)
        return dataset
    @staticmethod
    def get_indexer(n_rows, window_size, shift_size, start_point, leave_last):
        return np.arange(window_size)[None, :] + start_point + shift_size*np.arange(((n_rows - window_size - leave_last - start_point) // shift_size) + 1)[:, None]
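    # Worked example (hypothetical numbers): n_rows=10, window_size=4,
    # shift_size=2, start_point=0, leave_last=0 gives ((10-4)//2)+1 = 4 windows:
    # [[0 1 2 3], [2 3 4 5], [4 5 6 7], [6 7 8 9]]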
| [
"os.listdir",
"numpy.arange"
] | [((512, 528), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (522, 528), False, 'import os\n'), ((1580, 1602), 'numpy.arange', 'np.arange', (['window_size'], {}), '(window_size)\n', (1589, 1602), True, 'import numpy as np\n'), ((1639, 1717), 'numpy.arange', 'np.arange', (['((n_rows - window_size - leave_last - start_point) // shift_size + 1)'], {}), '((n_rows - window_size - leave_last - start_point) // shift_size + 1)\n', (1648, 1717), True, 'import numpy as np\n')] |
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## ICA model for TE data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% import required packages
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import FastICA
import matplotlib.pyplot as plt
#%% fetch TE data and select variables as done in Lee et al.
TEdata_noFault_train = np.loadtxt('d00.dat').T # data arrangement in d00.dat is different than that in other files
xmeas = TEdata_noFault_train[:,0:22]
xmv = TEdata_noFault_train[:,41:52]
data_noFault_train = np.hstack((xmeas, xmv))
#%% scale data
scaler = StandardScaler()
data_train_normal = scaler.fit_transform(data_noFault_train)
#%% fit ICA model
ica = FastICA(max_iter=1000, tol=0.005, random_state=1).fit(data_train_normal)
#%% decide # of ICs to retain via PCA variance method
from sklearn.decomposition import PCA
pca = PCA().fit(data_train_normal)
explained_variance = 100*pca.explained_variance_ratio_
cum_explained_variance = np.cumsum(explained_variance)
n_comp = np.argmax(cum_explained_variance >= 90) + 1
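# e.g. if cum_explained_variance were [55, 78, 91, 97], then
# np.argmax([False, False, True, True]) = 2, so n_comp = 3 ICs are retained
# to capture at least 90% of the variance.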
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Monitoring statistics function
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def compute_ICA_monitoring_metrics(ica_model, number_comp, data):
""" calculate monitoring statistics for given data
parameters
-----------
data: numpy array of shape = [n_samples, n_features]
Training or test data
Returns
----------
monitoring_stats: numpy array of shape = [n_samples, 3]
"""
# data parameters
n = data.shape[0]
# model parameters
    W = ica_model.components_
    L2_norm = np.linalg.norm(W, 2, axis=1)
    sort_order = np.flip(np.argsort(L2_norm))
    W_sorted = W[sort_order,:]
    # I2
    Wd = W_sorted[0:number_comp,:]
    Sd = np.dot(Wd, data.T)
    I2 = np.array([np.dot(Sd[:,i], Sd[:,i]) for i in range(n)])
    # Ie2
    We = W_sorted[number_comp:,:]
    Se = np.dot(We, data.T)
    Ie2 = np.array([np.dot(Se[:,i], Se[:,i]) for i in range(n)])
    # SPE
    Q = ica_model.whitening_
    Q_inv = np.linalg.inv(Q)
    A = ica_model.mixing_
    B = np.dot(Q, A)
    B_sorted = B[:,sort_order]
    Bd = B_sorted[:,0:number_comp]
data_reconstruct = np.dot(np.dot(np.dot(Q_inv, Bd), Wd), data.T)
e = data.T - data_reconstruct
SPE = np.array([np.dot(e[:,i], e[:,i]) for i in range(n)])
monitoring_stats = np.column_stack((I2, Ie2, SPE))
return monitoring_stats
def draw_monitoring_chart(values, CL, yLabel):
plt.figure()
plt.plot(values)
plt.axhline(CL, color = "red", linestyle = "--")
plt.xlabel('Sample #')
plt.ylabel(yLabel)
plt.show()
def draw_ICA_monitoring_charts(ICA_statistics, CLs, trainORtest):
""" draw monitoring charts for given data
parameters
-----------
ICA_statistics: numpy array of shape = [n_samples, 3]
CLs: List of control limits
trainORtest: 'training' or 'test'
"""
# I2 chart, Ie2 chart, SPE chart
draw_monitoring_chart(ICA_statistics[:,0], CLs[0], 'I2 for ' + trainORtest + ' data')
draw_monitoring_chart(ICA_statistics[:,1], CLs[1], 'Ie2 for ' + trainORtest + ' data')
draw_monitoring_chart(ICA_statistics[:,2], CLs[2], 'SPE for ' + trainORtest + ' data')
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Draw monitoring charts for training data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
ICA_statistics_train = compute_ICA_monitoring_metrics(ica, n_comp, data_train_normal)
I2_CL = np.percentile(ICA_statistics_train[:,0], 99)
Ie2_CL = np.percentile(ICA_statistics_train[:,1], 99)
SPE_CL = np.percentile(ICA_statistics_train[:,2], 99)
draw_ICA_monitoring_charts(ICA_statistics_train, [I2_CL, Ie2_CL, SPE_CL], 'training')
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## FAR / FDR computation function
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def compute_alarmRate(monitoring_stats, CLs):
""" calculate alarm rate
parameters
-----------
monitoring_stats: numpy array of shape = [n_samples, 3]
CLs: List of control limits
Returns
----------
alarmRate: float
"""
violationFlag = monitoring_stats > CLs
alarm_overall = np.any(violationFlag, axis=1) # violation of any metric => alarm
alarmRate = 100*np.sum(alarm_overall)/monitoring_stats.shape[0]
return alarmRate
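# e.g. if 760 of the 800 post-fault samples violate at least one control limit,
# the fault detection rate is 100*760/800 = 95%.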
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Draw monitoring charts for test data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# fetch data and select data as done in Lee et al.
TEdata_Fault_test = np.loadtxt('d10_te.dat')
xmeas = TEdata_Fault_test[:,0:22]
xmv = TEdata_Fault_test[:,41:52]
data_Fault_test = np.hstack((xmeas, xmv))
# scale data
data_test_scaled = scaler.transform(data_Fault_test)
# compute statistics and draw charts
ICA_statistics_test = compute_ICA_monitoring_metrics(ica, n_comp, data_test_scaled)
draw_ICA_monitoring_charts(ICA_statistics_test, [I2_CL, Ie2_CL, SPE_CL], 'test')
# compute FAR or FDR
alarmRate = compute_alarmRate(ICA_statistics_test[160:,:], [I2_CL, Ie2_CL, SPE_CL]) # faults start from sample 160
print(alarmRate)
| [
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"numpy.column_stack",
"numpy.argsort",
"numpy.cumsum",
"numpy.linalg.norm",
"sklearn.decomposition.FastICA",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.dot",
"numpy.argmax"... | [((669, 692), 'numpy.hstack', 'np.hstack', (['(xmeas, xmv)'], {}), '((xmeas, xmv))\n', (678, 692), True, 'import numpy as np\n'), ((730, 746), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (744, 746), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1137, 1166), 'numpy.cumsum', 'np.cumsum', (['explained_variance'], {}), '(explained_variance)\n', (1146, 1166), True, 'import numpy as np\n'), ((3941, 3986), 'numpy.percentile', 'np.percentile', (['ICA_statistics_train[:, 0]', '(99)'], {}), '(ICA_statistics_train[:, 0], 99)\n', (3954, 3986), True, 'import numpy as np\n'), ((3996, 4041), 'numpy.percentile', 'np.percentile', (['ICA_statistics_train[:, 1]', '(99)'], {}), '(ICA_statistics_train[:, 1], 99)\n', (4009, 4041), True, 'import numpy as np\n'), ((4051, 4096), 'numpy.percentile', 'np.percentile', (['ICA_statistics_train[:, 2]', '(99)'], {}), '(ICA_statistics_train[:, 2], 99)\n', (4064, 4096), True, 'import numpy as np\n'), ((5221, 5245), 'numpy.loadtxt', 'np.loadtxt', (['"""d10_te.dat"""'], {}), "('d10_te.dat')\n", (5231, 5245), True, 'import numpy as np\n'), ((5336, 5359), 'numpy.hstack', 'np.hstack', (['(xmeas, xmv)'], {}), '((xmeas, xmv))\n', (5345, 5359), True, 'import numpy as np\n'), ((478, 499), 'numpy.loadtxt', 'np.loadtxt', (['"""d00.dat"""'], {}), "('d00.dat')\n", (488, 499), True, 'import numpy as np\n'), ((1180, 1219), 'numpy.argmax', 'np.argmax', (['(cum_explained_variance >= 90)'], {}), '(cum_explained_variance >= 90)\n', (1189, 1219), True, 'import numpy as np\n'), ((1935, 1963), 'numpy.linalg.norm', 'np.linalg.norm', (['W', '(2)'], {'axis': '(1)'}), '(W, 2, axis=1)\n', (1949, 1963), True, 'import numpy as np\n'), ((2109, 2127), 'numpy.dot', 'np.dot', (['Wd', 'data.T'], {}), '(Wd, data.T)\n', (2115, 2127), True, 'import numpy as np\n'), ((2255, 2273), 'numpy.dot', 'np.dot', (['We', 'data.T'], {}), '(We, data.T)\n', (2261, 2273), True, 'import numpy as np\n'), ((2399, 2415), 'numpy.linalg.inv', 'np.linalg.inv', (['Q'], {}), '(Q)\n', (2412, 2415), True, 'import numpy as np\n'), ((2446, 2458), 'numpy.dot', 'np.dot', (['Q', 'A'], {}), '(Q, A)\n', (2452, 2458), True, 'import numpy as np\n'), ((2727, 2758), 'numpy.column_stack', 'np.column_stack', (['(I2, Ie2, SPE)'], {}), '((I2, Ie2, SPE))\n', (2742, 2758), True, 'import numpy as np\n'), ((2843, 2855), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2853, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2877), 'matplotlib.pyplot.plot', 'plt.plot', (['values'], {}), '(values)\n', (2869, 2877), True, 'import matplotlib.pyplot as plt\n'), ((2883, 2927), 'matplotlib.pyplot.axhline', 'plt.axhline', (['CL'], {'color': '"""red"""', 'linestyle': '"""--"""'}), "(CL, color='red', linestyle='--')\n", (2894, 2927), True, 'import matplotlib.pyplot as plt\n'), ((2937, 2959), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample #"""'], {}), "('Sample #')\n", (2947, 2959), True, 'import matplotlib.pyplot as plt\n'), ((2965, 2983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yLabel'], {}), '(yLabel)\n', (2975, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2989, 2999), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2997, 2999), True, 'import matplotlib.pyplot as plt\n'), ((4760, 4789), 'numpy.any', 'np.any', (['violationFlag'], {'axis': '(1)'}), '(violationFlag, axis=1)\n', (4766, 4789), True, 'import numpy as np\n'), ((848, 897), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'max_iter': '(1000)', 'tol': '(0.005)', 'random_state': 
'(1)'}), '(max_iter=1000, tol=0.005, random_state=1)\n', (855, 897), False, 'from sklearn.decomposition import FastICA\n'), ((1024, 1029), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (1027, 1029), False, 'from sklearn.decomposition import PCA\n'), ((1990, 2009), 'numpy.argsort', 'np.argsort', (['L2_norm'], {}), '(L2_norm)\n', (2000, 2009), True, 'import numpy as np\n'), ((2148, 2174), 'numpy.dot', 'np.dot', (['Sd[:, i]', 'Sd[:, i]'], {}), '(Sd[:, i], Sd[:, i])\n', (2154, 2174), True, 'import numpy as np\n'), ((2295, 2321), 'numpy.dot', 'np.dot', (['Se[:, i]', 'Se[:, i]'], {}), '(Se[:, i], Se[:, i])\n', (2301, 2321), True, 'import numpy as np\n'), ((2566, 2583), 'numpy.dot', 'np.dot', (['Q_inv', 'Bd'], {}), '(Q_inv, Bd)\n', (2572, 2583), True, 'import numpy as np\n'), ((2654, 2678), 'numpy.dot', 'np.dot', (['e[:, i]', 'e[:, i]'], {}), '(e[:, i], e[:, i])\n', (2660, 2678), True, 'import numpy as np\n'), ((4846, 4867), 'numpy.sum', 'np.sum', (['alarm_overall'], {}), '(alarm_overall)\n', (4852, 4867), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import matplotlib.image as mpimg
from skimage.feature import hog
def bin_spatial(img, size=(32, 32)):
color1 = cv2.resize(img[:,:,0], size).ravel()
color2 = cv2.resize(img[:,:,1], size).ravel()
color3 = cv2.resize(img[:,:,2], size).ravel()
return np.hstack((color1, color2, color3))
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
def extract_color_features(imgs, cspace='RGB', spatial_size=(32, 32),
hist_bins=32, hist_range=(0, 256)):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
# Read in each one by one
image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if cspace != 'RGB':
if cspace == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
# Apply bin_spatial() to get spatial color features
spatial_features = bin_spatial(feature_image, size=spatial_size)
# Apply color_hist() also with a color space option now
hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)
# Append the new feature vector to the features list
features.append(np.concatenate((spatial_features, hist_features)))
# Return list of feature vectors
return features
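# With the defaults above, each image contributes 3*32*32 = 3072 spatial
# features plus 3*32 = 96 histogram features, i.e. 3168 values per image.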
def get_hog(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=False,
visualise=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=False,
visualise=vis, feature_vector=feature_vec)
return features
def extract_hog_features(imgs, cspace='RGB', orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
# Read in each one by one
image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if cspace != 'RGB':
if cspace == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
# Call get_hog() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = get_hog(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
features.append(hog_features)
# Return list of feature vectors
return features
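# Minimal usage sketch (hypothetical file list; the color space and channel
# choice are illustrative, not prescribed by this module):
# import glob
# files = glob.glob('vehicles/**/*.png', recursive=True)
# color_feats = extract_color_features(files, cspace='YCrCb')
# hog_feats = extract_hog_features(files, cspace='YCrCb', hog_channel='ALL')
# X = np.hstack((np.array(color_feats), np.array(hog_feats)))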
| [
"numpy.copy",
"numpy.histogram",
"cv2.resize",
"numpy.hstack",
"matplotlib.image.imread",
"numpy.concatenate",
"cv2.cvtColor",
"numpy.ravel",
"skimage.feature.hog"
] | [((296, 331), 'numpy.hstack', 'np.hstack', (['(color1, color2, color3)'], {}), '((color1, color2, color3))\n', (305, 331), True, 'import numpy as np\n'), ((490, 546), 'numpy.histogram', 'np.histogram', (['img[:, :, 0]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 0], bins=nbins, range=bins_range)\n', (502, 546), True, 'import numpy as np\n'), ((565, 621), 'numpy.histogram', 'np.histogram', (['img[:, :, 1]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 1], bins=nbins, range=bins_range)\n', (577, 621), True, 'import numpy as np\n'), ((640, 696), 'numpy.histogram', 'np.histogram', (['img[:, :, 2]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 2], bins=nbins, range=bins_range)\n', (652, 696), True, 'import numpy as np\n'), ((777, 847), 'numpy.concatenate', 'np.concatenate', (['(channel1_hist[0], channel2_hist[0], channel3_hist[0])'], {}), '((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n', (791, 847), True, 'import numpy as np\n'), ((1255, 1273), 'matplotlib.image.imread', 'mpimg.imread', (['file'], {}), '(file)\n', (1267, 1273), True, 'import matplotlib.image as mpimg\n'), ((2617, 2815), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'orient', 'pixels_per_cell': '(pix_per_cell, pix_per_cell)', 'cells_per_block': '(cell_per_block, cell_per_block)', 'transform_sqrt': '(False)', 'visualise': 'vis', 'feature_vector': 'feature_vec'}), '(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,\n visualise=vis, feature_vector=feature_vec)\n', (2620, 2815), False, 'from skimage.feature import hog\n'), ((3053, 3251), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'orient', 'pixels_per_cell': '(pix_per_cell, pix_per_cell)', 'cells_per_block': '(cell_per_block, cell_per_block)', 'transform_sqrt': '(False)', 'visualise': 'vis', 'feature_vector': 'feature_vec'}), '(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,\n visualise=vis, feature_vector=feature_vec)\n', (3056, 3251), False, 'from skimage.feature import hog\n'), ((3674, 3692), 'matplotlib.image.imread', 'mpimg.imread', (['file'], {}), '(file)\n', (3686, 3692), True, 'import matplotlib.image as mpimg\n'), ((148, 178), 'cv2.resize', 'cv2.resize', (['img[:, :, 0]', 'size'], {}), '(img[:, :, 0], size)\n', (158, 178), False, 'import cv2\n'), ((198, 228), 'cv2.resize', 'cv2.resize', (['img[:, :, 1]', 'size'], {}), '(img[:, :, 1], size)\n', (208, 228), False, 'import cv2\n'), ((248, 278), 'cv2.resize', 'cv2.resize', (['img[:, :, 2]', 'size'], {}), '(img[:, :, 2], size)\n', (258, 278), False, 'import cv2\n'), ((1912, 1926), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (1919, 1926), True, 'import numpy as np\n'), ((2305, 2354), 'numpy.concatenate', 'np.concatenate', (['(spatial_features, hist_features)'], {}), '((spatial_features, hist_features))\n', (2319, 2354), True, 'import numpy as np\n'), ((4331, 4345), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (4338, 4345), True, 'import numpy as np\n'), ((4773, 4795), 'numpy.ravel', 'np.ravel', (['hog_features'], {}), '(hog_features)\n', (4781, 4795), True, 'import numpy as np\n'), ((1419, 1457), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, cv2.COLOR_RGB2HSV)\n', (1431, 1457), False, 'import cv2\n'), ((3838, 3876), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, 
cv2.COLOR_RGB2HSV)\n', (3850, 3876), False, 'import cv2\n'), ((1524, 1562), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2LUV'], {}), '(image, cv2.COLOR_RGB2LUV)\n', (1536, 1562), False, 'import cv2\n'), ((3943, 3981), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2LUV'], {}), '(image, cv2.COLOR_RGB2LUV)\n', (3955, 3981), False, 'import cv2\n'), ((1629, 1667), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HLS'], {}), '(image, cv2.COLOR_RGB2HLS)\n', (1641, 1667), False, 'import cv2\n'), ((4048, 4086), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HLS'], {}), '(image, cv2.COLOR_RGB2HLS)\n', (4060, 4086), False, 'import cv2\n'), ((1734, 1772), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2YUV'], {}), '(image, cv2.COLOR_RGB2YUV)\n', (1746, 1772), False, 'import cv2\n'), ((4153, 4191), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2YUV'], {}), '(image, cv2.COLOR_RGB2YUV)\n', (4165, 4191), False, 'import cv2\n'), ((1841, 1881), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2YCrCb'], {}), '(image, cv2.COLOR_RGB2YCrCb)\n', (1853, 1881), False, 'import cv2\n'), ((4260, 4300), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2YCrCb'], {}), '(image, cv2.COLOR_RGB2YCrCb)\n', (4272, 4300), False, 'import cv2\n')] |
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
from numpy.random import random
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint
# placeholders to store the image and angle data
car_images = []
steering_angles = []
DATA_DIRS = ['/opt/carnd_p3/track1_recovery/','/opt/carnd_p3/data/']
for DATA_DIR in DATA_DIRS:
if DATA_DIR == '/opt/carnd_p3/data/':
driving_log = pd.read_csv(DATA_DIR + 'driving_log.csv') # read the driving log from the csv file
else:
driving_log = pd.read_csv(DATA_DIR + 'driving_log.csv',names=["center","left","right","steering","throttle","break","speed"])
# get the steering angle and apply correction factor to the left and right cameras
for _,line in driving_log.iterrows():
steering_center = float(line["steering"])
        # drop 10% of near-straight driving images to balance curve driving samples
        # (steering is normalized to [-1, 1]; the 0.1 "straight" cutoff is an assumption)
        if (abs(steering_center) > 0.1) or (random() > 0.1):
# create adjusted steering measurements for the side camera images
correction = 0.08 # this is a parameter to tune
steering_left = steering_center + correction
steering_right = steering_center - correction
path = DATA_DIR+"IMG/"
# data fetched from Linux
if DATA_DIR == '/opt/carnd_p3/data/':
split_token = '/'
else: # data fetched from Windows
split_token = '\\'
# read images from their path in the driving log
img_center = np.asarray(Image.open(path + line["center"].split(split_token)[-1]))
img_left = np.asarray(Image.open(path + line["left"].split(split_token)[-1]))
img_right = np.asarray(Image.open(path + line["right"].split(split_token)[-1]))
# add images and angles to data set
car_images.extend([img_center, img_left, img_right])
steering_angles.extend([steering_center, steering_left, steering_right])
# ## Augment Images
# apply horiontal flip to augment the datasets
augmented_images, augmented_measurements = [],[]
for image,measurement in zip(car_images,steering_angles):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(cv2.flip(image,1))
augmented_measurements.append(measurement*-1.0)
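# The horizontal flip mirrors the road, so the steering label must be negated;
# this doubles the data while balancing the left/right turn distribution.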
X = np.array(augmented_images,ndmin=4)
y = np.array(augmented_measurements)
print(X.shape)
# Model Architecture. From https://developer.nvidia.com/blog/deep-learning-self-driving-cars/
def drivingModel():
model = Sequential()
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Conv2D(24,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(64,(3,3),strides=(1,1),activation='relu'))
model.add(Conv2D(64,(3,3),strides=(1,1),activation='relu'))
model.add(Flatten())
model.add(Dense(1164,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(50,activation='relu'))
model.add(Dense(10,activation='relu'))
model.add(Dense(1))
return model
model = drivingModel()
print(model.summary())
# Callbacks
model_checkpoint_callback = ModelCheckpoint(
filepath='/home/workspace/CarND-Behavioral-Cloning-P3/model.h5',
save_best_only=True,
monitor='val_loss',
mode='min')
callbacks = [model_checkpoint_callback]
num_epochs=5
model.compile(loss='mse',optimizer='adam')
history_object = model.fit(X,y,epochs=num_epochs,validation_split=0.2,shuffle=True,callbacks=callbacks)
### plot the training and validation loss for each epoch
plt.figure()
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig("train_val_loss.png")
plt.close()
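# Hedged alternative (an assumption, not the pipeline above): for logs too large
# to hold in memory, feed Keras a generator instead of materializing X and y:
# def batch_generator(images, angles, batch_size=32):
#     while True:
#         images, angles = shuffle(images, angles)  # sklearn.utils.shuffle, imported above
#         for i in range(0, len(images), batch_size):
#             yield (np.array(images[i:i + batch_size]),
#                    np.array(angles[i:i + batch_size]))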
| [
"keras.layers.Conv2D",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"keras.layers.Dense",
"keras.layers.Cropping2D",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.savefig",
"keras.layers.Flatten",
"... | [((2615, 2650), 'numpy.array', 'np.array', (['augmented_images'], {'ndmin': '(4)'}), '(augmented_images, ndmin=4)\n', (2623, 2650), True, 'import numpy as np\n'), ((2654, 2686), 'numpy.array', 'np.array', (['augmented_measurements'], {}), '(augmented_measurements)\n', (2662, 2686), True, 'import numpy as np\n'), ((3618, 3761), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""/home/workspace/CarND-Behavioral-Cloning-P3/model.h5"""', 'save_best_only': '(True)', 'monitor': '"""val_loss"""', 'mode': '"""min"""'}), "(filepath=\n '/home/workspace/CarND-Behavioral-Cloning-P3/model.h5', save_best_only=\n True, monitor='val_loss', mode='min')\n", (3633, 3761), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4029, 4041), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4039, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4042, 4082), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (4050, 4082), True, 'import matplotlib.pyplot as plt\n'), ((4083, 4127), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (4091, 4127), True, 'import matplotlib.pyplot as plt\n'), ((4128, 4170), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (4137, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4171, 4208), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (4181, 4208), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4219, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4229, 4294), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (4239, 4294), True, 'import matplotlib.pyplot as plt\n'), ((4295, 4328), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""train_val_loss.png"""'], {}), "('train_val_loss.png')\n", (4306, 4328), True, 'import matplotlib.pyplot as plt\n'), ((4329, 4340), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4338, 4340), True, 'import matplotlib.pyplot as plt\n'), ((2830, 2842), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2840, 2842), False, 'from keras.models import Sequential\n'), ((657, 698), 'pandas.read_csv', 'pd.read_csv', (["(DATA_DIR + 'driving_log.csv')"], {}), "(DATA_DIR + 'driving_log.csv')\n", (668, 698), True, 'import pandas as pd\n'), ((773, 895), 'pandas.read_csv', 'pd.read_csv', (["(DATA_DIR + 'driving_log.csv')"], {'names': "['center', 'left', 'right', 'steering', 'throttle', 'break', 'speed']"}), "(DATA_DIR + 'driving_log.csv', names=['center', 'left', 'right',\n 'steering', 'throttle', 'break', 'speed'])\n", (784, 895), True, 'import pandas as pd\n'), ((2539, 2557), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2547, 2557), False, 'import cv2\n'), ((2857, 2917), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (2863, 2917), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((2929, 2968), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (2939, 
2968), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((2981, 3034), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(24, (5, 5), strides=(2, 2), activation='relu')\n", (2987, 3034), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3045, 3098), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(36, (5, 5), strides=(2, 2), activation='relu')\n", (3051, 3098), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3109, 3162), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(48, (5, 5), strides=(2, 2), activation='relu')\n", (3115, 3162), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3173, 3226), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(1, 1)', 'activation': '"""relu"""'}), "(64, (3, 3), strides=(1, 1), activation='relu')\n", (3179, 3226), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3237, 3290), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(1, 1)', 'activation': '"""relu"""'}), "(64, (3, 3), strides=(1, 1), activation='relu')\n", (3243, 3290), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3301, 3310), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3308, 3310), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3326, 3356), 'keras.layers.Dense', 'Dense', (['(1164)'], {'activation': '"""relu"""'}), "(1164, activation='relu')\n", (3331, 3356), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3371, 3400), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (3376, 3400), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3415, 3443), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (3420, 3443), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3458, 3486), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3463, 3486), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((3501, 3509), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3506, 3509), False, 'from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D\n'), ((1209, 1217), 'numpy.random.random', 'random', ([], {}), '()\n', (1215, 1217), False, 'from numpy.random import random\n')] |
#!/usr/bin/python3
"""
similarity_mapper2
"""
import sys
import pandas as pd
import numpy as np
big_data = pd.read_csv('ratings.csv')
all_films = np.unique(big_data.movieId)
for line in sys.stdin:
film, film_statistics = line.strip().split('\t', 1)
for f in all_films:
if int(film) < f:
print(film +','+ str(f) +'\t'+ film_statistics)
else:
print(str(f) +','+ film +'\t'+ film_statistics)
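# Emitting each pair with the smaller movieId first yields a canonical key, so
# the streaming shuffle sends the (a,b) and (b,a) statistics to the same reducer.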
| [
"numpy.unique",
"pandas.read_csv"
] | [((111, 137), 'pandas.read_csv', 'pd.read_csv', (['"""ratings.csv"""'], {}), "('ratings.csv')\n", (122, 137), True, 'import pandas as pd\n'), ((150, 177), 'numpy.unique', 'np.unique', (['big_data.movieId'], {}), '(big_data.movieId)\n', (159, 177), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import cv2
from model.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob,prep_noise_for_blob
import pdb
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
im_blob,im_noise, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
blobs['noise']=im_noise
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
if num_classes<=2:
gt_inds = np.where(roidb[0]['gt_classes'] != 100)[0]
else:
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
else:
# For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
if num_classes<=2:
      gt_inds = np.where((roidb[0]['gt_classes'] != 0) & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
    else:
      gt_inds = np.where((roidb[0]['gt_classes'] != 0) & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
#print(num_classes,gt_boxes)
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
return blobs
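# For a single-image batch the returned dict looks like (shapes illustrative):
#   blobs['data']     (1, H, W, 3) preprocessed image
#   blobs['noise']    (1, H, W, 3) noise input from prep_noise_for_blob
#   blobs['gt_boxes'] (num_gt, 5)  scaled [x1, y1, x2, y2, cls]
#   blobs['im_info']  (1, 3)       [height, width, scale]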
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
processed_noise = []
im_scales = []
for i in range(num_images):
#print(roidb[i]['image'])
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
if roidb[i]['noised']:
row,col,ch = im.shape
for bb in roidb[i]['boxes']:
bcol = bb[2]-bb[0]
brow = bb[3]-bb[1]
mean = 0
var = 5
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(brow,bcol,ch))
gauss = gauss.reshape(brow,bcol,ch)
im = im.astype(np.float32, copy=False)
#pdb.set_trace()
#aa=im.copy()
#cv2.imwrite('t.png',im)
im[bb[1]:bb[3],bb[0]:bb[2],:]=im[bb[1]:bb[3],bb[0]:bb[2],:]+gauss
#pdb.set_trace()
#cc=aa-im
#cv2.imwrite('tmp.png',im)
#mean = 0
#var = 5
#sigma = var**0.5
#gauss = np.random.normal(mean,sigma,(row,col,ch))
#gauss = gauss.reshape(row,col,ch)
#im = im.astype(np.float32, copy=False)
#im = im+gauss
if roidb[i]['JPGed']:
for bb in roidb[i]['boxes']:
cv2.imwrite('JPGed.jpg',im[bb[1]:bb[3],bb[0]:bb[2],:],[cv2.IMWRITE_JPEG_QUALITY, 70])
bb_jpged=cv2.imread('JPGed.jpg')
im[bb[1]:bb[3],bb[0]:bb[2],:]=bb_jpged
#pdb.set_trace()
#cv2.imwrite('JPGed.jpg',im,[cv2.IMWRITE_JPEG_QUALITY, 70])
#im=cv2.imread('JPGed.jpg')
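    # Hedged alternative (assumption): the same JPEG round-trip can be done
    # in memory, avoiding the temp file:
    #   ok, enc = cv2.imencode('.jpg', im[bb[1]:bb[3], bb[0]:bb[2], :],
    #                          [cv2.IMWRITE_JPEG_QUALITY, 70])
    #   im[bb[1]:bb[3], bb[0]:bb[2], :] = cv2.imdecode(enc, cv2.IMREAD_COLOR)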
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
noise, im_scale = prep_noise_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
processed_noise.append(noise)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
noise_blob = im_list_to_blob(processed_noise)
return blob,noise_blob, im_scales
| [
"numpy.random.normal",
"cv2.imwrite",
"utils.blob.prep_noise_for_blob",
"numpy.where",
"numpy.array",
"utils.blob.prep_im_for_blob",
"utils.blob.im_list_to_blob",
"cv2.imread"
] | [((2179, 2264), 'numpy.array', 'np.array', (['[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]]'], {'dtype': 'np.float32'}), '([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32\n )\n', (2187, 2264), True, 'import numpy as np\n'), ((4255, 4285), 'utils.blob.im_list_to_blob', 'im_list_to_blob', (['processed_ims'], {}), '(processed_ims)\n', (4270, 4285), False, 'from utils.blob import prep_im_for_blob, im_list_to_blob, prep_noise_for_blob\n'), ((4301, 4333), 'utils.blob.im_list_to_blob', 'im_list_to_blob', (['processed_noise'], {}), '(processed_noise)\n', (4316, 4333), False, 'from utils.blob import prep_im_for_blob, im_list_to_blob, prep_noise_for_blob\n'), ((2570, 2599), 'cv2.imread', 'cv2.imread', (["roidb[i]['image']"], {}), "(roidb[i]['image'])\n", (2580, 2599), False, 'import cv2\n'), ((3901, 3971), 'utils.blob.prep_im_for_blob', 'prep_im_for_blob', (['im', 'cfg.PIXEL_MEANS', 'target_size', 'cfg.TRAIN.MAX_SIZE'], {}), '(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE)\n', (3917, 3971), False, 'from utils.blob import prep_im_for_blob, im_list_to_blob, prep_noise_for_blob\n'), ((4074, 4147), 'utils.blob.prep_noise_for_blob', 'prep_noise_for_blob', (['im', 'cfg.PIXEL_MEANS', 'target_size', 'cfg.TRAIN.MAX_SIZE'], {}), '(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE)\n', (4093, 4147), False, 'from utils.blob import prep_im_for_blob, im_list_to_blob, prep_noise_for_blob\n'), ((1459, 1498), 'numpy.where', 'np.where', (["(roidb[0]['gt_classes'] != 100)"], {}), "(roidb[0]['gt_classes'] != 100)\n", (1467, 1498), True, 'import numpy as np\n'), ((1528, 1565), 'numpy.where', 'np.where', (["(roidb[0]['gt_classes'] != 0)"], {}), "(roidb[0]['gt_classes'] != 0)\n", (1536, 1565), True, 'import numpy as np\n'), ((2877, 2924), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', '(brow, bcol, ch)'], {}), '(mean, sigma, (brow, bcol, ch))\n', (2893, 2924), True, 'import numpy as np\n'), ((3533, 3627), 'cv2.imwrite', 'cv2.imwrite', (['"""JPGed.jpg"""', 'im[bb[1]:bb[3], bb[0]:bb[2], :]', '[cv2.IMWRITE_JPEG_QUALITY, 70]'], {}), "('JPGed.jpg', im[bb[1]:bb[3], bb[0]:bb[2], :], [cv2.\n IMWRITE_JPEG_QUALITY, 70])\n", (3544, 3627), False, 'import cv2\n'), ((3636, 3659), 'cv2.imread', 'cv2.imread', (['"""JPGed.jpg"""'], {}), "('JPGed.jpg')\n", (3646, 3659), False, 'import cv2\n')] |
#!/usr/bin/env python
import os
import numpy as np
from scipy.io import loadmat
print('Loading movie ratings dataset.\n\n')
os.chdir("/home/mgaber/Workbench/ML/Week9/exercise/ex8/")
# % Load movie data
load_data = loadmat('ex8_movies.mat')
Y = load_data['Y']
R = load_data['R']
# We should try to plot
# imagesc(Y);
# ylabel('Movies');
# xlabel('Users');
# load movie params
print('Loading features')
load_data = loadmat('ex8_movieParams.mat')
Theta = load_data['Theta']
X = load_data['X']
num_users = load_data['num_users'].flatten()[0]
num_movies = load_data['num_movies'].flatten()[0]
num_features = load_data['num_features'].flatten()[0]
def cofiCostFunc(X, Theta, Y, R, num_users, num_movies, num_features, lam=0):
X = X.reshape(num_movies, num_features)
Theta = Theta.reshape(num_users, num_features)
Y = Y.reshape(num_movies, num_users)
R = R.reshape(num_movies, num_users)
    # regularization term: (lambda / 2) * (sum of squared Theta + sum of squared X)
    reg = (lam / 2) * (np.sum(np.square(Theta)) + np.sum(np.square(X)))
    # only entries the user actually rated (R[i, j] == 1) contribute to the error
    err = (X.dot(np.transpose(Theta)) - Y) * R
    J = np.sum(np.square(err)) / 2 + reg
    # gradients (flatten and return these as well when feeding an optimizer)
    X_grad = err.dot(Theta) + lam * X
    Theta_grad = np.transpose(err).dot(X) + lam * Theta
    return J
J = cofiCostFunc(X, Theta, Y, R, num_users, num_movies, num_features, 0)
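# Sanity check on a reduced data set (a sketch following the course exercise's
# convention, which reports a cost of about 22.22 for these parameters):
nu, nm, nf = 4, 5, 3
J_small = cofiCostFunc(X[:nm, :nf], Theta[:nu, :nf], Y[:nm, :nu], R[:nm, :nu],
                       nu, nm, nf, 0)
print('Cost at loaded parameters (reduced data set): %f' % J_small)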
| [
"os.chdir",
"numpy.transpose",
"scipy.io.loadmat",
"numpy.square"
] | [((126, 183), 'os.chdir', 'os.chdir', (['"""/home/mgaber/Workbench/ML/Week9/exercise/ex8/"""'], {}), "('/home/mgaber/Workbench/ML/Week9/exercise/ex8/')\n", (134, 183), False, 'import os\n'), ((217, 242), 'scipy.io.loadmat', 'loadmat', (['"""ex8_movies.mat"""'], {}), "('ex8_movies.mat')\n", (224, 242), False, 'from scipy.io import loadmat\n'), ((418, 448), 'scipy.io.loadmat', 'loadmat', (['"""ex8_movieParams.mat"""'], {}), "('ex8_movieParams.mat')\n", (425, 448), False, 'from scipy.io import loadmat\n'), ((1142, 1158), 'numpy.square', 'np.square', (['Theta'], {}), '(Theta)\n', (1151, 1158), True, 'import numpy as np\n'), ((1169, 1181), 'numpy.square', 'np.square', (['X'], {}), '(X)\n', (1178, 1181), True, 'import numpy as np\n'), ((1292, 1311), 'numpy.transpose', 'np.transpose', (['Theta'], {}), '(Theta)\n', (1304, 1311), True, 'import numpy as np\n')] |
import inspect
import logging
from numpy import exp, log, average
from .metric_directionality import greater_is_better, best_in_series, idxbest
def random_model_group(df, train_end_time, n=1):
"""Pick a random model group (as a baseline)
Arguments:
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
below_best
        n (int) -- number of model group ids to return
    Returns: (list) up to n randomly selected model group ids
"""
return df["model_group_id"].drop_duplicates().sample(frac=1).tolist()[:n]
def _mg_best_avg_by(df, value_col, metric, n=1):
"""Best model group in dataframe by average of some column
Args:
df (pandas.DataFrame)
value_col (str) The column which contains the value to be averaged
metric (str) the name of the column
n (int) numbers of model group id
"""
if n == 1:
return [
getattr(
df.groupby(["model_group_id"])[value_col].mean().sample(frac=1),
idxbest(metric),
)()
]
else:
if greater_is_better(metric):
return (
df.groupby(["model_group_id"])[value_col]
.mean()
.nlargest(n)
.index.tolist()
)
else:
return (
df.groupby(["model_group_id"])[value_col]
.mean()
.nsmallest(n)
.index.tolist()
)
def best_current_value(df, train_end_time, metric, parameter, n=1):
"""Pick the model group with the best current metric value
Arguments:
metric (string) -- model evaluation metric, such as 'precision@'
parameter (string) -- model evaluation metric parameter,
such as '300_abs'
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
dist_from_best_case
        n (int) -- number of model group ids to return
    Returns: (list) the n model group ids with the best current raw metric value
"""
curr_df = df.loc[
(df["train_end_time"] == train_end_time)
& (df["metric"] == metric)
& (df["parameter"] == parameter)
]
# sample(frac=1) to shuffle rows so we don't accidentally introduce bias in breaking ties
best_raw_value = getattr(curr_df["raw_value"], best_in_series(metric))()
if n <= 1:
return (
curr_df.loc[curr_df["raw_value"] == best_raw_value, "model_group_id"]
.sample(frac=1)
.tolist()
)
else:
if greater_is_better(metric):
result = curr_df.nlargest(n, "raw_value")["model_group_id"].tolist()
return result
else:
result = curr_df.nsmallest(n, "raw_value")["model_group_id"].tolist()
return result
def best_average_value(df, train_end_time, metric, parameter, n=1):
"""Pick the model with the highest average metric value so far
Arguments:
metric (string) -- model evaluation metric, such as 'precision@'
parameter (string) -- model evaluation metric parameter,
such as '300_abs'
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
dist_from_best_case
        n (int) -- number of model group ids to return
    Returns: (list) the n model group ids with the best mean raw metric value
"""
met_df = df.loc[(df["metric"] == metric) & (df["parameter"] == parameter)]
return _mg_best_avg_by(met_df, "raw_value", metric, n)
def lowest_metric_variance(df, train_end_time, metric, parameter, n=1):
"""Pick the model with the lowest metric variance so far
Arguments:
metric (string) -- model evaluation metric, such as 'precision@'
parameter (string) -- model evaluation metric parameter,
such as '300_abs'
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
below_best
        n (int) -- number of model group ids to return
    Returns: (list) the n model group ids with the lowest metric variance
"""
met_df = (
df.loc[(df["metric"] == metric) & (df["parameter"] == parameter)]
.groupby(["model_group_id"])["raw_value"]
.std()
)
if met_df.isnull().sum() == met_df.shape[0]:
        # variance will be undefined in first time window since we only have one observation
# per model group
logging.info(
"Null metric variances for %s %s at %s; picking at random",
metric,
parameter,
train_end_time,
)
return df["model_group_id"].drop_duplicates().sample(n=n).tolist()
elif met_df.isnull().sum() > 0:
# the variances should be all null or no nulls, a mix shouldn't be possible
# since we should have the same number of observations for every model group
raise ValueError(
"Mix of null and non-null metric variances for or {} {} at {}".format(
metric, parameter, train_end_time
)
)
if n == 1:
# sample(frac=1) to shuffle rows so we don't accidentally introduce bias in breaking ties
return [met_df.sample(frac=1).idxmin()]
else:
return met_df.nsmallest(n).index.tolist()
def most_frequent_best_dist(
df, train_end_time, metric, parameter, dist_from_best_case, n=1
):
"""Pick the model that is most frequently within `dist_from_best_case` from the
best-performing model group across test sets so far
Arguments:
dist_from_best_case (float) -- distance from the best performing model
metric (string) -- model evaluation metric, such as 'precision@'
parameter (string) -- model evaluation metric parameter,
such as '300_abs'
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
below_best
        n (int) -- number of model group ids to return
    Returns: (list) the n model group ids most frequently within dist_from_best_case of the best model group
"""
met_df = df.loc[(df["metric"] == metric) & (df["parameter"] == parameter)]
met_df["within_dist"] = (df["dist_from_best_case"] <= dist_from_best_case).astype(
"int"
)
if n == 1:
# sample(frac=1) to shuffle rows so we don't accidentally introduce bias in breaking ties
return [
met_df.groupby(["model_group_id"])["within_dist"]
.mean()
.sample(frac=1)
.idxmax()
]
else:
return (
met_df.groupby(["model_group_id"])["within_dist"]
.mean()
.nlargest(n)
.index.tolist()
)
def best_average_two_metrics(
df,
train_end_time,
metric1,
parameter1,
metric2,
parameter2,
metric1_weight=0.5,
n=1,
):
"""Pick the model with the highest average combined value to date
of two metrics weighted together using `metric1_weight`
Arguments:
metric1_weight (float) -- relative weight of metric1, between 0 and 1
metric1 (string) -- model evaluation metric, such as 'precision@'
parameter1 (string) -- model evaluation metric parameter,
such as '300_abs'
metric2 (string) -- model evaluation metric, such as 'precision@'
parameter2 (string) -- model evaluation metric parameter,
such as '300_abs'
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
below_best
        n (int) -- number of model group ids to return
    Returns: (list) the n model group ids with the best weighted average of the two metrics
"""
if metric1_weight < 0 or metric1_weight > 1:
raise ValueError("Metric weight must be between 0 and 1")
metric1_dir = greater_is_better(metric1)
metric2_dir = greater_is_better(metric2)
if metric1_dir != metric2_dir:
raise ValueError("Metric directionalities must be the same")
met_df = df.loc[
((df["metric"] == metric1) & (df["parameter"] == parameter1))
| ((df["metric"] == metric2) & (df["parameter"] == parameter2))
]
met_df.loc[
(met_df["metric"] == metric1) & (met_df["parameter"] == parameter1),
"weighted_raw",
] = (
met_df.loc[
(met_df["metric"] == metric1) & (met_df["parameter"] == parameter1),
"raw_value",
]
* metric1_weight
)
met_df.loc[
(met_df["metric"] == metric2) & (met_df["parameter"] == parameter2),
"weighted_raw",
] = met_df.loc[
(met_df["metric"] == metric2) & (met_df["parameter"] == parameter2), "raw_value"
] * (
1.0 - metric1_weight
)
met_df_wt = met_df.groupby(
["model_group_id", "train_end_time"], as_index=False
).sum()
# sample(frac=1) to shuffle rows so we don't accidentally introduce bias in breaking ties
return _mg_best_avg_by(met_df_wt, "weighted_raw", metric1, n)
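# Worked weighting example (assumed values): with metric1_weight=0.6 and raw
# values 0.50 for metric1 and 0.40 for metric2 on one model/date,
#   weighted_raw = 0.6 * 0.50 + (1 - 0.6) * 0.40 = 0.46
# and the model group with the best average weighted_raw across dates wins.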
def best_avg_var_penalized(df, train_end_time, metric, parameter, stdev_penalty, n=1):
"""Pick the model with the highest
average metric value so far, placing less weight in older
results. You need to specify two parameters: the shape of how the
weight affects points (decay_type, linear or exponential) and the relative
weight of the most recent point (curr_weight).
Arguments:
stdev_penalty (float) -- penalty for instability
metric (string) -- model evaluation metric, such as 'precision@'
parameter (string) -- model evaluation metric parameter,
such as '300_abs'
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
below_best
        n (int) -- number of model group ids to return
    Returns: (list) the n model group ids with the best variance-penalized average metric value
"""
# for metrics where smaller values are better, the penalty for instability should
# add to the mean, so introduce a factor of -1
stdev_penalty = stdev_penalty if greater_is_better(metric) else -1.0 * stdev_penalty
met_df = df.loc[(df["metric"] == metric) & (df["parameter"] == parameter)]
met_df_grp = met_df.groupby(["model_group_id"]).aggregate(
{"raw_value": ["mean", "std"]}
)
met_df_grp.columns = met_df_grp.columns.droplevel(0)
met_df_grp.columns = ["raw_avg", "raw_stdev"]
if met_df_grp["raw_stdev"].isnull().sum() == met_df_grp.shape[0]:
        # variance will be undefined in first time window since we only have one observation
# per model group
logging.info(
"Null metric variances for %s %s at %s; just using mean",
metric,
parameter,
train_end_time,
)
return [getattr(met_df_grp["raw_avg"].sample(frac=1), idxbest(metric))()]
elif met_df_grp["raw_stdev"].isnull().sum() > 0:
# the variances should be all null or no nulls, a mix shouldn't be possible
# since we should have the same number of observations for every model group
raise ValueError(
"Mix of null and non-null metric variances for or {} {} at {}".format(
metric, parameter, train_end_time
)
)
min_stdev = met_df_grp["raw_stdev"].min()
met_df_grp["penalized_avg"] = met_df_grp["raw_avg"] - stdev_penalty * (
met_df_grp["raw_stdev"] - min_stdev
)
if n == 1:
# sample(frac=1) to shuffle rows so we don't accidentally introduce bias in breaking ties
return [getattr(met_df_grp["penalized_avg"].sample(frac=1), idxbest(metric))()]
else:
if greater_is_better(metric):
return met_df_grp["penalized_avg"].nlargest(n).index.tolist()
else:
return met_df_grp["penalized_avg"].nsmallest(n).index.tolist()
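# Worked penalty example (assumed values): with stdev_penalty=0.5, a group-wide
# minimum stdev of 0.01, and a group whose raw_avg=0.70 and raw_stdev=0.05,
#   penalized_avg = 0.70 - 0.5 * (0.05 - 0.01) = 0.68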
def best_avg_recency_weight(
df, train_end_time, metric, parameter, curr_weight, decay_type, n=1
):
"""Pick the model with the highest average metric value so far, penalized
for relative variance as:
avg_value - (stdev_penalty) * (stdev - min_stdev)
where min_stdev is the minimum standard deviation of the metric across all
model groups
Arguments:
decay_type (string) -- either 'linear' or 'exponential'; the shape of
how the weights fall off between the current and first point
curr_weight (float) -- amount of weight to put on the most recent point,
relative to the first point (e.g., a value of 5.0 would mean the
current data is weighted 5 times as much as the first one)
metric (string) -- model evaluation metric, such as 'precision@'
parameter (string) -- model evaluation metric parameter,
such as '300_abs'
train_end_time (Timestamp) -- current train end time
df (pandas.DataFrame) -- dataframe containing the columns:
model_group_id,
model_id,
train_end_time,
metric,
parameter,
raw_value,
below_best
        n (int) -- number of model group ids to return
    Returns: (list) the n model group ids with the best recency-weighted average metric value
"""
# curr_weight is amount of weight to put on current point, relative to the first point
# (e.g., if the first point has a weight of 1.0)
    # decay type is linear or exponential
first_date = df["train_end_time"].min()
df["days_out"] = (df["train_end_time"] - first_date).apply(lambda x: float(x.days))
tmax = df["days_out"].max()
if tmax == 0:
# only one date (must be on first time point), so everything gets a weight of 1
df["weight"] = 1.0
elif decay_type == "linear":
# weight = (curr_weight - 1.0) * (t/tmax) + 1.0
df["weight"] = (curr_weight - 1.0) * (df["days_out"] / tmax) + 1.0
elif decay_type == "exponential":
# weight = exp(ln(curr_weight)*t/tmax)
df["weight"] = exp(log(curr_weight) * df["days_out"] / tmax)
else:
raise ValueError("Must specify linear or exponential decay type")
def wm(x):
return average(x, weights=df.loc[x.index, "weight"])
met_df = df.loc[(df["metric"] == metric) & (df["parameter"] == parameter)]
if n == 1:
# sample(frac=1) to shuffle rows so we don't accidentally introduce bias in breaking ties
result = getattr(
met_df.groupby(["model_group_id"])
.aggregate({"raw_value": wm})
.sample(frac=1),
idxbest(metric),
)()
return result.tolist()
else:
met_df_grp = met_df.groupby(["model_group_id"]).aggregate({"raw_value": wm})
if greater_is_better(metric):
return met_df_grp["raw_value"].nlargest(n).index.tolist()
else:
return met_df_grp["raw_value"].nsmallest(n).index.tolist()
SELECTION_RULES = {
"random_model_group": random_model_group,
"best_current_value": best_current_value,
"best_average_value": best_average_value,
"lowest_metric_variance": lowest_metric_variance,
"most_frequent_best_dist": most_frequent_best_dist,
"best_average_two_metrics": best_average_two_metrics,
"best_avg_var_penalized": best_avg_var_penalized,
"best_avg_recency_weight": best_avg_recency_weight,
}
class BoundSelectionRule(object):
"""A selection rule bound with a set of arguments
Args:
        args (dict) A set of keyword arguments that should be sufficient
to call the function when a dataframe and train_end_time is added
function_name (string, optional) The name of a function in SELECTION_RULES
descriptive_name (string, optional) A descriptive name, used in charts
If none is given it will be automatically constructed
function (function, optional) A function
"""
def __init__(self, args, function_name=None, descriptive_name=None, function=None):
if not function_name and not function:
raise ValueError("Need either function_name or function")
if not descriptive_name and not function_name:
raise ValueError("Need either descriptive_name or function_name")
self.args = args
self.function_name = function_name
self._function = function
self._descriptive_name = descriptive_name
@property
def function(self):
if not self._function:
self._function = SELECTION_RULES[self.function_name]
return self._function
@property
def descriptive_name(self):
if not self._descriptive_name:
self._descriptive_name = self._build_descriptive_name()
return self._descriptive_name
def __str__(self):
return self.descriptive_name
def _build_descriptive_name(self):
"""Build a descriptive name for the bound selection rule
Constructed using the function name and arguments.
"""
argspec = inspect.getargspec(self.function)
args = [arg for arg in argspec.args if arg not in ["df", "train_end_time", "n"]]
return "_".join([self.function_name] + [str(self.args[key]) for key in args])
def pick(self, dataframe, train_end_time):
"""Run the selection rule for a given time on a dataframe
Args:
dataframe (pandas.DataFrame)
train_end_time (timestamp) Current train end time
        Returns: (list) the selected model group ids
"""
return self.function(dataframe, train_end_time, **(self.args))
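# Minimal usage sketch (hypothetical data; assumes 'precision@' is registered
# as a greater-is-better metric in metric_directionality, and runs only when
# the package module is executed directly):
if __name__ == "__main__":
    import pandas as pd

    evaluations = pd.DataFrame({
        "model_group_id": [1, 1, 2, 2],
        "model_id": [10, 11, 20, 21],
        "train_end_time": pd.to_datetime(
            ["2016-01-01", "2016-07-01", "2016-01-01", "2016-07-01"]),
        "metric": ["precision@"] * 4,
        "parameter": ["300_abs"] * 4,
        "raw_value": [0.60, 0.65, 0.55, 0.70],
        "dist_from_best_case": [0.00, 0.05, 0.05, 0.00],
    })
    rule = BoundSelectionRule(
        args={"metric": "precision@", "parameter": "300_abs"},
        function_name="best_current_value",
    )
    # picks model group 2, whose raw_value (0.70) is best at the given date
    print(rule.pick(evaluations, pd.Timestamp("2016-07-01")))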
| [
"numpy.log",
"inspect.getargspec",
"logging.info",
"numpy.average"
] | [((5345, 5456), 'logging.info', 'logging.info', (['"""Null metric variances for %s %s at %s; picking at random"""', 'metric', 'parameter', 'train_end_time'], {}), "('Null metric variances for %s %s at %s; picking at random',\n metric, parameter, train_end_time)\n", (5357, 5456), False, 'import logging\n'), ((12099, 12208), 'logging.info', 'logging.info', (['"""Null metric variances for %s %s at %s; just using mean"""', 'metric', 'parameter', 'train_end_time'], {}), "('Null metric variances for %s %s at %s; just using mean',\n metric, parameter, train_end_time)\n", (12111, 12208), False, 'import logging\n'), ((15647, 15692), 'numpy.average', 'average', (['x'], {'weights': "df.loc[x.index, 'weight']"}), "(x, weights=df.loc[x.index, 'weight'])\n", (15654, 15692), False, 'from numpy import exp, log, average\n'), ((18476, 18509), 'inspect.getargspec', 'inspect.getargspec', (['self.function'], {}), '(self.function)\n', (18494, 18509), False, 'import inspect\n'), ((15490, 15506), 'numpy.log', 'log', (['curr_weight'], {}), '(curr_weight)\n', (15493, 15506), False, 'from numpy import exp, log, average\n')] |
import talib
import numpy as np
import jtrade.core.instrument.equity as Equity
# ========== TECH OVERLAP INDICATORS **START** ==========
def BBANDS(equity, start=None, end=None, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
"""Bollinger Bands
:param timeperiod:
:param nbdevup:
:param nbdevdn:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
upperband, middleband, lowerband = talib.BBANDS(close, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn, matype=matype)
return upperband, middleband, lowerband
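# Minimal usage sketch (hypothetical `eq`): every wrapper below only assumes
# `equity.hp` is an OHLCV DataFrame sliceable by date, e.g.:
#   upper, middle, lower = BBANDS(eq, start='2017-01-01', end='2017-12-31',
#                                 timeperiod=20)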
def DEMA(equity, start=None, end=None, timeperiod=30):
"""Double Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DEMA(close, timeperiod=timeperiod)
return real
def EMA(equity, start=None, end=None, timeperiod=30):
"""Exponential Moving Average
NOTE: The EMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.EMA(close, timeperiod=timeperiod)
return real
def HT_TRENDLINE(equity, start=None, end=None):
"""Hilbert Transform - Instantaneous Trendline
NOTE: The HT_TRENDLINE function has an unstable period.
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.HT_TRENDLINE(close)
return real
def KAMA(equity, start=None, end=None, timeperiod=30):
"""Kaufman Adaptive Moving Average
NOTE: The KAMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.KAMA(close, timeperiod=timeperiod)
return real
def MA(equity, start=None, end=None, timeperiod=30, matype=0):
"""Moving average
:param timeperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MA(close, timeperiod=timeperiod, matype=matype)
return real
def MAMA(equity, start=None, end=None, fastlimit=0, slowlimit=0):
"""MESA Adaptive Moving Average
NOTE: The MAMA function has an unstable period.
:param fastlimit:
:param slowlimit:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
mama, fama = talib.MAMA(close, fastlimit=fastlimit, slowlimit=slowlimit)
return mama, fama
def MAVP(equity, periods, start=None, end=None, minperiod=2, maxperiod=30, matype=0):
"""Moving average with variable period
:param periods:
:param minperiod:
:param maxperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MAVP(close, periods, minperiod=minperiod, maxperiod=maxperiod, matype=matype)
return real
def MIDPOINT(equity, start=None, end=None, timeperiod=14):
"""MidPoint over period
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MIDPOINT(close, timeperiod=timeperiod)
return real
def MIDPRICE(equity, start=None, end=None, timeperiod=14):
"""Midpoint Price over period
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MIDPRICE(high, low, timeperiod=timeperiod)
return real
def SAR(equity, start=None, end=None, acceleration=0, maximum=0):
"""Parabolic SAR
:param acceleration:
:param maximum:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAR(high, low, acceleration=acceleration, maximum=maximum)
return real
def SAREXT(equity, start=None, end=None, startvalue=0, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0,
accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0):
"""Parabolic SAR - Extended
:param startvalue:
:param offsetonreverse:
:param accelerationinitlong:
:param accelerationlong:
:param accelerationmaxlong:
:param accelerationinitshort:
:param accelerationshort:
:param accelerationmaxshort:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAREXT(high, low, startvalue=startvalue, offsetonreverse=offsetonreverse, accelerationinitlong=accelerationinitlong,
accelerationlong=accelerationlong, accelerationmaxlong=accelerationmaxlong, accelerationinitshort=accelerationinitshort,
accelerationshort=accelerationshort, accelerationmaxshort=accelerationmaxshort)
return real
def SMA(equity, start=None, end=None, timeperiod=30):
"""Simple Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.SMA(close, timeperiod=timeperiod)
return real
def T3(equity, start=None, end=None, timeperiod=5, vfactor=0):
"""Triple Exponential Moving Average (T3)
NOTE: The T3 function has an unstable period.
:param timeperiod:
:param vfactor:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.T3(close, timeperiod=timeperiod, vfactor=vfactor)
return real
def TEMA(equity, start=None, end=None, timeperiod=30):
"""Triple Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TEMA(close, timeperiod=timeperiod)
return real
def TRIMA(equity, start=None, end=None, timeperiod=30):
"""Triangular Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TRIMA(close, timeperiod=timeperiod)
return real
def WMA(equity, start=None, end=None, timeperiod=30):
"""Weighted Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WMA(close, timeperiod=timeperiod)
return real
# ========== TECH OVERLAP INDICATORS **END** ==========
# ========== TECH MOMENTUM INDICATORS **START** ==========
def ADX(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index
NOTE: The ADX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADX(high, low, close, timeperiod=timeperiod)
return real
def ADXR(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index Rating
NOTE: The ADXR function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADXR(high, low, close, timeperiod=timeperiod)
return real
def APO(equity, start=None, end=None, fastperiod=12, slowperiod=26, matype=0):
"""Absolute Price Oscillator
:param fastperiod:
:param slowperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.APO(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)
return real
def AROON(equity, start=None, end=None, timeperiod=14):
"""Aroon
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
aroondown, aroonup = talib.AROON(high, low, timeperiod=timeperiod)
return aroondown, aroonup
def AROONOSC(equity, start=None, end=None, timeperiod=14):
"""Aroon Oscillator
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.AROONOSC(high, low, timeperiod=timeperiod)
return real
def BOP(equity, start=None, end=None):
"""Balance Of Power
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.BOP(opn, high, low, close)
return real
def CCI(equity, start=None, end=None, timeperiod=14):
"""Commodity Channel Index
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CCI(high, low, close, timeperiod=timeperiod)
return real
def CMO(equity, start=None, end=None, timeperiod=14):
"""Chande Momentum Oscillator
NOTE: The CMO function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CMO(close, timeperiod=timeperiod)
return real
def DX(equity, start=None, end=None, timeperiod=14):
"""Directional Movement Index
NOTE: The DX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DX(high, low, close, timeperiod=timeperiod)
return real
def MACD(equity, start=None, end=None, fastperiod=12, slowperiod=26, signalperiod=9):
"""Moving Average Convergence/Divergence
:param fastperiod:
:param slowperiod:
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACD(close, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)
return macd, macdsignal, macdhist
def MACDEXT(equity, start=None, end=None, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0):
"""MACD with controllable MA type
:param fastperiod:
:param fastmatype:
:param slowperiod:
:param slowmatype:
:param signalperiod:
:param signalmatype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    macd, macdsignal, macdhist = talib.MACDEXT(close, fastperiod=fastperiod, fastmatype=fastmatype,
                                               slowperiod=slowperiod, slowmatype=slowmatype,
                                               signalperiod=signalperiod, signalmatype=signalmatype)
return macd, macdsignal, macdhist
def MACDFIX(equity, start=None, end=None, signalperiod=9):
"""Moving Average Convergence/Divergence Fix 12/26
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACDFIX(close, signalperiod=signalperiod)
return macd, macdsignal, macdhist
def MFI(equity, start=None, end=None, timeperiod=14):
"""Money Flow Index
NOTE: The MFI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
volume = np.array(equity.hp.loc[start:end, 'volume'], dtype='f8')
real = talib.MFI(high, low, close, volume, timeperiod=timeperiod)
return real
def MINUS_DI(equity, start=None, end=None, timeperiod=14):
"""Minus Directional signal
NOTE: The MINUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MINUS_DI(high, low, close, timeperiod=timeperiod)
return real
def MINUS_DM(equity, start=None, end=None, timeperiod=14):
"""Minus Directional Movement
NOTE: The MINUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MINUS_DM(high, low, timeperiod=timeperiod)
return real
def MOM(equity, start=None, end=None, timeperiod=10):
"""Momentum
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MOM(close, timeperiod=timeperiod)
return real
def PLUS_DI(equity, start=None, end=None, timeperiod=14):
"""Plus Directional signal
NOTE: The PLUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.PLUS_DI(high, low, close, timeperiod=timeperiod)
return real
def PLUS_DM(equity, start=None, end=None, timeperiod=14):
"""Plus Directional Movement
NOTE: The PLUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.PLUS_DM(high, low, timeperiod=timeperiod)
return real
def PPO(equity, start=None, end=None, fastperiod=12, slowperiod=26, matype=0):
"""Percentage Price Oscillator
:param fastperiod:
:param slowperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.PPO(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)
return real
def ROC(equity, start=None, end=None, timeperiod=10):
"""Rate of change : ((price/prevPrice)-1)*100
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROC(close, timeperiod=timeperiod)
return real
def ROCP(equity, start=None, end=None, timeperiod=10):
"""Rate of change Percentage: (price-prevPrice)/prevPrice
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCP(close, timeperiod=timeperiod)
return real
def ROCR(equity, start=None, end=None, timeperiod=10):
"""Rate of change ratio: (price/prevPrice)
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCR(close, timeperiod=timeperiod)
return real
def ROCR100(equity, start=None, end=None, timeperiod=10):
"""Rate of change ratio 100 scale: (price/prevPrice)*100
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCR100(close, timeperiod=timeperiod)
return real
def RSI(equity, start=None, end=None, timeperiod=14):
"""Relative Strength Index
NOTE: The RSI function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.RSI(close, timeperiod=timeperiod)
return real
def STOCH(equity, start=None, end=None, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0):
"""Stochastic
:param fastk_period:
:param slowk_period:
:param slowk_matype:
:param slowd_period:
:param slowd_matype:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
slowk, slowd = talib.STOCH(high, low, close, fastk_period=fastk_period, slowk_period=slowk_period,
slowk_matype=slowk_matype, slowd_period=slowd_period, slowd_matype=slowd_matype)
return slowk, slowd
def STOCHF(equity, start=None, end=None, fastk_period=5, fastd_period=3, fastd_matype=0):
"""Stochastic Fast
:param fastk_period:
:param fastd_period:
:param fastd_matype:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
fastk, fastd = talib.STOCHF(high, low, close, fastk_period=fastk_period, fastd_period=fastd_period,
fastd_matype=fastd_matype)
return fastk, fastd
def STOCHRSI(equity, start=None, end=None, timeperiod=14, fastk_period=5, fastd_period=3, fastd_matype=0):
"""Stochastic Relative Strength Index
NOTE: The STOCHRSI function has an unstable period.
:param timeperiod:
:param fastk_period:
:param fastd_period:
:param fastd_matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
fastk, fastd = talib.STOCHRSI(close, timeperiod=timeperiod, fastk_period=fastk_period,
fastd_period=fastd_period, fastd_matype=fastd_matype)
return fastk, fastd
def TRIX(equity, start=None, end=None, timeperiod=30):
"""1-day Rate-Of-Change (ROC) of a Triple Smooth EMA
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TRIX(close, timeperiod=timeperiod)
return real
def ULTOSC(equity, start=None, end=None, timeperiod1=7, timeperiod2=14, timeperiod3=28):
"""Ultimate Oscillator
:param timeperiod1:
:param timeperiod2:
:param timeperiod3:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    real = talib.ULTOSC(high, low, close, timeperiod1=timeperiod1, timeperiod2=timeperiod2, timeperiod3=timeperiod3)
    return real
def WILLR(equity, start=None, end=None, timeperiod=14):
"""Williams' %R
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    real = talib.WILLR(high, low, close, timeperiod=timeperiod)
return real
# ========== TECH MOMENTUM INDICATORS **END** ==========
# ========== PRICE TRANSFORM FUNCTIONS **START** ==========
def AVGPRICE(equity, start=None, end=None):
"""Average Price
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.AVGPRICE(opn, high, low, close)
return real
def MEDPRICE(equity, start=None, end=None):
"""Median Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MEDPRICE(high, low)
return real
def TYPPRICE(equity, start=None, end=None):
"""Typical Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TYPPRICE(high, low, close)
return real
def WCLPRICE(equity, start=None, end=None):
"""Weighted Close Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WCLPRICE(high, low, close)
return real
# ========== PRICE TRANSFORM FUNCTIONS **END** ==========
# ========== PATTERN RECOGNITION FUNCTIONS **START** ==========
def CDL2CROWS(equity, start=None, end=None):
"""Two Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL2CROWS(opn, high, low, close)
return integer
def CDL3BLACKCROWS(equity, start=None, end=None):
"""Three Black Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3BLACKCROWS(opn, high, low, close)
return integer
def CDL3INSIDE(equity, start=None, end=None):
"""Three Inside Up/Down
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3INSIDE(opn, high, low, close)
return integer
def CDL3LINESTRIKE(equity, start=None, end=None):
"""Three-Line Strike
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3LINESTRIKE(opn, high, low, close)
return integer
def CDL3OUTSIDE(equity, start=None, end=None):
"""Three Outside Up/Down
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3OUTSIDE(opn, high, low, close)
return integer
def CDL3STARSINSOUTH(equity, start=None, end=None):
"""Three Stars In The South
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3STARSINSOUTH(opn, high, low, close)
return integer
def CDL3WHITESOLDIERS(equity, start=None, end=None):
"""Three Advancing White Soldiers
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3WHITESOLDIERS(opn, high, low, close)
return integer
def CDLABANDONEDBABY(equity, start=None, end=None, penetration=0):
"""Abandoned Baby
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLABANDONEDBABY(opn, high, low, close, penetration=penetration)
return integer
def CDLADVANCEBLOCK(equity, start=None, end=None):
"""Advance Block
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLADVANCEBLOCK(opn, high, low, close)
return integer
def CDLBELTHOLD(equity, start=None, end=None):
"""Belt-hold
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLBELTHOLD(opn, high, low, close)
return integer
def CDLBREAKAWAY(equity, start=None, end=None):
"""Breakaway
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLBREAKAWAY(opn, high, low, close)
return integer
def CDLCLOSINGMARUBOZU(equity, start=None, end=None):
"""Closing Marubozu
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCLOSINGMARUBOZU(opn, high, low, close)
return integer
def CDLCONCEALBABYSWALL(equity, start=None, end=None):
"""Concealing Baby Swallow
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCONCEALBABYSWALL(opn, high, low, close)
return integer
def CDLCOUNTERATTACK(equity, start=None, end=None):
"""Counterattack
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCOUNTERATTACK(opn, high, low, close)
return integer
def CDLDARKCLOUDCOVER(equity, start=None, end=None, penetration=0):
"""Dark Cloud Cover
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDARKCLOUDCOVER(opn, high, low, close, penetration=penetration)
return integer
def CDLDOJI(equity, start=None, end=None):
"""Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDOJI(opn, high, low, close)
return integer
def CDLDOJISTAR(equity, start=None, end=None):
"""Doji Star
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDOJISTAR(opn, high, low, close)
return integer
def CDLDRAGONFLYDOJI(equity, start=None, end=None):
"""Dragonfly Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDRAGONFLYDOJI(opn, high, low, close)
return integer
def CDLENGULFING(equity, start=None, end=None):
"""Engulfing Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLENGULFING(opn, high, low, close)
return integer
def CDLEVENINGDOJISTAR(equity, start=None, end=None, penetration=0):
"""Evening Doji Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLEVENINGDOJISTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLEVENINGSTAR(equity, start=None, end=None, penetration=0):
"""Evening Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLEVENINGSTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLGAPSIDESIDEWHITE(equity, start=None, end=None):
"""Up/Down-gap side-by-side white lines
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLGAPSIDESIDEWHITE(opn, high, low, close)
return integer
def CDLGRAVESTONEDOJI(equity, start=None, end=None):
"""Gravestone Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLGRAVESTONEDOJI(opn, high, low, close)
return integer
def CDLHAMMER(equity, start=None, end=None):
"""Hammer
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHAMMER(opn, high, low, close)
return integer
def CDLHANGINGMAN(equity, start=None, end=None):
"""Hanging Man
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHANGINGMAN(opn, high, low, close)
return integer
def CDLHARAMI(equity, start=None, end=None):
"""Harami Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHARAMI(opn, high, low, close)
return integer
def CDLHARAMICROSS(equity, start=None, end=None):
"""Harami Cross Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHARAMICROSS(opn, high, low, close)
return integer
def CDLHIGHWAVE(equity, start=None, end=None):
"""High-Wave Candle
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHIGHWAVE(opn, high, low, close)
return integer
def CDLHIKKAKE(equity, start=None, end=None):
"""Hikkake Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHIKKAKE(opn, high, low, close)
return integer
def CDLHIKKAKEMOD(equity, start=None, end=None):
"""Modified Hikkake Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHIKKAKEMOD(opn, high, low, close)
return integer
def CDLHOMINGPIGEON(equity, start=None, end=None):
"""Homing Pigeon
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHOMINGPIGEON(opn, high, low, close)
return integer
def CDLIDENTICAL3CROWS(equity, start=None, end=None):
"""Identical Three Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLIDENTICAL3CROWS(opn, high, low, close)
return integer
def CDLINNECK(equity, start=None, end=None):
"""In-Neck Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLINNECK(opn, high, low, close)
return integer
def CDLINVERTEDHAMMER(equity, start=None, end=None):
"""Inverted Hammer
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLINVERTEDHAMMER(opn, high, low, close)
return integer
def CDLKICKING(equity, start=None, end=None):
"""Kicking
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLKICKING(opn, high, low, close)
return integer
def CDLKICKINGBYLENGTH(equity, start=None, end=None):
"""Kicking - bull/bear determined by the longer marubozu
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLKICKINGBYLENGTH(opn, high, low, close)
return integer
def CDLLADDERBOTTOM(equity, start=None, end=None):
"""Ladder Bottom
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLLADDERBOTTOM(opn, high, low, close)
return integer
def CDLLONGLEGGEDDOJI(equity, start=None, end=None):
"""Long Legged Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLLONGLEGGEDDOJI(opn, high, low, close)
return integer
def CDLLONGLINE(equity, start=None, end=None):
"""Long Line Candle
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLLONGLINE(opn, high, low, close)
return integer
def CDLMARUBOZU(equity, start=None, end=None):
"""Marubozu
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMARUBOZU(opn, high, low, close)
return integer
def CDLMATCHINGLOW(equity, start=None, end=None):
"""Matching Low
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMATCHINGLOW(opn, high, low, close)
return integer
def CDLMATHOLD(equity, start=None, end=None, penetration=0):
"""Mat Hold
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMATHOLD(opn, high, low, close, penetration=penetration)
return integer
def CDLMORNINGDOJISTAR(equity, start=None, end=None, penetration=0):
"""Morning Doji Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMORNINGDOJISTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLMORNINGSTAR(equity, start=None, end=None, penetration=0):
"""Morning Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMORNINGSTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLONNECK(equity, start=None, end=None):
    """On-Neck Pattern
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLONNECK(opn, high, low, close)
    return integer


def CDLPIERCING(equity, start=None, end=None):
    """Piercing Pattern
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLPIERCING(opn, high, low, close)
    return integer


def CDLRICKSHAWMAN(equity, start=None, end=None):
    """Rickshaw Man
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLRICKSHAWMAN(opn, high, low, close)
    return integer


def CDLRISEFALL3METHODS(equity, start=None, end=None):
    """Rising/Falling Three Methods
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLRISEFALL3METHODS(opn, high, low, close)
    return integer


def CDLSEPARATINGLINES(equity, start=None, end=None):
    """Separating Lines
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLSEPARATINGLINES(opn, high, low, close)
    return integer


def CDLSHOOTINGSTAR(equity, start=None, end=None):
    """Shooting Star
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLSHOOTINGSTAR(opn, high, low, close)
    return integer


def CDLSHORTLINE(equity, start=None, end=None):
    """Short Line Candle
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLSHORTLINE(opn, high, low, close)
    return integer


def CDLSPINNINGTOP(equity, start=None, end=None):
    """Spinning Top
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLSPINNINGTOP(opn, high, low, close)
    return integer


def CDLSTALLEDPATTERN(equity, start=None, end=None):
    """Stalled Pattern
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLSTALLEDPATTERN(opn, high, low, close)
    return integer


def CDLSTICKSANDWICH(equity, start=None, end=None):
    """Stick Sandwich
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLSTICKSANDWICH(opn, high, low, close)
    return integer


def CDLTAKURI(equity, start=None, end=None):
    """Takuri (Dragonfly Doji with very long lower shadow)
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLTAKURI(opn, high, low, close)
    return integer


def CDLTASUKIGAP(equity, start=None, end=None):
    """Tasuki Gap
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLTASUKIGAP(opn, high, low, close)
    return integer


def CDLTHRUSTING(equity, start=None, end=None):
    """Thrusting Pattern
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLTHRUSTING(opn, high, low, close)
    return integer


def CDLTRISTAR(equity, start=None, end=None):
    """Tristar Pattern
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLTRISTAR(opn, high, low, close)
    return integer


def CDLUNIQUE3RIVER(equity, start=None, end=None):
    """Unique 3 River
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLUNIQUE3RIVER(opn, high, low, close)
    return integer


def CDLUPSIDEGAP2CROWS(equity, start=None, end=None):
    """Upside Gap Two Crows
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLUPSIDEGAP2CROWS(opn, high, low, close)
    return integer


def CDLXSIDEGAP3METHODS(equity, start=None, end=None):
    """Upside/Downside Gap Three Methods
    :return:
    """
    opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
    high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
    low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
    close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
    integer = talib.CDLXSIDEGAP3METHODS(opn, high, low, close)
    return integer
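
# Sketch (added; `scan` is hypothetical, not part of the original module): all
# wrappers above share one call shape, so several recognizers can be applied
# in a single pass:
#
#     def scan(equity, funcs=(CDLMORNINGSTAR, CDLSHOOTINGSTAR, CDLTRISTAR)):
#         return {f.__name__: f(equity) for f in funcs}
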
# ========== PATTERN RECOGNITION FUNCTIONS **END** ==========
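
# Refactoring sketch (added; `_ohlc` is hypothetical, not part of the original
# module): every wrapper repeats the same four-array extraction, which a small
# helper could factor out without changing behavior:
#
#     def _ohlc(equity, start=None, end=None):
#         window = equity.hp.loc[start:end]
#         return tuple(np.array(window[col], dtype='f8')
#                      for col in ('open', 'high', 'low', 'close'))
#
#     def CDLDOJI(equity, start=None, end=None):
#         opn, high, low, close = _ohlc(equity, start, end)
#         return talib.CDLDOJI(opn, high, low, close)
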
if __name__ == '__main__':
    import datetime

    today = datetime.date(2017, 8, 30)
    eq = Equity.Equity('AAPL')
    eq.get_hp()
    print(CDLSPINNINGTOP(eq))
    print('ok')
"talib.HT_TRENDLINE",
"talib.CDLTAKURI",
"talib.CDLXSIDEGAP3METHODS",
"talib.TYPPRICE",
"talib.CDLBREAKAWAY",
"talib.CDLMATCHINGLOW",
"talib.CDLIDENTICAL3CROWS",
"talib.ROCR",
"talib.DEMA",
"talib.CDLONNECK",
"talib.CDLRICKSHAWMAN",
"talib.CDL3INSIDE",
"talib.CDL3STARSINSOUTH",
"talib.MOM"... | [((368, 423), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (376, 423), True, 'import numpy as np\n'), ((463, 558), 'talib.BBANDS', 'talib.BBANDS', (['close'], {'timeperiod': 'timeperiod', 'nbdevup': 'nbdevup', 'nbdevdn': 'nbdevdn', 'matype': 'matype'}), '(close, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn,\n matype=matype)\n', (475, 558), False, 'import talib\n'), ((754, 809), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (762, 809), True, 'import numpy as np\n'), ((821, 861), 'talib.DEMA', 'talib.DEMA', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (831, 861), False, 'import talib\n'), ((1077, 1132), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (1085, 1132), True, 'import numpy as np\n'), ((1144, 1183), 'talib.EMA', 'talib.EMA', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (1153, 1183), False, 'import talib\n'), ((1396, 1451), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (1404, 1451), True, 'import numpy as np\n'), ((1463, 1488), 'talib.HT_TRENDLINE', 'talib.HT_TRENDLINE', (['close'], {}), '(close)\n', (1481, 1488), False, 'import talib\n'), ((1711, 1766), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (1719, 1766), True, 'import numpy as np\n'), ((1778, 1818), 'talib.KAMA', 'talib.KAMA', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (1788, 1818), False, 'import talib\n'), ((1998, 2053), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (2006, 2053), True, 'import numpy as np\n'), ((2065, 2118), 'talib.MA', 'talib.MA', (['close'], {'timeperiod': 'timeperiod', 'matype': 'matype'}), '(close, timeperiod=timeperiod, matype=matype)\n', (2073, 2118), False, 'import talib\n'), ((2370, 2425), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (2378, 2425), True, 'import numpy as np\n'), ((2443, 2502), 'talib.MAMA', 'talib.MAMA', (['close'], {'fastlimit': 'fastlimit', 'slowlimit': 'slowlimit'}), '(close, fastlimit=fastlimit, slowlimit=slowlimit)\n', (2453, 2502), False, 'import talib\n'), ((2773, 2828), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (2781, 2828), True, 'import numpy as np\n'), ((2840, 2928), 'talib.MAVP', 'talib.MAVP', (['close', 'periods'], {'minperiod': 'minperiod', 'maxperiod': 'maxperiod', 'matype': 'matype'}), '(close, periods, minperiod=minperiod, maxperiod=maxperiod, matype\n =matype)\n', (2850, 2928), False, 'import talib\n'), ((3086, 3141), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (3094, 3141), True, 'import numpy as np\n'), ((3153, 3197), 'talib.MIDPOINT', 'talib.MIDPOINT', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (3167, 3197), False, 'import 
talib\n'), ((3365, 3419), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (3373, 3419), True, 'import numpy as np\n'), ((3430, 3483), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (3438, 3483), True, 'import numpy as np\n'), ((3495, 3543), 'talib.MIDPRICE', 'talib.MIDPRICE', (['high', 'low'], {'timeperiod': 'timeperiod'}), '(high, low, timeperiod=timeperiod)\n', (3509, 3543), False, 'import talib\n'), ((3727, 3781), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (3735, 3781), True, 'import numpy as np\n'), ((3792, 3845), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (3800, 3845), True, 'import numpy as np\n'), ((3857, 3921), 'talib.SAR', 'talib.SAR', (['high', 'low'], {'acceleration': 'acceleration', 'maximum': 'maximum'}), '(high, low, acceleration=acceleration, maximum=maximum)\n', (3866, 3921), False, 'import talib\n'), ((4466, 4520), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (4474, 4520), True, 'import numpy as np\n'), ((4531, 4584), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (4539, 4584), True, 'import numpy as np\n'), ((4596, 4942), 'talib.SAREXT', 'talib.SAREXT', (['high', 'low'], {'startvalue': 'startvalue', 'offsetonreverse': 'offsetonreverse', 'accelerationinitlong': 'accelerationinitlong', 'accelerationlong': 'accelerationlong', 'accelerationmaxlong': 'accelerationmaxlong', 'accelerationinitshort': 'accelerationinitshort', 'accelerationshort': 'accelerationshort', 'accelerationmaxshort': 'accelerationmaxshort'}), '(high, low, startvalue=startvalue, offsetonreverse=\n offsetonreverse, accelerationinitlong=accelerationinitlong,\n accelerationlong=accelerationlong, accelerationmaxlong=\n accelerationmaxlong, accelerationinitshort=accelerationinitshort,\n accelerationshort=accelerationshort, accelerationmaxshort=\n accelerationmaxshort)\n', (4608, 4942), False, 'import talib\n'), ((5126, 5181), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (5134, 5181), True, 'import numpy as np\n'), ((5193, 5232), 'talib.SMA', 'talib.SMA', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (5202, 5232), False, 'import talib\n'), ((5488, 5543), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (5496, 5543), True, 'import numpy as np\n'), ((5555, 5610), 'talib.T3', 'talib.T3', (['close'], {'timeperiod': 'timeperiod', 'vfactor': 'vfactor'}), '(close, timeperiod=timeperiod, vfactor=vfactor)\n', (5563, 5610), False, 'import talib\n'), ((5782, 5837), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (5790, 5837), True, 'import numpy as np\n'), ((5849, 5889), 'talib.TEMA', 'talib.TEMA', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (5859, 5889), False, 'import talib\n'), ((6054, 6109), 'numpy.array', 
'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (6062, 6109), True, 'import numpy as np\n'), ((6121, 6162), 'talib.TRIMA', 'talib.TRIMA', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (6132, 6162), False, 'import talib\n'), ((6323, 6378), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (6331, 6378), True, 'import numpy as np\n'), ((6390, 6429), 'talib.WMA', 'talib.WMA', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (6399, 6429), False, 'import talib\n'), ((6768, 6822), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (6776, 6822), True, 'import numpy as np\n'), ((6833, 6886), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (6841, 6886), True, 'import numpy as np\n'), ((6899, 6954), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (6907, 6954), True, 'import numpy as np\n'), ((6966, 7016), 'talib.ADX', 'talib.ADX', (['high', 'low', 'close'], {'timeperiod': 'timeperiod'}), '(high, low, close, timeperiod=timeperiod)\n', (6975, 7016), False, 'import talib\n'), ((7248, 7302), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (7256, 7302), True, 'import numpy as np\n'), ((7313, 7366), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (7321, 7366), True, 'import numpy as np\n'), ((7379, 7434), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (7387, 7434), True, 'import numpy as np\n'), ((7446, 7497), 'talib.ADXR', 'talib.ADXR', (['high', 'low', 'close'], {'timeperiod': 'timeperiod'}), '(high, low, close, timeperiod=timeperiod)\n', (7456, 7497), False, 'import talib\n'), ((7727, 7782), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (7735, 7782), True, 'import numpy as np\n'), ((7794, 7871), 'talib.APO', 'talib.APO', (['close'], {'fastperiod': 'fastperiod', 'slowperiod': 'slowperiod', 'matype': 'matype'}), '(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)\n', (7803, 7871), False, 'import talib\n'), ((8015, 8069), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (8023, 8069), True, 'import numpy as np\n'), ((8080, 8133), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (8088, 8133), True, 'import numpy as np\n'), ((8159, 8204), 'talib.AROON', 'talib.AROON', (['high', 'low'], {'timeperiod': 'timeperiod'}), '(high, low, timeperiod=timeperiod)\n', (8170, 8204), False, 'import talib\n'), ((8376, 8430), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (8384, 8430), True, 'import numpy as np\n'), ((8441, 8494), 'numpy.array', 
'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (8449, 8494), True, 'import numpy as np\n'), ((8506, 8554), 'talib.AROONOSC', 'talib.AROONOSC', (['high', 'low'], {'timeperiod': 'timeperiod'}), '(high, low, timeperiod=timeperiod)\n', (8520, 8554), False, 'import talib\n'), ((8668, 8722), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (8676, 8722), True, 'import numpy as np\n'), ((8734, 8788), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (8742, 8788), True, 'import numpy as np\n'), ((8799, 8852), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (8807, 8852), True, 'import numpy as np\n'), ((8865, 8920), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (8873, 8920), True, 'import numpy as np\n'), ((8932, 8964), 'talib.BOP', 'talib.BOP', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (8941, 8964), False, 'import talib\n'), ((9124, 9178), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (9132, 9178), True, 'import numpy as np\n'), ((9189, 9242), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (9197, 9242), True, 'import numpy as np\n'), ((9255, 9310), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (9263, 9310), True, 'import numpy as np\n'), ((9322, 9372), 'talib.CCI', 'talib.CCI', (['high', 'low', 'close'], {'timeperiod': 'timeperiod'}), '(high, low, close, timeperiod=timeperiod)\n', (9331, 9372), False, 'import talib\n'), ((9587, 9642), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (9595, 9642), True, 'import numpy as np\n'), ((9654, 9693), 'talib.CMO', 'talib.CMO', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (9663, 9693), False, 'import talib\n'), ((9906, 9960), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (9914, 9960), True, 'import numpy as np\n'), ((9971, 10024), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (9979, 10024), True, 'import numpy as np\n'), ((10037, 10092), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (10045, 10092), True, 'import numpy as np\n'), ((10104, 10153), 'talib.DX', 'talib.DX', (['high', 'low', 'close'], {'timeperiod': 'timeperiod'}), '(high, low, close, timeperiod=timeperiod)\n', (10112, 10153), False, 'import talib\n'), ((10408, 10463), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (10416, 10463), True, 'import numpy as np\n'), ((10497, 10591), 'talib.MACD', 'talib.MACD', (['close'], {'fastperiod': 
'fastperiod', 'slowperiod': 'slowperiod', 'signalperiod': 'signalperiod'}), '(close, fastperiod=fastperiod, slowperiod=slowperiod,\n signalperiod=signalperiod)\n', (10507, 10591), False, 'import talib\n'), ((10975, 11030), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (10983, 11030), True, 'import numpy as np\n'), ((11064, 11179), 'talib.MACDEXT', 'talib.MACDEXT', (['close'], {'fastperiod': '(12)', 'fastmatype': '(0)', 'slowperiod': '(26)', 'slowmatype': '(0)', 'signalperiod': '(9)', 'signalmatype': '(0)'}), '(close, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype\n =0, signalperiod=9, signalmatype=0)\n', (11077, 11179), False, 'import talib\n'), ((11435, 11490), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (11443, 11490), True, 'import numpy as np\n'), ((11524, 11571), 'talib.MACDFIX', 'talib.MACDFIX', (['close'], {'signalperiod': 'signalperiod'}), '(close, signalperiod=signalperiod)\n', (11537, 11571), False, 'import talib\n'), ((11798, 11852), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (11806, 11852), True, 'import numpy as np\n'), ((11863, 11916), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (11871, 11916), True, 'import numpy as np\n'), ((11929, 11984), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (11937, 11984), True, 'import numpy as np\n'), ((11998, 12054), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'volume']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'volume'], dtype='f8')\n", (12006, 12054), True, 'import numpy as np\n'), ((12066, 12124), 'talib.MFI', 'talib.MFI', (['high', 'low', 'close', 'volume'], {'timeperiod': 'timeperiod'}), '(high, low, close, volume, timeperiod=timeperiod)\n', (12075, 12124), False, 'import talib\n'), ((12347, 12401), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (12355, 12401), True, 'import numpy as np\n'), ((12412, 12465), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (12420, 12465), True, 'import numpy as np\n'), ((12478, 12533), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (12486, 12533), True, 'import numpy as np\n'), ((12545, 12600), 'talib.MINUS_DI', 'talib.MINUS_DI', (['high', 'low', 'close'], {'timeperiod': 'timeperiod'}), '(high, low, close, timeperiod=timeperiod)\n', (12559, 12600), False, 'import talib\n'), ((12825, 12879), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (12833, 12879), True, 'import numpy as np\n'), ((12890, 12943), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (12898, 12943), True, 'import numpy as np\n'), ((12955, 13003), 'talib.MINUS_DM', 'talib.MINUS_DM', (['high', 'low'], {'timeperiod': 'timeperiod'}), '(high, low, timeperiod=timeperiod)\n', 
(12969, 13003), False, 'import talib\n'), ((13149, 13204), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (13157, 13204), True, 'import numpy as np\n'), ((13216, 13255), 'talib.MOM', 'talib.MOM', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (13225, 13255), False, 'import talib\n'), ((13475, 13529), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (13483, 13529), True, 'import numpy as np\n'), ((13540, 13593), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (13548, 13593), True, 'import numpy as np\n'), ((13606, 13661), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (13614, 13661), True, 'import numpy as np\n'), ((13673, 13727), 'talib.PLUS_DI', 'talib.PLUS_DI', (['high', 'low', 'close'], {'timeperiod': 'timeperiod'}), '(high, low, close, timeperiod=timeperiod)\n', (13686, 13727), False, 'import talib\n'), ((13949, 14003), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (13957, 14003), True, 'import numpy as np\n'), ((14014, 14067), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (14022, 14067), True, 'import numpy as np\n'), ((14079, 14126), 'talib.PLUS_DM', 'talib.PLUS_DM', (['high', 'low'], {'timeperiod': 'timeperiod'}), '(high, low, timeperiod=timeperiod)\n', (14092, 14126), False, 'import talib\n'), ((14358, 14413), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (14366, 14413), True, 'import numpy as np\n'), ((14425, 14502), 'talib.PPO', 'talib.PPO', (['close'], {'fastperiod': 'fastperiod', 'slowperiod': 'slowperiod', 'matype': 'matype'}), '(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)\n', (14434, 14502), False, 'import talib\n'), ((14682, 14737), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (14690, 14737), True, 'import numpy as np\n'), ((14749, 14788), 'talib.ROC', 'talib.ROC', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (14758, 14788), False, 'import talib\n'), ((14981, 15036), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (14989, 15036), True, 'import numpy as np\n'), ((15048, 15088), 'talib.ROCP', 'talib.ROCP', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (15058, 15088), False, 'import talib\n'), ((15266, 15321), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (15274, 15321), True, 'import numpy as np\n'), ((15333, 15373), 'talib.ROCR', 'talib.ROCR', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (15343, 15373), False, 'import talib\n'), ((15568, 15623), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], 
dtype='f8')\n", (15576, 15623), True, 'import numpy as np\n'), ((15635, 15678), 'talib.ROCR100', 'talib.ROCR100', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (15648, 15678), False, 'import talib\n'), ((15891, 15946), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (15899, 15946), True, 'import numpy as np\n'), ((15958, 15997), 'talib.RSI', 'talib.RSI', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (15967, 15997), False, 'import talib\n'), ((16313, 16367), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (16321, 16367), True, 'import numpy as np\n'), ((16378, 16431), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (16386, 16431), True, 'import numpy as np\n'), ((16444, 16499), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (16452, 16499), True, 'import numpy as np\n'), ((16519, 16692), 'talib.STOCH', 'talib.STOCH', (['high', 'low', 'close'], {'fastk_period': 'fastk_period', 'slowk_period': 'slowk_period', 'slowk_matype': 'slowk_matype', 'slowd_period': 'slowd_period', 'slowd_matype': 'slowd_matype'}), '(high, low, close, fastk_period=fastk_period, slowk_period=\n slowk_period, slowk_matype=slowk_matype, slowd_period=slowd_period,\n slowd_matype=slowd_matype)\n', (16530, 16692), False, 'import talib\n'), ((16962, 17016), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (16970, 17016), True, 'import numpy as np\n'), ((17027, 17080), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (17035, 17080), True, 'import numpy as np\n'), ((17093, 17148), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (17101, 17148), True, 'import numpy as np\n'), ((17168, 17284), 'talib.STOCHF', 'talib.STOCHF', (['high', 'low', 'close'], {'fastk_period': 'fastk_period', 'fastd_period': 'fastd_period', 'fastd_matype': 'fastd_matype'}), '(high, low, close, fastk_period=fastk_period, fastd_period=\n fastd_period, fastd_matype=fastd_matype)\n', (17180, 17284), False, 'import talib\n'), ((17676, 17731), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (17684, 17731), True, 'import numpy as np\n'), ((17751, 17880), 'talib.STOCHRSI', 'talib.STOCHRSI', (['close'], {'timeperiod': 'timeperiod', 'fastk_period': 'fastk_period', 'fastd_period': 'fastd_period', 'fastd_matype': 'fastd_matype'}), '(close, timeperiod=timeperiod, fastk_period=fastk_period,\n fastd_period=fastd_period, fastd_matype=fastd_matype)\n', (17765, 17880), False, 'import talib\n'), ((18106, 18161), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (18114, 18161), True, 'import numpy as np\n'), ((18173, 18213), 'talib.TRIX', 'talib.TRIX', (['close'], {'timeperiod': 'timeperiod'}), '(close, timeperiod=timeperiod)\n', (18183, 18213), False, 'import talib\n'), ((18453, 
18507), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (18461, 18507), True, 'import numpy as np\n'), ((18518, 18571), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (18526, 18571), True, 'import numpy as np\n'), ((18584, 18639), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (18592, 18639), True, 'import numpy as np\n'), ((18651, 18761), 'talib.ULTOSC', 'talib.ULTOSC', (['high', 'low', 'close'], {'timeperiod1': 'timeperiod1', 'timeperiod2': 'timeperiod2', 'timeperiod3': 'timeperiod3'}), '(high, low, close, timeperiod1=timeperiod1, timeperiod2=\n timeperiod2, timeperiod3=timeperiod3)\n', (18663, 18761), False, 'import talib\n'), ((18891, 18945), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (18899, 18945), True, 'import numpy as np\n'), ((18956, 19009), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (18964, 19009), True, 'import numpy as np\n'), ((19022, 19077), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (19030, 19077), True, 'import numpy as np\n'), ((19089, 19133), 'talib.WILLR', 'talib.WILLR', (['high', 'low', 'close'], {'timeperiod': '(14)'}), '(high, low, close, timeperiod=14)\n', (19100, 19133), False, 'import talib\n'), ((19367, 19421), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (19375, 19421), True, 'import numpy as np\n'), ((19433, 19487), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (19441, 19487), True, 'import numpy as np\n'), ((19498, 19551), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (19506, 19551), True, 'import numpy as np\n'), ((19564, 19619), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (19572, 19619), True, 'import numpy as np\n'), ((19631, 19668), 'talib.AVGPRICE', 'talib.AVGPRICE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (19645, 19668), False, 'import talib\n'), ((19784, 19838), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (19792, 19838), True, 'import numpy as np\n'), ((19849, 19902), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (19857, 19902), True, 'import numpy as np\n'), ((19914, 19939), 'talib.MEDPRICE', 'talib.MEDPRICE', (['high', 'low'], {}), '(high, low)\n', (19928, 19939), False, 'import talib\n'), ((20056, 20110), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (20064, 20110), True, 'import numpy as np\n'), ((20121, 20174), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': 
'"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (20129, 20174), True, 'import numpy as np\n'), ((20187, 20242), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (20195, 20242), True, 'import numpy as np\n'), ((20254, 20286), 'talib.TYPPRICE', 'talib.TYPPRICE', (['high', 'low', 'close'], {}), '(high, low, close)\n', (20268, 20286), False, 'import talib\n'), ((20410, 20464), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (20418, 20464), True, 'import numpy as np\n'), ((20475, 20528), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (20483, 20528), True, 'import numpy as np\n'), ((20541, 20596), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (20549, 20596), True, 'import numpy as np\n'), ((20608, 20640), 'talib.WCLPRICE', 'talib.WCLPRICE', (['high', 'low', 'close'], {}), '(high, low, close)\n', (20622, 20640), False, 'import talib\n'), ((20876, 20930), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (20884, 20930), True, 'import numpy as np\n'), ((20942, 20996), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (20950, 20996), True, 'import numpy as np\n'), ((21007, 21060), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (21015, 21060), True, 'import numpy as np\n'), ((21073, 21128), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (21081, 21128), True, 'import numpy as np\n'), ((21143, 21181), 'talib.CDL2CROWS', 'talib.CDL2CROWS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (21158, 21181), False, 'import talib\n'), ((21310, 21364), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (21318, 21364), True, 'import numpy as np\n'), ((21376, 21430), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (21384, 21430), True, 'import numpy as np\n'), ((21441, 21494), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (21449, 21494), True, 'import numpy as np\n'), ((21507, 21562), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (21515, 21562), True, 'import numpy as np\n'), ((21577, 21620), 'talib.CDL3BLACKCROWS', 'talib.CDL3BLACKCROWS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (21597, 21620), False, 'import talib\n'), ((21748, 21802), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (21756, 21802), True, 'import numpy as np\n'), ((21814, 21868), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), 
"(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (21822, 21868), True, 'import numpy as np\n'), ((21879, 21932), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (21887, 21932), True, 'import numpy as np\n'), ((21945, 22000), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (21953, 22000), True, 'import numpy as np\n'), ((22015, 22054), 'talib.CDL3INSIDE', 'talib.CDL3INSIDE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (22031, 22054), False, 'import talib\n'), ((22183, 22237), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (22191, 22237), True, 'import numpy as np\n'), ((22249, 22303), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (22257, 22303), True, 'import numpy as np\n'), ((22314, 22367), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (22322, 22367), True, 'import numpy as np\n'), ((22380, 22435), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (22388, 22435), True, 'import numpy as np\n'), ((22450, 22493), 'talib.CDL3LINESTRIKE', 'talib.CDL3LINESTRIKE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (22470, 22493), False, 'import talib\n'), ((22623, 22677), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (22631, 22677), True, 'import numpy as np\n'), ((22689, 22743), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (22697, 22743), True, 'import numpy as np\n'), ((22754, 22807), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (22762, 22807), True, 'import numpy as np\n'), ((22820, 22875), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (22828, 22875), True, 'import numpy as np\n'), ((22890, 22930), 'talib.CDL3OUTSIDE', 'talib.CDL3OUTSIDE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (22907, 22930), False, 'import talib\n'), ((23068, 23122), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (23076, 23122), True, 'import numpy as np\n'), ((23134, 23188), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (23142, 23188), True, 'import numpy as np\n'), ((23199, 23252), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (23207, 23252), True, 'import numpy as np\n'), ((23265, 23320), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (23273, 23320), True, 'import numpy as np\n'), ((23335, 23380), 'talib.CDL3STARSINSOUTH', 
'talib.CDL3STARSINSOUTH', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (23357, 23380), False, 'import talib\n'), ((23525, 23579), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (23533, 23579), True, 'import numpy as np\n'), ((23591, 23645), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (23599, 23645), True, 'import numpy as np\n'), ((23656, 23709), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (23664, 23709), True, 'import numpy as np\n'), ((23722, 23777), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (23730, 23777), True, 'import numpy as np\n'), ((23792, 23838), 'talib.CDL3WHITESOLDIERS', 'talib.CDL3WHITESOLDIERS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (23815, 23838), False, 'import talib\n'), ((24005, 24059), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (24013, 24059), True, 'import numpy as np\n'), ((24071, 24125), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (24079, 24125), True, 'import numpy as np\n'), ((24136, 24189), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (24144, 24189), True, 'import numpy as np\n'), ((24202, 24257), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (24210, 24257), True, 'import numpy as np\n'), ((24272, 24342), 'talib.CDLABANDONEDBABY', 'talib.CDLABANDONEDBABY', (['opn', 'high', 'low', 'close'], {'penetration': 'penetration'}), '(opn, high, low, close, penetration=penetration)\n', (24294, 24342), False, 'import talib\n'), ((24468, 24522), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (24476, 24522), True, 'import numpy as np\n'), ((24534, 24588), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (24542, 24588), True, 'import numpy as np\n'), ((24599, 24652), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (24607, 24652), True, 'import numpy as np\n'), ((24665, 24720), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (24673, 24720), True, 'import numpy as np\n'), ((24735, 24779), 'talib.CDLADVANCEBLOCK', 'talib.CDLADVANCEBLOCK', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (24756, 24779), False, 'import talib\n'), ((24897, 24951), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (24905, 24951), True, 'import numpy as np\n'), ((24963, 25017), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], 
dtype='f8')\n", (24971, 25017), True, 'import numpy as np\n'), ((25028, 25081), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (25036, 25081), True, 'import numpy as np\n'), ((25094, 25149), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (25102, 25149), True, 'import numpy as np\n'), ((25164, 25204), 'talib.CDLBELTHOLD', 'talib.CDLBELTHOLD', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (25181, 25204), False, 'import talib\n'), ((25323, 25377), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (25331, 25377), True, 'import numpy as np\n'), ((25389, 25443), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (25397, 25443), True, 'import numpy as np\n'), ((25454, 25507), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (25462, 25507), True, 'import numpy as np\n'), ((25520, 25575), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (25528, 25575), True, 'import numpy as np\n'), ((25590, 25631), 'talib.CDLBREAKAWAY', 'talib.CDLBREAKAWAY', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (25608, 25631), False, 'import talib\n'), ((25763, 25817), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (25771, 25817), True, 'import numpy as np\n'), ((25829, 25883), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (25837, 25883), True, 'import numpy as np\n'), ((25894, 25947), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (25902, 25947), True, 'import numpy as np\n'), ((25960, 26015), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (25968, 26015), True, 'import numpy as np\n'), ((26030, 26077), 'talib.CDLCLOSINGMARUBOZU', 'talib.CDLCLOSINGMARUBOZU', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (26054, 26077), False, 'import talib\n'), ((26217, 26271), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (26225, 26271), True, 'import numpy as np\n'), ((26283, 26337), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (26291, 26337), True, 'import numpy as np\n'), ((26348, 26401), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (26356, 26401), True, 'import numpy as np\n'), ((26414, 26469), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (26422, 26469), True, 'import numpy as np\n'), ((26484, 26532), 'talib.CDLCONCEALBABYSWALL', 'talib.CDLCONCEALBABYSWALL', (['opn', 
'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (26509, 26532), False, 'import talib\n'), ((26659, 26713), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (26667, 26713), True, 'import numpy as np\n'), ((26725, 26779), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (26733, 26779), True, 'import numpy as np\n'), ((26790, 26843), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (26798, 26843), True, 'import numpy as np\n'), ((26856, 26911), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (26864, 26911), True, 'import numpy as np\n'), ((26926, 26971), 'talib.CDLCOUNTERATTACK', 'talib.CDLCOUNTERATTACK', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (26948, 26971), False, 'import talib\n'), ((27141, 27195), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (27149, 27195), True, 'import numpy as np\n'), ((27207, 27261), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (27215, 27261), True, 'import numpy as np\n'), ((27272, 27325), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (27280, 27325), True, 'import numpy as np\n'), ((27338, 27393), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (27346, 27393), True, 'import numpy as np\n'), ((27408, 27479), 'talib.CDLDARKCLOUDCOVER', 'talib.CDLDARKCLOUDCOVER', (['opn', 'high', 'low', 'close'], {'penetration': 'penetration'}), '(opn, high, low, close, penetration=penetration)\n', (27431, 27479), False, 'import talib\n'), ((27588, 27642), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (27596, 27642), True, 'import numpy as np\n'), ((27654, 27708), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (27662, 27708), True, 'import numpy as np\n'), ((27719, 27772), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (27727, 27772), True, 'import numpy as np\n'), ((27785, 27840), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (27793, 27840), True, 'import numpy as np\n'), ((27855, 27891), 'talib.CDLDOJI', 'talib.CDLDOJI', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (27868, 27891), False, 'import talib\n'), ((28009, 28063), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (28017, 28063), True, 'import numpy as np\n'), ((28075, 28129), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (28083, 28129), True, 'import numpy as 
np\n'), ((28140, 28193), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (28148, 28193), True, 'import numpy as np\n'), ((28206, 28261), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (28214, 28261), True, 'import numpy as np\n'), ((28276, 28316), 'talib.CDLDOJISTAR', 'talib.CDLDOJISTAR', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (28293, 28316), False, 'import talib\n'), ((28444, 28498), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (28452, 28498), True, 'import numpy as np\n'), ((28510, 28564), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (28518, 28564), True, 'import numpy as np\n'), ((28575, 28628), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (28583, 28628), True, 'import numpy as np\n'), ((28641, 28696), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (28649, 28696), True, 'import numpy as np\n'), ((28711, 28756), 'talib.CDLDRAGONFLYDOJI', 'talib.CDLDRAGONFLYDOJI', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (28733, 28756), False, 'import talib\n'), ((28883, 28937), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (28891, 28937), True, 'import numpy as np\n'), ((28949, 29003), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (28957, 29003), True, 'import numpy as np\n'), ((29014, 29067), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (29022, 29067), True, 'import numpy as np\n'), ((29080, 29135), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (29088, 29135), True, 'import numpy as np\n'), ((29150, 29191), 'talib.CDLENGULFING', 'talib.CDLENGULFING', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (29168, 29191), False, 'import talib\n'), ((29363, 29417), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (29371, 29417), True, 'import numpy as np\n'), ((29429, 29483), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (29437, 29483), True, 'import numpy as np\n'), ((29494, 29547), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (29502, 29547), True, 'import numpy as np\n'), ((29560, 29615), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (29568, 29615), True, 'import numpy as np\n'), ((29630, 29702), 'talib.CDLEVENINGDOJISTAR', 'talib.CDLEVENINGDOJISTAR', (['opn', 'high', 'low', 'close'], {'penetration': 'penetration'}), 
'(opn, high, low, close, penetration=penetration)\n', (29654, 29702), False, 'import talib\n'), ((29865, 29919), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (29873, 29919), True, 'import numpy as np\n'), ((29931, 29985), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (29939, 29985), True, 'import numpy as np\n'), ((29996, 30049), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (30004, 30049), True, 'import numpy as np\n'), ((30062, 30117), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (30070, 30117), True, 'import numpy as np\n'), ((30132, 30200), 'talib.CDLEVENINGSTAR', 'talib.CDLEVENINGSTAR', (['opn', 'high', 'low', 'close'], {'penetration': 'penetration'}), '(opn, high, low, close, penetration=penetration)\n', (30152, 30200), False, 'import talib\n'), ((30353, 30407), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (30361, 30407), True, 'import numpy as np\n'), ((30419, 30473), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (30427, 30473), True, 'import numpy as np\n'), ((30484, 30537), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (30492, 30537), True, 'import numpy as np\n'), ((30550, 30605), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (30558, 30605), True, 'import numpy as np\n'), ((30620, 30668), 'talib.CDLGAPSIDESIDEWHITE', 'talib.CDLGAPSIDESIDEWHITE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (30645, 30668), False, 'import talib\n'), ((30798, 30852), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (30806, 30852), True, 'import numpy as np\n'), ((30864, 30918), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (30872, 30918), True, 'import numpy as np\n'), ((30929, 30982), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (30937, 30982), True, 'import numpy as np\n'), ((30995, 31050), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (31003, 31050), True, 'import numpy as np\n'), ((31065, 31111), 'talib.CDLGRAVESTONEDOJI', 'talib.CDLGRAVESTONEDOJI', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (31088, 31111), False, 'import talib\n'), ((31224, 31278), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (31232, 31278), True, 'import numpy as np\n'), ((31290, 31344), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (31298, 31344), True, 
'import numpy as np\n'), ((31355, 31408), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (31363, 31408), True, 'import numpy as np\n'), ((31421, 31476), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (31429, 31476), True, 'import numpy as np\n'), ((31491, 31529), 'talib.CDLHAMMER', 'talib.CDLHAMMER', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (31506, 31529), False, 'import talib\n'), ((31651, 31705), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (31659, 31705), True, 'import numpy as np\n'), ((31717, 31771), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (31725, 31771), True, 'import numpy as np\n'), ((31782, 31835), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (31790, 31835), True, 'import numpy as np\n'), ((31848, 31903), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (31856, 31903), True, 'import numpy as np\n'), ((31918, 31960), 'talib.CDLHANGINGMAN', 'talib.CDLHANGINGMAN', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (31937, 31960), False, 'import talib\n'), ((32081, 32135), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (32089, 32135), True, 'import numpy as np\n'), ((32147, 32201), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (32155, 32201), True, 'import numpy as np\n'), ((32212, 32265), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (32220, 32265), True, 'import numpy as np\n'), ((32278, 32333), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (32286, 32333), True, 'import numpy as np\n'), ((32348, 32386), 'talib.CDLHARAMI', 'talib.CDLHARAMI', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (32363, 32386), False, 'import talib\n'), ((32518, 32572), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (32526, 32572), True, 'import numpy as np\n'), ((32584, 32638), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (32592, 32638), True, 'import numpy as np\n'), ((32649, 32702), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (32657, 32702), True, 'import numpy as np\n'), ((32715, 32770), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (32723, 32770), True, 'import numpy as np\n'), ((32785, 32828), 'talib.CDLHARAMICROSS', 'talib.CDLHARAMICROSS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (32805, 
32828), False, 'import talib\n'), ((32953, 33007), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (32961, 33007), True, 'import numpy as np\n'), ((33019, 33073), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (33027, 33073), True, 'import numpy as np\n'), ((33084, 33137), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (33092, 33137), True, 'import numpy as np\n'), ((33150, 33205), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (33158, 33205), True, 'import numpy as np\n'), ((33220, 33260), 'talib.CDLHIGHWAVE', 'talib.CDLHIGHWAVE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (33237, 33260), False, 'import talib\n'), ((33383, 33437), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (33391, 33437), True, 'import numpy as np\n'), ((33449, 33503), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (33457, 33503), True, 'import numpy as np\n'), ((33514, 33567), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (33522, 33567), True, 'import numpy as np\n'), ((33580, 33635), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (33588, 33635), True, 'import numpy as np\n'), ((33650, 33689), 'talib.CDLHIKKAKE', 'talib.CDLHIKKAKE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (33666, 33689), False, 'import talib\n'), ((33824, 33878), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (33832, 33878), True, 'import numpy as np\n'), ((33890, 33944), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (33898, 33944), True, 'import numpy as np\n'), ((33955, 34008), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (33963, 34008), True, 'import numpy as np\n'), ((34021, 34076), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (34029, 34076), True, 'import numpy as np\n'), ((34091, 34133), 'talib.CDLHIKKAKEMOD', 'talib.CDLHIKKAKEMOD', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (34110, 34133), False, 'import talib\n'), ((34259, 34313), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (34267, 34313), True, 'import numpy as np\n'), ((34325, 34379), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (34333, 34379), True, 'import numpy as np\n'), ((34390, 34443), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), 
"(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (34398, 34443), True, 'import numpy as np\n'), ((34456, 34511), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (34464, 34511), True, 'import numpy as np\n'), ((34526, 34570), 'talib.CDLHOMINGPIGEON', 'talib.CDLHOMINGPIGEON', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (34547, 34570), False, 'import talib\n'), ((34707, 34761), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (34715, 34761), True, 'import numpy as np\n'), ((34773, 34827), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (34781, 34827), True, 'import numpy as np\n'), ((34838, 34891), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (34846, 34891), True, 'import numpy as np\n'), ((34904, 34959), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (34912, 34959), True, 'import numpy as np\n'), ((34974, 35021), 'talib.CDLIDENTICAL3CROWS', 'talib.CDLIDENTICAL3CROWS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (34998, 35021), False, 'import talib\n'), ((35143, 35197), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (35151, 35197), True, 'import numpy as np\n'), ((35209, 35263), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (35217, 35263), True, 'import numpy as np\n'), ((35274, 35327), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (35282, 35327), True, 'import numpy as np\n'), ((35340, 35395), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (35348, 35395), True, 'import numpy as np\n'), ((35410, 35448), 'talib.CDLINNECK', 'talib.CDLINNECK', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (35425, 35448), False, 'import talib\n'), ((35578, 35632), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (35586, 35632), True, 'import numpy as np\n'), ((35644, 35698), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (35652, 35698), True, 'import numpy as np\n'), ((35709, 35762), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (35717, 35762), True, 'import numpy as np\n'), ((35775, 35830), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (35783, 35830), True, 'import numpy as np\n'), ((35845, 35891), 'talib.CDLINVERTEDHAMMER', 'talib.CDLINVERTEDHAMMER', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (35868, 35891), False, 'import talib\n'), ((36006, 36060), 'numpy.array', 'np.array', 
(["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (36014, 36060), True, 'import numpy as np\n'), ((36072, 36126), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (36080, 36126), True, 'import numpy as np\n'), ((36137, 36190), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (36145, 36190), True, 'import numpy as np\n'), ((36203, 36258), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (36211, 36258), True, 'import numpy as np\n'), ((36273, 36312), 'talib.CDLKICKING', 'talib.CDLKICKING', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (36289, 36312), False, 'import talib\n'), ((36481, 36535), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (36489, 36535), True, 'import numpy as np\n'), ((36547, 36601), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (36555, 36601), True, 'import numpy as np\n'), ((36612, 36665), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (36620, 36665), True, 'import numpy as np\n'), ((36678, 36733), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (36686, 36733), True, 'import numpy as np\n'), ((36748, 36795), 'talib.CDLKICKINGBYLENGTH', 'talib.CDLKICKINGBYLENGTH', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (36772, 36795), False, 'import talib\n'), ((36921, 36975), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (36929, 36975), True, 'import numpy as np\n'), ((36987, 37041), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (36995, 37041), True, 'import numpy as np\n'), ((37052, 37105), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (37060, 37105), True, 'import numpy as np\n'), ((37118, 37173), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (37126, 37173), True, 'import numpy as np\n'), ((37188, 37232), 'talib.CDLLADDERBOTTOM', 'talib.CDLLADDERBOTTOM', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (37209, 37232), False, 'import talib\n'), ((37363, 37417), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (37371, 37417), True, 'import numpy as np\n'), ((37429, 37483), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (37437, 37483), True, 'import numpy as np\n'), ((37494, 37547), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (37502, 37547), True, 'import 
numpy as np\n'), ((37560, 37615), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (37568, 37615), True, 'import numpy as np\n'), ((37630, 37676), 'talib.CDLLONGLEGGEDDOJI', 'talib.CDLLONGLEGGEDDOJI', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (37653, 37676), False, 'import talib\n'), ((37801, 37855), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (37809, 37855), True, 'import numpy as np\n'), ((37867, 37921), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (37875, 37921), True, 'import numpy as np\n'), ((37932, 37985), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (37940, 37985), True, 'import numpy as np\n'), ((37998, 38053), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (38006, 38053), True, 'import numpy as np\n'), ((38068, 38108), 'talib.CDLLONGLINE', 'talib.CDLLONGLINE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (38085, 38108), False, 'import talib\n'), ((38225, 38279), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (38233, 38279), True, 'import numpy as np\n'), ((38291, 38345), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (38299, 38345), True, 'import numpy as np\n'), ((38356, 38409), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (38364, 38409), True, 'import numpy as np\n'), ((38422, 38477), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (38430, 38477), True, 'import numpy as np\n'), ((38492, 38532), 'talib.CDLMARUBOZU', 'talib.CDLMARUBOZU', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (38509, 38532), False, 'import talib\n'), ((38656, 38710), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (38664, 38710), True, 'import numpy as np\n'), ((38722, 38776), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (38730, 38776), True, 'import numpy as np\n'), ((38787, 38840), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (38795, 38840), True, 'import numpy as np\n'), ((38853, 38908), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (38861, 38908), True, 'import numpy as np\n'), ((38923, 38966), 'talib.CDLMATCHINGLOW', 'talib.CDLMATCHINGLOW', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (38943, 38966), False, 'import talib\n'), ((39121, 39175), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", 
(39129, 39175), True, 'import numpy as np\n'), ((39187, 39241), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (39195, 39241), True, 'import numpy as np\n'), ((39252, 39305), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (39260, 39305), True, 'import numpy as np\n'), ((39318, 39373), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (39326, 39373), True, 'import numpy as np\n'), ((39388, 39452), 'talib.CDLMATHOLD', 'talib.CDLMATHOLD', (['opn', 'high', 'low', 'close'], {'penetration': 'penetration'}), '(opn, high, low, close, penetration=penetration)\n', (39404, 39452), False, 'import talib\n'), ((39624, 39678), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (39632, 39678), True, 'import numpy as np\n'), ((39690, 39744), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (39698, 39744), True, 'import numpy as np\n'), ((39755, 39808), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (39763, 39808), True, 'import numpy as np\n'), ((39821, 39876), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (39829, 39876), True, 'import numpy as np\n'), ((39891, 39963), 'talib.CDLMORNINGDOJISTAR', 'talib.CDLMORNINGDOJISTAR', (['opn', 'high', 'low', 'close'], {'penetration': 'penetration'}), '(opn, high, low, close, penetration=penetration)\n', (39915, 39963), False, 'import talib\n'), ((40126, 40180), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (40134, 40180), True, 'import numpy as np\n'), ((40192, 40246), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (40200, 40246), True, 'import numpy as np\n'), ((40257, 40310), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (40265, 40310), True, 'import numpy as np\n'), ((40323, 40378), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (40331, 40378), True, 'import numpy as np\n'), ((40393, 40461), 'talib.CDLMORNINGSTAR', 'talib.CDLMORNINGSTAR', (['opn', 'high', 'low', 'close'], {'penetration': 'penetration'}), '(opn, high, low, close, penetration=penetration)\n', (40413, 40461), False, 'import talib\n'), ((40583, 40637), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (40591, 40637), True, 'import numpy as np\n'), ((40649, 40703), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (40657, 40703), True, 'import numpy as np\n'), ((40714, 40767), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], 
dtype='f8')\n", (40722, 40767), True, 'import numpy as np\n'), ((40780, 40835), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (40788, 40835), True, 'import numpy as np\n'), ((40850, 40888), 'talib.CDLONNECK', 'talib.CDLONNECK', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (40865, 40888), False, 'import talib\n'), ((41013, 41067), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (41021, 41067), True, 'import numpy as np\n'), ((41079, 41133), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (41087, 41133), True, 'import numpy as np\n'), ((41144, 41197), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (41152, 41197), True, 'import numpy as np\n'), ((41210, 41265), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (41218, 41265), True, 'import numpy as np\n'), ((41280, 41320), 'talib.CDLPIERCING', 'talib.CDLPIERCING', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (41297, 41320), False, 'import talib\n'), ((41444, 41498), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (41452, 41498), True, 'import numpy as np\n'), ((41510, 41564), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (41518, 41564), True, 'import numpy as np\n'), ((41575, 41628), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (41583, 41628), True, 'import numpy as np\n'), ((41641, 41696), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (41649, 41696), True, 'import numpy as np\n'), ((41711, 41754), 'talib.CDLRICKSHAWMAN', 'talib.CDLRICKSHAWMAN', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (41731, 41754), False, 'import talib\n'), ((41899, 41953), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (41907, 41953), True, 'import numpy as np\n'), ((41965, 42019), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (41973, 42019), True, 'import numpy as np\n'), ((42030, 42083), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (42038, 42083), True, 'import numpy as np\n'), ((42096, 42151), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (42104, 42151), True, 'import numpy as np\n'), ((42166, 42214), 'talib.CDLRISEFALL3METHODS', 'talib.CDLRISEFALL3METHODS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (42191, 42214), False, 'import talib\n'), ((42346, 42400), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), 
"(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (42354, 42400), True, 'import numpy as np\n'), ((42412, 42466), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (42420, 42466), True, 'import numpy as np\n'), ((42477, 42530), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (42485, 42530), True, 'import numpy as np\n'), ((42543, 42598), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (42551, 42598), True, 'import numpy as np\n'), ((42613, 42660), 'talib.CDLSEPARATINGLINES', 'talib.CDLSEPARATINGLINES', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (42637, 42660), False, 'import talib\n'), ((42786, 42840), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (42794, 42840), True, 'import numpy as np\n'), ((42852, 42906), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (42860, 42906), True, 'import numpy as np\n'), ((42917, 42970), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (42925, 42970), True, 'import numpy as np\n'), ((42983, 43038), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (42991, 43038), True, 'import numpy as np\n'), ((43053, 43097), 'talib.CDLSHOOTINGSTAR', 'talib.CDLSHOOTINGSTAR', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (43074, 43097), False, 'import talib\n'), ((43224, 43278), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (43232, 43278), True, 'import numpy as np\n'), ((43290, 43344), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (43298, 43344), True, 'import numpy as np\n'), ((43355, 43408), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (43363, 43408), True, 'import numpy as np\n'), ((43421, 43476), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (43429, 43476), True, 'import numpy as np\n'), ((43491, 43532), 'talib.CDLSHORTLINE', 'talib.CDLSHORTLINE', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (43509, 43532), False, 'import talib\n'), ((43656, 43710), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (43664, 43710), True, 'import numpy as np\n'), ((43722, 43776), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (43730, 43776), True, 'import numpy as np\n'), ((43787, 43840), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (43795, 43840), True, 'import numpy as np\n'), ((43853, 43908), 'numpy.array', 
'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (43861, 43908), True, 'import numpy as np\n'), ((43923, 43966), 'talib.CDLSPINNINGTOP', 'talib.CDLSPINNINGTOP', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (43943, 43966), False, 'import talib\n'), ((44096, 44150), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (44104, 44150), True, 'import numpy as np\n'), ((44162, 44216), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (44170, 44216), True, 'import numpy as np\n'), ((44227, 44280), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (44235, 44280), True, 'import numpy as np\n'), ((44293, 44348), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (44301, 44348), True, 'import numpy as np\n'), ((44363, 44409), 'talib.CDLSTALLEDPATTERN', 'talib.CDLSTALLEDPATTERN', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (44386, 44409), False, 'import talib\n'), ((44537, 44591), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (44545, 44591), True, 'import numpy as np\n'), ((44603, 44657), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (44611, 44657), True, 'import numpy as np\n'), ((44668, 44721), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (44676, 44721), True, 'import numpy as np\n'), ((44734, 44789), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (44742, 44789), True, 'import numpy as np\n'), ((44804, 44849), 'talib.CDLSTICKSANDWICH', 'talib.CDLSTICKSANDWICH', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (44826, 44849), False, 'import talib\n'), ((45007, 45061), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (45015, 45061), True, 'import numpy as np\n'), ((45073, 45127), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (45081, 45127), True, 'import numpy as np\n'), ((45138, 45191), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (45146, 45191), True, 'import numpy as np\n'), ((45204, 45259), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (45212, 45259), True, 'import numpy as np\n'), ((45274, 45312), 'talib.CDLTAKURI', 'talib.CDLTAKURI', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (45289, 45312), False, 'import talib\n'), ((45432, 45486), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (45440, 45486), True, 'import numpy as np\n'), 
((45498, 45552), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (45506, 45552), True, 'import numpy as np\n'), ((45563, 45616), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (45571, 45616), True, 'import numpy as np\n'), ((45629, 45684), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (45637, 45684), True, 'import numpy as np\n'), ((45699, 45740), 'talib.CDLTASUKIGAP', 'talib.CDLTASUKIGAP', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (45717, 45740), False, 'import talib\n'), ((45867, 45921), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (45875, 45921), True, 'import numpy as np\n'), ((45933, 45987), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (45941, 45987), True, 'import numpy as np\n'), ((45998, 46051), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (46006, 46051), True, 'import numpy as np\n'), ((46064, 46119), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (46072, 46119), True, 'import numpy as np\n'), ((46134, 46175), 'talib.CDLTHRUSTING', 'talib.CDLTHRUSTING', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (46152, 46175), False, 'import talib\n'), ((46298, 46352), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (46306, 46352), True, 'import numpy as np\n'), ((46364, 46418), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (46372, 46418), True, 'import numpy as np\n'), ((46429, 46482), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (46437, 46482), True, 'import numpy as np\n'), ((46495, 46550), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (46503, 46550), True, 'import numpy as np\n'), ((46565, 46604), 'talib.CDLTRISTAR', 'talib.CDLTRISTAR', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (46581, 46604), False, 'import talib\n'), ((46731, 46785), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (46739, 46785), True, 'import numpy as np\n'), ((46797, 46851), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (46805, 46851), True, 'import numpy as np\n'), ((46862, 46915), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (46870, 46915), True, 'import numpy as np\n'), ((46928, 46983), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", 
(46936, 46983), True, 'import numpy as np\n'), ((46998, 47042), 'talib.CDLUNIQUE3RIVER', 'talib.CDLUNIQUE3RIVER', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (47019, 47042), False, 'import talib\n'), ((47178, 47232), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (47186, 47232), True, 'import numpy as np\n'), ((47244, 47298), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (47252, 47298), True, 'import numpy as np\n'), ((47309, 47362), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (47317, 47362), True, 'import numpy as np\n'), ((47375, 47430), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (47383, 47430), True, 'import numpy as np\n'), ((47445, 47492), 'talib.CDLUPSIDEGAP2CROWS', 'talib.CDLUPSIDEGAP2CROWS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (47469, 47492), False, 'import talib\n'), ((47642, 47696), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'open']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'open'], dtype='f8')\n", (47650, 47696), True, 'import numpy as np\n'), ((47708, 47762), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'high']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'high'], dtype='f8')\n", (47716, 47762), True, 'import numpy as np\n'), ((47773, 47826), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'low']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'low'], dtype='f8')\n", (47781, 47826), True, 'import numpy as np\n'), ((47839, 47894), 'numpy.array', 'np.array', (["equity.hp.loc[start:end, 'close']"], {'dtype': '"""f8"""'}), "(equity.hp.loc[start:end, 'close'], dtype='f8')\n", (47847, 47894), True, 'import numpy as np\n'), ((47909, 47957), 'talib.CDLXSIDEGAP3METHODS', 'talib.CDLXSIDEGAP3METHODS', (['opn', 'high', 'low', 'close'], {}), '(opn, high, low, close)\n', (47934, 47957), False, 'import talib\n'), ((48099, 48125), 'datetime.date', 'datetime.date', (['(2017)', '(8)', '(30)'], {}), '(2017, 8, 30)\n', (48112, 48125), False, 'import datetime\n'), ((48134, 48155), 'jtrade.core.instrument.equity.Equity', 'Equity.Equity', (['"""AAPL"""'], {}), "('AAPL')\n", (48147, 48155), True, 'import jtrade.core.instrument.equity as Equity\n')] |
import numpy as np
def thresholding(scores, labels):
"""
Args:
scores: Type:ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
labels: Type: ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
    Returns:
        tms: Type: ndarray
             shape: N
             One tuned decision threshold per training example
"""
assert scores.shape == labels.shape
N, Nc = scores.shape
# Sort by descending order of the scores
scores_ = scores.copy()
scores_ = np.fliplr(np.sort(scores_, axis=1))
# Get the indices by which every row in the score gets sorted
labels_sort_indices = np.fliplr(np.argsort(scores, axis=1))
# re arrange the labels according to these indices
labels_sorted = np.array([labels[i][labels_sort_indices[i]] for i in range(0, N)])
tms = []
# You have to go through every data point now
for i in range(N):
labels_row = labels_sorted[i]
scores_row = scores_[i]
        # Find the indices of the positive labels; each marks a potential
        # boundary between the right classes and the wrong ones
        boundary_indices = np.where(labels_row == 1)[0]
# Now you know the boundaries between the right class and
# the wrong class
# Find the candidate tms by adding them up
candidate_tms = []
for index in boundary_indices:
if index != (len(labels_row) - 1):
candidate_tms.append(
(scores_row[index] + scores_row[index + 1]) / 2)
# For every candidate tm find the F measure
positive_scores = scores_row[np.where(labels_row == 1)[0]]
negative_scores = scores_row[np.where(labels_row == 0)[0]]
best_tm = None
best_fscore = -np.inf
# This handles the cases of the form [0, 0, 0, 1]
# What should be chosen as the candidate tm here?
if len(candidate_tms) == 0:
if len(boundary_indices) == 1 \
and boundary_indices[0] == len(labels_row) - 1:
best_tm = scores_row[boundary_indices[0]]
# If all the classes are zero [0, 0, 0, 0]
# Then pick the score that is the lowest as the threshold
if len(boundary_indices) == 0:
best_tm = scores_row[len(scores_row) - 1] # since scores row is arranged in the descending order
for tm in candidate_tms:
num_true_positives = len(positive_scores[positive_scores >= tm])
            # negatives scoring at or above the threshold are false positives
            num_false_positives = len(negative_scores[negative_scores >= tm])
            precision = float(num_true_positives) / (num_true_positives + num_false_positives)
recall = float(num_true_positives) / len(positive_scores)
            fscore = (2 * precision * recall) / (precision + recall)
            if fscore > best_fscore:
                best_fscore = fscore
                best_tm = tm
best_tm = round(best_tm, 4)
tms.append(best_tm)
return np.array(tms)
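# A minimal usage sketch with hypothetical data (not part of the original
# snippet): per-row thresholds for a small multi-label score matrix.
if __name__ == "__main__":
    demo_scores = np.array([[0.9, 0.4, 0.1], [0.2, 0.8, 0.7]])
    demo_labels = np.array([[1, 0, 0], [0, 1, 1]])
    # one tuned threshold per row; here the midpoints [0.65, 0.45]
    print(thresholding(demo_scores, demo_labels))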
| [
"numpy.argsort",
"numpy.array",
"numpy.sort",
"numpy.where"
] | [((2879, 2892), 'numpy.array', 'np.array', (['tms'], {}), '(tms)\n', (2887, 2892), True, 'import numpy as np\n'), ((543, 567), 'numpy.sort', 'np.sort', (['scores_'], {'axis': '(1)'}), '(scores_, axis=1)\n', (550, 567), True, 'import numpy as np\n'), ((672, 698), 'numpy.argsort', 'np.argsort', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (682, 698), True, 'import numpy as np\n'), ((1075, 1100), 'numpy.where', 'np.where', (['(labels_row == 1)'], {}), '(labels_row == 1)\n', (1083, 1100), True, 'import numpy as np\n'), ((1558, 1583), 'numpy.where', 'np.where', (['(labels_row == 1)'], {}), '(labels_row == 1)\n', (1566, 1583), True, 'import numpy as np\n'), ((1625, 1650), 'numpy.where', 'np.where', (['(labels_row == 0)'], {}), '(labels_row == 0)\n', (1633, 1650), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
from loss import Loss
from npai_stats import NpaiStats
from sigmoid import Sigmoid
class CrossEntropy(Loss):
def __init__(self): pass
def loss(self, y, p):
# Avoid division by zero
p = np.clip(p, 1e-15, 1 - 1e-15)
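        # Categorical form: assumes one-hot targets y and probabilities p
        # (e.g. softmax outputs); the binary (1 - y) * log(1 - p) term is
        # deliberately absent, matching the gradient -(y / p) below.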
return - y * np.log(p)
def acc(self, y, p):
return NpaiStats.accuracy_score(np.argmax(y, axis=1), np.argmax(p, axis=1))
def gradient(self, y, p):
# Avoid division by zero
p = np.clip(p, 1e-15, 1 - 1e-15)
return - (y / p) | [
"numpy.clip",
"numpy.log",
"numpy.argmax"
] | [((262, 290), 'numpy.clip', 'np.clip', (['p', '(1e-15)', '(1 - 1e-15)'], {}), '(p, 1e-15, 1 - 1e-15)\n', (269, 290), True, 'import numpy as np\n'), ((508, 536), 'numpy.clip', 'np.clip', (['p', '(1e-15)', '(1 - 1e-15)'], {}), '(p, 1e-15, 1 - 1e-15)\n', (515, 536), True, 'import numpy as np\n'), ((312, 321), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (318, 321), True, 'import numpy as np\n'), ((388, 408), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (397, 408), True, 'import numpy as np\n'), ((410, 430), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (419, 430), True, 'import numpy as np\n')] |
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
from typing import NoReturn
import plotly.express as px
import plotly.io as pio
import plotly.graph_objects as go
pio.templates.default = "simple_white"
def filter1(data: pd.DataFrame) -> pd.DataFrame:
"""
drops NaN values
"""
# check how many null values there are
# not many rows are having nan values so we'll drop those
data.dropna(inplace=True)
return data
def filter2(data: pd.DataFrame) -> pd.DataFrame:
"""
drops values bellow 0 that doesn't have many occurrences
with other values that does have many occurrences we'll leave to the next filter
"""
# check values distribution
# print(data["Country"].value_counts().sort_index())
# print(data["City"].value_counts().sort_index())
# print(data["Temp"].value_counts().sort_index())
# filter temp == -72 values, consider as noise
data = data.loc[data["Temp"] > -70]
# great improvement !
return data
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
raw_data = pd.read_csv(filename, parse_dates=["Date"])
# null and noisy values filter
filtered_data_1 = filter1(raw_data)
filtered_data_2 = filter2(filtered_data_1)
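    # timetuple().tm_yday is the 1-based ordinal day of the year (1..365/366)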
filtered_data_2["DayOfYear"] = filtered_data_2.Date.apply(lambda row: row.timetuple().tm_yday)
return filtered_data_2
def question_2(df: pd.DataFrame) -> NoReturn:
isr_data = df.loc[df["Country"] == "Israel"]
    years_ = set(isr_data["Year"])
data_ = []
for yr in years_:
        # index isr_data with its own mask so the boolean index stays aligned
        temp_data = isr_data.loc[isr_data["Year"] == yr]
data_.append(go.Scatter(x=temp_data["DayOfYear"], y=temp_data["Temp"], mode="markers",
name=f"r${yr}$", showlegend=True))
go.Figure(data_, layout=go.Layout(barmode='overlay',
title=r"$Temperature In Israel Throughout The Years$",
xaxis_title=f"$Day Of Year$",
yaxis_title="r$Temperature$")).show()
std = round(isr_data.groupby(["Month"])["Temp"].agg('std'), 2)
months_ = list(range(1, 13))
    px.bar(x=months_, y=std, title=r"$Israel Months STD$", text=std).show()
def question_3(df: pd.DataFrame) -> NoReturn:
std_data = df.groupby(["Country", "Month"])["Temp"].agg('std')
average_data = df.groupby(["Country", "Month"])["Temp"].agg('mean')
countries_ = set(df["Country"])
months_ = list(range(1, 13))
data_ = []
for c in countries_:
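        # monthly mean temperature per country, with its std as error bars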
data_.append(go.Scatter(x=months_, y=average_data[c],
error_y=dict(type='data',
array=std_data[c],
visible=True),
mode="markers+lines",
name=f"r${c}$", showlegend=True))
go.Figure(data_, layout=go.Layout(barmode='overlay',
title=r"$Average Temperature Throughout The Years$",
xaxis_title=f"$Day Of Year$",
yaxis_title="r$Average Temperature$")).show()
def question_4(df: pd.DataFrame) -> NoReturn:
isr_data = df.loc[df["Country"] == "Israel"]
train_X, train_y, test_X, test_y = split_train_test(isr_data["DayOfYear"], isr_data["Temp"])
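    # split_train_test is assumed to use IMLearn's default 75/25 train/test split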
loss_ = []
for k in range(1, 11):
pf = PolynomialFitting(k)
pf.fit(np.array(train_X), np.array(train_y))
loss_.append(round(pf.loss(np.array(test_X), np.array(test_y)), 2))
print(loss_)
    px.bar(x=list(range(1, 11)), y=loss_, title=r"$Loss Values by K$", text=loss_).show()
def question_5(df: pd.DataFrame) -> NoReturn:
isr_data = df[df["Country"] == "Israel"]
countries_ = ["Jordan", "The Netherlands", "South Africa"]
pf = PolynomialFitting(5)
train_X, train_y = np.array(isr_data["DayOfYear"]), np.array(isr_data["Temp"])
pf.fit(train_X, train_y)
loss_ = []
for c in countries_:
temp_data = df[df["Country"] == c]
test_X, test_y = np.array(temp_data["DayOfYear"]), np.array(temp_data["Temp"])
loss_.append(round(pf.loss(test_X, test_y), 2))
    px.bar(x=countries_, y=loss_, title=r"$Loss Values by Country$", text=loss_).show()
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
matrix = load_data(
"/Users/omersiton/IML.HUJI/datasets/City_Temperature.csv")
# Question 2 - Exploring data for specific country
question_2(matrix)
# Question 3 - Exploring differences between countries
question_3(matrix)
# Question 4 - Fitting model for different values of `k`
question_4(matrix)
# Question 5 - Evaluating fitted model on different countries
question_5(matrix)
| [
"plotly.graph_objects.Layout",
"pandas.read_csv",
"plotly.express.bar",
"IMLearn.utils.split_train_test",
"numpy.array",
"plotly.graph_objects.Scatter",
"numpy.random.seed",
"IMLearn.learners.regressors.PolynomialFitting"
] | [((1425, 1468), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'parse_dates': "['Date']"}), "(filename, parse_dates=['Date'])\n", (1436, 1468), True, 'import pandas as pd\n'), ((3676, 3733), 'IMLearn.utils.split_train_test', 'split_train_test', (["isr_data['DayOfYear']", "isr_data['Temp']"], {}), "(isr_data['DayOfYear'], isr_data['Temp'])\n", (3692, 3733), False, 'from IMLearn.utils import split_train_test\n'), ((4211, 4231), 'IMLearn.learners.regressors.PolynomialFitting', 'PolynomialFitting', (['(5)'], {}), '(5)\n', (4228, 4231), False, 'from IMLearn.learners.regressors import PolynomialFitting\n'), ((4684, 4701), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4698, 4701), True, 'import numpy as np\n'), ((3789, 3809), 'IMLearn.learners.regressors.PolynomialFitting', 'PolynomialFitting', (['k'], {}), '(k)\n', (3806, 3809), False, 'from IMLearn.learners.regressors import PolynomialFitting\n'), ((4255, 4286), 'numpy.array', 'np.array', (["isr_data['DayOfYear']"], {}), "(isr_data['DayOfYear'])\n", (4263, 4286), True, 'import numpy as np\n'), ((4288, 4314), 'numpy.array', 'np.array', (["isr_data['Temp']"], {}), "(isr_data['Temp'])\n", (4296, 4314), True, 'import numpy as np\n'), ((1952, 2063), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "temp_data['DayOfYear']", 'y': "temp_data['Temp']", 'mode': '"""markers"""', 'name': 'f"""r${yr}$"""', 'showlegend': '(True)'}), "(x=temp_data['DayOfYear'], y=temp_data['Temp'], mode='markers',\n name=f'r${yr}$', showlegend=True)\n", (1962, 2063), True, 'import plotly.graph_objects as go\n'), ((2508, 2572), 'plotly.express.bar', 'px.bar', ([], {'x': 'months_', 'y': 'std', 'title': '"""r$Israel Months STD$"""', 'text': 'std'}), "(x=months_, y=std, title='r$Israel Months STD$', text=std)\n", (2514, 2572), True, 'import plotly.express as px\n'), ((3825, 3842), 'numpy.array', 'np.array', (['train_X'], {}), '(train_X)\n', (3833, 3842), True, 'import numpy as np\n'), ((3844, 3861), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (3852, 3861), True, 'import numpy as np\n'), ((4452, 4484), 'numpy.array', 'np.array', (["temp_data['DayOfYear']"], {}), "(temp_data['DayOfYear'])\n", (4460, 4484), True, 'import numpy as np\n'), ((4486, 4513), 'numpy.array', 'np.array', (["temp_data['Temp']"], {}), "(temp_data['Temp'])\n", (4494, 4513), True, 'import numpy as np\n'), ((4574, 4644), 'plotly.express.bar', 'px.bar', ([], {'x': 'countries_', 'y': 'loss_', 'title': '"""r$Loss Values by K$"""', 'text': 'loss_'}), "(x=countries_, y=loss_, title='r$Loss Values by K$', text=loss_)\n", (4580, 4644), True, 'import plotly.express as px\n'), ((2122, 2274), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'barmode': '"""overlay"""', 'title': '"""$Temperature In Israel Throughout The Years$"""', 'xaxis_title': 'f"""$Day Of Year$"""', 'yaxis_title': '"""r$Temperature$"""'}), "(barmode='overlay', title=\n '$Temperature In Israel Throughout The Years$', xaxis_title=\n f'$Day Of Year$', yaxis_title='r$Temperature$')\n", (2131, 2274), True, 'import plotly.graph_objects as go\n'), ((3268, 3426), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'barmode': '"""overlay"""', 'title': '"""$Average Temperature Throughout The Years$"""', 'xaxis_title': 'f"""$Day Of Year$"""', 'yaxis_title': '"""r$Average Temperature$"""'}), "(barmode='overlay', title=\n '$Average Temperature Throughout The Years$', xaxis_title=\n f'$Day Of Year$', yaxis_title='r$Average Temperature$')\n", (3277, 3426), True, 'import plotly.graph_objects as go\n'), ((3898, 3914), 
'numpy.array', 'np.array', (['test_X'], {}), '(test_X)\n', (3906, 3914), True, 'import numpy as np\n'), ((3916, 3932), 'numpy.array', 'np.array', (['test_y'], {}), '(test_y)\n', (3924, 3932), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Bayesian linear regression using variational inference.
This version directly regresses on the data X, rather than regressing
on a placeholder X. Note this prevents the model from conditioning on
other values of X.
References
----------
http://edwardlib.org/tutorials/supervised-regression
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Normal
def build_toy_dataset(N, noise_std=0.1):
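  # inputs form two disjoint clusters, [0, 2] and [6, 8], of N/2 points each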
  X = np.concatenate([np.linspace(0, 2, num=N // 2),
                      np.linspace(6, 8, num=N // 2)])
y = 5.0 * X + np.random.normal(0, noise_std, size=N)
X = X.reshape((N, 1))
return X, y
ed.set_seed(42)
N = 40 # num data points
D = 1 # num features
# DATA
X_data, y_data = build_toy_dataset(N)
# MODEL
X = tf.cast(X_data, tf.float32)
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
b = Normal(loc=tf.zeros(1), scale=tf.ones(1))
y = Normal(loc=ed.dot(X, w) + b, scale=tf.ones(N))
# INFERENCE
qw = Normal(loc=tf.Variable(tf.random_normal([D])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))
qb = Normal(loc=tf.Variable(tf.random_normal([1])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
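# KLqp maximizes the ELBO (equivalently, minimizes KL(q || p)) to fit the
# Normal variational factors qw and qb.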
inference = ed.KLqp({w: qw, b: qb}, data={y: y_data})
inference.run()
| [
"numpy.random.normal",
"tensorflow.random_normal",
"tensorflow.ones",
"edward.KLqp",
"edward.set_seed",
"numpy.linspace",
"tensorflow.cast",
"edward.dot",
"tensorflow.zeros"
] | [((771, 786), 'edward.set_seed', 'ed.set_seed', (['(42)'], {}), '(42)\n', (782, 786), True, 'import edward as ed\n'), ((895, 922), 'tensorflow.cast', 'tf.cast', (['X_data', 'tf.float32'], {}), '(X_data, tf.float32)\n', (902, 922), True, 'import tensorflow as tf\n'), ((1336, 1377), 'edward.KLqp', 'ed.KLqp', (['{w: qw, b: qb}'], {'data': '{y: y_data}'}), '({w: qw, b: qb}, data={y: y_data})\n', (1343, 1377), True, 'import edward as ed\n'), ((692, 730), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_std'], {'size': 'N'}), '(0, noise_std, size=N)\n', (708, 730), True, 'import numpy as np\n'), ((938, 949), 'tensorflow.zeros', 'tf.zeros', (['D'], {}), '(D)\n', (946, 949), True, 'import tensorflow as tf\n'), ((957, 967), 'tensorflow.ones', 'tf.ones', (['D'], {}), '(D)\n', (964, 967), True, 'import tensorflow as tf\n'), ((984, 995), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (992, 995), True, 'import tensorflow as tf\n'), ((1003, 1013), 'tensorflow.ones', 'tf.ones', (['(1)'], {}), '(1)\n', (1010, 1013), True, 'import tensorflow as tf\n'), ((1054, 1064), 'tensorflow.ones', 'tf.ones', (['N'], {}), '(N)\n', (1061, 1064), True, 'import tensorflow as tf\n'), ((593, 621), 'numpy.linspace', 'np.linspace', (['(0)', '(2)'], {'num': '(N / 2)'}), '(0, 2, num=N / 2)\n', (604, 621), True, 'import numpy as np\n'), ((645, 673), 'numpy.linspace', 'np.linspace', (['(6)', '(8)'], {'num': '(N / 2)'}), '(6, 8, num=N / 2)\n', (656, 673), True, 'import numpy as np\n'), ((1030, 1042), 'edward.dot', 'ed.dot', (['X', 'w'], {}), '(X, w)\n', (1036, 1042), True, 'import edward as ed\n'), ((1107, 1128), 'tensorflow.random_normal', 'tf.random_normal', (['[D]'], {}), '([D])\n', (1123, 1128), True, 'import tensorflow as tf\n'), ((1229, 1250), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (1245, 1250), True, 'import tensorflow as tf\n'), ((1176, 1197), 'tensorflow.random_normal', 'tf.random_normal', (['[D]'], {}), '([D])\n', (1192, 1197), True, 'import tensorflow as tf\n'), ((1298, 1319), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (1314, 1319), True, 'import tensorflow as tf\n')] |
import os
import time
from pyscf import scf
import numpy as np
from mldftdat.lowmem_analyzers import RHFAnalyzer, UHFAnalyzer
from mldftdat.workflow_utils import get_save_dir, SAVE_ROOT, load_mol_ids
from mldftdat.density import get_exchange_descriptors2, LDA_FACTOR, GG_AMIN
from mldftdat.data import get_unique_coord_indexes_spherical
import logging
import yaml
from argparse import ArgumentParser
"""
Script to compile a dataset from the CIDER DB for training a CIDER functional.
"""
def compile_dataset2(DATASET_NAME, MOL_IDS, SAVE_ROOT, CALC_TYPE, FUNCTIONAL, BASIS,
spherical_atom=False, locx=False, lam=0.5,
version='a', **gg_kwargs):
all_descriptor_data = None
all_rho_data = None
all_values = []
all_weights = []
cutoffs = []
    if locx:
        # the locx path needs loc_analyzers, which this version does not import
        raise ValueError('locx setting not supported in this version! (but might be later)')
    Analyzer = UHFAnalyzer if 'U' in CALC_TYPE else RHFAnalyzer
for MOL_ID in MOL_IDS:
logging.info('Computing descriptors for {}'.format(MOL_ID))
data_dir = get_save_dir(SAVE_ROOT, CALC_TYPE, BASIS, MOL_ID, FUNCTIONAL)
start = time.monotonic()
analyzer = Analyzer.load(data_dir + '/data.hdf5')
analyzer.get_ao_rho_data()
if type(analyzer.calc) == scf.hf.RHF:
restricted = True
else:
restricted = False
end = time.monotonic()
logging.info('Analyzer load time {}'.format(end - start))
if spherical_atom:
start = time.monotonic()
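            # presumably deduplicates symmetry-equivalent grid points so a
            # spherical atom is sampled once per unique radial shell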
indexes = get_unique_coord_indexes_spherical(analyzer.grid.coords)
end = time.monotonic()
logging.info('Index scanning time {}'.format(end - start))
start = time.monotonic()
if restricted:
descriptor_data = get_exchange_descriptors2(
analyzer, restricted=True, version=version,
**gg_kwargs
)
else:
descriptor_data_u, descriptor_data_d = \
get_exchange_descriptors2(
analyzer, restricted=False, version=version,
**gg_kwargs
)
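            # stack the spin-up and spin-down grid points along the sample axis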
descriptor_data = np.append(descriptor_data_u, descriptor_data_d,
axis = 1)
end = time.monotonic()
logging.info('Get descriptor time {}'.format(end - start))
if locx:
logging.info('Getting loc fx with lambda={}'.format(lam))
values = analyzer.get_loc_fx_energy_density(lam = lam, overwrite=True)
if not restricted:
values = 2 * np.append(analyzer.loc_fx_energy_density_u,
analyzer.loc_fx_energy_density_d)
else:
values = analyzer.get_fx_energy_density()
if not restricted:
values = 2 * np.append(analyzer.fx_energy_density_u,
analyzer.fx_energy_density_d)
rho_data = analyzer.rho_data
if not restricted:
rho_data = 2 * np.append(rho_data[0], rho_data[1], axis=1)
if spherical_atom:
values = values[indexes]
descriptor_data = descriptor_data[:,indexes]
rho_data = rho_data[:,indexes]
weights = analyzer.grid.weights[indexes]
else:
weights = analyzer.grid.weights
if all_descriptor_data is None:
all_descriptor_data = descriptor_data
else:
all_descriptor_data = np.append(all_descriptor_data, descriptor_data,
axis = 1)
if all_rho_data is None:
all_rho_data = rho_data
else:
all_rho_data = np.append(all_rho_data, rho_data, axis=1)
all_values = np.append(all_values, values)
all_weights = np.append(all_weights, weights)
if not restricted:
# two copies for unrestricted case
all_weights = np.append(all_weights, weights)
cutoffs.append(all_values.shape[0])
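    # 'cutoffs' holds the cumulative grid-point count after each molecule, so
    # the concatenated arrays saved below can be split back per molecule.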
DATASET_NAME = os.path.basename(DATASET_NAME)
save_dir = os.path.join(SAVE_ROOT, 'DATASETS',
FUNCTIONAL, BASIS, version, DATASET_NAME)
    os.makedirs(save_dir, exist_ok=True)
rho_file = os.path.join(save_dir, 'rho.npy')
desc_file = os.path.join(save_dir, 'desc.npy')
val_file = os.path.join(save_dir, 'val.npy')
wt_file = os.path.join(save_dir, 'wt.npy')
cut_file = os.path.join(save_dir, 'cut.npy')
np.save(rho_file, all_rho_data)
np.save(desc_file, all_descriptor_data)
np.save(val_file, all_values)
np.save(wt_file, all_weights)
np.save(cut_file, np.array(cutoffs))
settings = {
'DATASET_NAME': DATASET_NAME,
'MOL_IDS': MOL_IDS,
'SAVE_ROOT': SAVE_ROOT,
'CALC_TYPE': CALC_TYPE,
'FUNCTIONAL': FUNCTIONAL,
'BASIS': BASIS,
'spherical_atom': spherical_atom,
'locx': locx,
'lam': lam,
'version': version
}
settings.update(gg_kwargs)
with open(os.path.join(save_dir, 'settings.yaml'), 'w') as f:
yaml.dump(settings, f)
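# A minimal read-back sketch (an addition for illustration, not part of the
# original pipeline; 'load_dataset2' and 'blocks' are hypothetical names). It
# shows how the arrays written above can be reloaded and the concatenated
# values split into per-molecule blocks using the saved cutoffs.
def load_dataset2(save_dir):
    rho = np.load(os.path.join(save_dir, 'rho.npy'))
    desc = np.load(os.path.join(save_dir, 'desc.npy'))
    val = np.load(os.path.join(save_dir, 'val.npy'))
    wt = np.load(os.path.join(save_dir, 'wt.npy'))
    cut = np.load(os.path.join(save_dir, 'cut.npy')).astype(int)
    # cut[i] is the cumulative grid-point count after molecule i, so
    # consecutive boundaries delimit each molecule's slice of 'val'.
    starts = np.concatenate(([0], cut[:-1]))
    blocks = [val[i:j] for i, j in zip(starts, cut)]
    return rho, desc, val, wt, cut, blocks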
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
    m_desc = 'Compile dataset of exchange descriptors'
parser = ArgumentParser(description=m_desc)
parser.add_argument('mol_id_file', type=str,
help='yaml file from which to read mol_ids to parse')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('--spherical-atom', action='store_true',
default=False, help='whether dataset contains spherical atoms')
parser.add_argument('--locx', action='store_true',
default=False, help='whether to use transformed exchange hole')
parser.add_argument('--lam', default=0.5, type=float,
help='lambda factor for exchange hole, only used if locx=True')
parser.add_argument('--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--gg-a0', default=8.0, type=float)
parser.add_argument('--gg-facmul', default=1.0, type=float)
parser.add_argument('--gg-amin', default=GG_AMIN, type=float)
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
version = args.version.lower()
assert version in ['a', 'b', 'c']
calc_type, mol_ids = load_mol_ids(args.mol_id_file)
assert ('HF' in calc_type) or (args.functional is not None),\
'Must specify functional if not using HF reference.'
if args.mol_id_file.endswith('.yaml'):
mol_id_code = args.mol_id_file[:-5]
else:
mol_id_code = args.mol_id_file
dataname = 'XTR{}_{}'.format(version.upper(), mol_id_code.upper())
if args.spherical_atom:
        pass  # dataname = 'SPH_' + dataname
if args.locx:
dataname = 'LOCX_' + dataname
if args.suffix is not None:
dataname = dataname + '_' + args.suffix
# TODO remove this if locx supported in the future
args.locx = False
    # Both branches of the original if/else on version passed identical
    # arguments, so a single call suffices for every supported version.
    compile_dataset2(
        dataname, mol_ids, SAVE_ROOT, calc_type, args.functional, args.basis,
        spherical_atom=args.spherical_atom, locx=args.locx, lam=args.lam,
        version=version, a0=args.gg_a0, fac_mul=args.gg_facmul,
        amin=args.gg_amin
    )
| [
"logging.basicConfig",
"mldftdat.density.get_exchange_descriptors2",
"argparse.ArgumentParser",
"os.makedirs",
"yaml.dump",
"time.monotonic",
"os.path.join",
"numpy.append",
"numpy.array",
"os.path.isdir",
"mldftdat.workflow_utils.load_mol_ids",
"os.path.basename",
"mldftdat.data.get_unique_... | [((4283, 4313), 'os.path.basename', 'os.path.basename', (['DATASET_NAME'], {}), '(DATASET_NAME)\n', (4299, 4313), False, 'import os, time\n'), ((4329, 4406), 'os.path.join', 'os.path.join', (['SAVE_ROOT', '"""DATASETS"""', 'FUNCTIONAL', 'BASIS', 'version', 'DATASET_NAME'], {}), "(SAVE_ROOT, 'DATASETS', FUNCTIONAL, BASIS, version, DATASET_NAME)\n", (4341, 4406), False, 'import os, time\n'), ((4531, 4564), 'os.path.join', 'os.path.join', (['save_dir', '"""rho.npy"""'], {}), "(save_dir, 'rho.npy')\n", (4543, 4564), False, 'import os, time\n'), ((4581, 4615), 'os.path.join', 'os.path.join', (['save_dir', '"""desc.npy"""'], {}), "(save_dir, 'desc.npy')\n", (4593, 4615), False, 'import os, time\n'), ((4631, 4664), 'os.path.join', 'os.path.join', (['save_dir', '"""val.npy"""'], {}), "(save_dir, 'val.npy')\n", (4643, 4664), False, 'import os, time\n'), ((4679, 4711), 'os.path.join', 'os.path.join', (['save_dir', '"""wt.npy"""'], {}), "(save_dir, 'wt.npy')\n", (4691, 4711), False, 'import os, time\n'), ((4727, 4760), 'os.path.join', 'os.path.join', (['save_dir', '"""cut.npy"""'], {}), "(save_dir, 'cut.npy')\n", (4739, 4760), False, 'import os, time\n'), ((4765, 4796), 'numpy.save', 'np.save', (['rho_file', 'all_rho_data'], {}), '(rho_file, all_rho_data)\n', (4772, 4796), True, 'import numpy as np\n'), ((4801, 4840), 'numpy.save', 'np.save', (['desc_file', 'all_descriptor_data'], {}), '(desc_file, all_descriptor_data)\n', (4808, 4840), True, 'import numpy as np\n'), ((4845, 4874), 'numpy.save', 'np.save', (['val_file', 'all_values'], {}), '(val_file, all_values)\n', (4852, 4874), True, 'import numpy as np\n'), ((4879, 4908), 'numpy.save', 'np.save', (['wt_file', 'all_weights'], {}), '(wt_file, all_weights)\n', (4886, 4908), True, 'import numpy as np\n'), ((5433, 5472), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (5452, 5472), False, 'import logging\n'), ((5542, 5576), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': 'm_desc'}), '(description=m_desc)\n', (5556, 5576), False, 'from argparse import ArgumentParser\n'), ((7010, 7040), 'mldftdat.workflow_utils.load_mol_ids', 'load_mol_ids', (['args.mol_id_file'], {}), '(args.mol_id_file)\n', (7022, 7040), False, 'from mldftdat.workflow_utils import get_save_dir, SAVE_ROOT, load_mol_ids\n'), ((1222, 1283), 'mldftdat.workflow_utils.get_save_dir', 'get_save_dir', (['SAVE_ROOT', 'CALC_TYPE', 'BASIS', 'MOL_ID', 'FUNCTIONAL'], {}), '(SAVE_ROOT, CALC_TYPE, BASIS, MOL_ID, FUNCTIONAL)\n', (1234, 1283), False, 'from mldftdat.workflow_utils import get_save_dir, SAVE_ROOT, load_mol_ids\n'), ((1300, 1316), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1314, 1316), False, 'import os, time\n'), ((1545, 1561), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1559, 1561), False, 'import os, time\n'), ((1893, 1909), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1907, 1909), False, 'import os, time\n'), ((2511, 2527), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2525, 2527), False, 'import os, time\n'), ((4003, 4032), 'numpy.append', 'np.append', (['all_values', 'values'], {}), '(all_values, values)\n', (4012, 4032), True, 'import numpy as np\n'), ((4055, 4086), 'numpy.append', 'np.append', (['all_weights', 'weights'], {}), '(all_weights, weights)\n', (4064, 4086), True, 'import numpy as np\n'), ((4446, 4469), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (4459, 4469), False, 'import os, time\n'), ((4479, 
4515), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (4490, 4515), False, 'import os, time\n'), ((4931, 4948), 'numpy.array', 'np.array', (['cutoffs'], {}), '(cutoffs)\n', (4939, 4948), True, 'import numpy as np\n'), ((5377, 5399), 'yaml.dump', 'yaml.dump', (['settings', 'f'], {}), '(settings, f)\n', (5386, 5399), False, 'import yaml\n'), ((1675, 1691), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1689, 1691), False, 'import os, time\n'), ((1714, 1770), 'mldftdat.data.get_unique_coord_indexes_spherical', 'get_unique_coord_indexes_spherical', (['analyzer.grid.coords'], {}), '(analyzer.grid.coords)\n', (1748, 1770), False, 'from mldftdat.data import get_unique_coord_indexes_spherical\n'), ((1789, 1805), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1803, 1805), False, 'import os, time\n'), ((1963, 2050), 'mldftdat.density.get_exchange_descriptors2', 'get_exchange_descriptors2', (['analyzer'], {'restricted': '(True)', 'version': 'version'}), '(analyzer, restricted=True, version=version, **\n gg_kwargs)\n', (1988, 2050), False, 'from mldftdat.density import get_exchange_descriptors2, LDA_FACTOR, GG_AMIN\n'), ((2189, 2277), 'mldftdat.density.get_exchange_descriptors2', 'get_exchange_descriptors2', (['analyzer'], {'restricted': '(False)', 'version': 'version'}), '(analyzer, restricted=False, version=version, **\n gg_kwargs)\n', (2214, 2277), False, 'from mldftdat.density import get_exchange_descriptors2, LDA_FACTOR, GG_AMIN\n'), ((2399, 2454), 'numpy.append', 'np.append', (['descriptor_data_u', 'descriptor_data_d'], {'axis': '(1)'}), '(descriptor_data_u, descriptor_data_d, axis=1)\n', (2408, 2454), True, 'import numpy as np\n'), ((3728, 3783), 'numpy.append', 'np.append', (['all_descriptor_data', 'descriptor_data'], {'axis': '(1)'}), '(all_descriptor_data, descriptor_data, axis=1)\n', (3737, 3783), True, 'import numpy as np\n'), ((3940, 3981), 'numpy.append', 'np.append', (['all_rho_data', 'rho_data'], {'axis': '(1)'}), '(all_rho_data, rho_data, axis=1)\n', (3949, 3981), True, 'import numpy as np\n'), ((4187, 4218), 'numpy.append', 'np.append', (['all_weights', 'weights'], {}), '(all_weights, weights)\n', (4196, 4218), True, 'import numpy as np\n'), ((5317, 5356), 'os.path.join', 'os.path.join', (['save_dir', '"""settings.yaml"""'], {}), "(save_dir, 'settings.yaml')\n", (5329, 5356), False, 'import os, time\n'), ((3270, 3313), 'numpy.append', 'np.append', (['rho_data[0]', 'rho_data[1]'], {'axis': '(1)'}), '(rho_data[0], rho_data[1], axis=1)\n', (3279, 3313), True, 'import numpy as np\n'), ((2825, 2902), 'numpy.append', 'np.append', (['analyzer.loc_fx_energy_density_u', 'analyzer.loc_fx_energy_density_d'], {}), '(analyzer.loc_fx_energy_density_u, analyzer.loc_fx_energy_density_d)\n', (2834, 2902), True, 'import numpy as np\n'), ((3070, 3139), 'numpy.append', 'np.append', (['analyzer.fx_energy_density_u', 'analyzer.fx_energy_density_d'], {}), '(analyzer.fx_energy_density_u, analyzer.fx_energy_density_d)\n', (3079, 3139), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import random
import signal
import numpy as np
from robolearn.old_utils.sampler import Sampler
from robolearn.old_agents import GPSAgent
from robolearn.old_algos.gps.gps import GPS
from robolearn.old_costs.cost_action import CostAction
from robolearn.old_costs.cost_fk import CostFK
from robolearn.old_costs.cost_sum import CostSum
from robolearn.old_costs.cost_utils import RAMP_FINAL_ONLY, RAMP_CONSTANT
from robolearn.old_costs.cost_utils import evall1l2term
from robolearn.old_envs import BigmanEnv
from robolearn.old_policies.lin_gauss_init import init_pd, init_demos
from robolearn.old_policies.policy_opt.policy_opt_tf import PolicyOptTf
from robolearn.old_policies.policy_opt.tf_models import tf_network
from robolearn.old_policies.policy_prior import ConstantPolicyPrior # For MDGPS
from robolearn.old_utils.dynamics.dynamics_lr_prior import DynamicsLRPrior
from robolearn.old_utils.dynamics.dynamics_prior_gmm import DynamicsPriorGMM
from robolearn.old_utils.iit.iit_robots_params import bigman_params
from robolearn.old_utils.print_utils import change_print_color
from robolearn.old_utils.robot_model import RobotModel
from robolearn.old_utils.tasks.bigman.lift_box_utils import Reset_condition_bigman_box_gazebo
from robolearn.old_utils.tasks.bigman.lift_box_utils import create_bigman_box_condition
from robolearn.old_utils.tasks.bigman.lift_box_utils import create_box_relative_pose
from robolearn.old_utils.tasks.bigman.lift_box_utils import create_hand_relative_pose
from robolearn.old_utils.tasks.bigman.lift_box_utils import spawn_box_gazebo
from robolearn.old_utils.tasks.bigman.lift_box_utils import task_space_torque_control_demos, \
load_task_space_torque_control_demos
from robolearn.old_utils.traj_opt.traj_opt_lqr import TrajOptLQR
np.set_printoptions(precision=4, suppress=True, linewidth=1000)
def kill_everything(_signal=None, _frame=None):
print("\n\033[1;31mThe script has been kill by the user!!")
os._exit(1)
signal.signal(signal.SIGINT, kill_everything)
# ################## #
# ################## #
# ### PARAMETERS ### #
# ################## #
# ################## #
# Task parameters
Ts = 0.01
Treach = 5
Tlift = 0 # 3.8
Tinter = 0 # 0.5
Tend = 0 # 0.7
# EndTime = 4 # Using final time to define the horizon
EndTime = Treach + Tinter + Tlift + Tend # Using final time to define the horizon
init_with_demos = False
demos_dir = None # 'TASKSPACE_TORQUE_CTRL_DEMO_2017-07-21_16:32:39'
seed = 6
random.seed(seed)
np.random.seed(seed)
# BOX
box_x = 0.70
box_y = 0.00
box_z = 0.0184
box_yaw = 0 # Degrees
box_size = [0.4, 0.5, 0.3]
final_box_height = 0.0
box_relative_pose = create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw)
# Robot Model (It is used to calculate the IK cost)
#robot_urdf_file = os.environ["ROBOTOLOGY_ROOT"]+'/configs/ADVR_shared/bigman/urdf/bigman.urdf'
robot_urdf_file = os.environ["ROBOTOLOGY_ROOT"]+'/robots/iit-bigman-ros-pkg/bigman_urdf/urdf/bigman.urdf'
robot_model = RobotModel(robot_urdf_file)
LH_name = 'LWrMot3'
RH_name = 'RWrMot3'
l_soft_hand_offset = np.array([0.000, -0.030, -0.210])
r_soft_hand_offset = np.array([0.000, 0.030, -0.210])
touching_box_config = np.array([0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0.,
0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633,
#0., 0., 0., -1.5708, 0., 0., 0.,
0., 0.,
0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633])
#0., 0., 0., -1.5708, 0., 0., 0.])
# ################### #
# ################### #
# ### ENVIRONMENT ### #
# ################### #
# ################### #
change_print_color.change('BLUE')
print("\nCreating Bigman environment...")
# Robot configuration
interface = 'ros'
body_part_active = 'RA'
body_part_sensed = 'RA'
command_type = 'effort'
left_hand_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0],
hand_x=0.0, hand_y=box_size[1]/2-0.02, hand_z=0.0, hand_yaw=0)
# left_hand_rel_pose[:] = left_hand_rel_pose[[3, 4, 5, 6, 0, 1, 2]] # Changing from 'pos+orient' to 'orient+pos'
right_hand_rel_pose = create_hand_relative_pose([0, 0, 0, 1, 0, 0, 0],
hand_x=0.0, hand_y=-box_size[1]/2+0.02, hand_z=0.0, hand_yaw=0)
# right_hand_rel_pose[:] = right_hand_rel_pose[[3, 4, 5, 6, 0, 1, 2]] # Changing from 'pos+orient' to 'orient+pos'
reset_condition_bigman_box_gazebo_fcn = Reset_condition_bigman_box_gazebo()
observation_active = [{'name': 'joint_state',
'type': 'joint_state',
'ros_topic': '/xbotcore/bigman/joint_states',
# 'fields': ['link_position', 'link_velocity', 'effort'],
'fields': ['link_position', 'link_velocity'],
# 'joints': bigman_params['joint_ids']['UB']},
'joints': bigman_params['joint_ids'][body_part_sensed]},
{'name': 'prev_cmd',
'type': 'prev_cmd'},
{'name': 'distance_left_arm',
'type': 'fk_pose',
'body_name': LH_name,
'body_offset': l_soft_hand_offset,
'target_offset': left_hand_rel_pose,
'fields': ['orientation', 'position']},
{'name': 'distance_right_arm',
'type': 'fk_pose',
'body_name': RH_name,
'body_offset': r_soft_hand_offset,
'target_offset': right_hand_rel_pose,
'fields': ['orientation', 'position']},
# {'name': 'ft_left_arm',
# 'type': 'fk_vel',
# 'ros_topic': None,
# 'body_name': LH_name,
# 'body_offset': l_soft_hand_offset,
# 'fields': ['orientation', 'position']},
# {'name': 'ft_left_arm',
# 'type': 'ft_sensor',
# 'ros_topic': '/xbotcore/bigman/ft/l_arm_ft',
# 'fields': ['force', 'torque']},
# {'name': 'ft_right_arm',
# 'type': 'ft_sensor',
# 'ros_topic': '/xbotcore/bigman/ft/r_arm_ft',
# 'fields': ['force', 'torque']},
# {'name': 'ft_left_leg',
# 'type': 'ft_sensor',
# 'ros_topic': '/xbotcore/bigman/ft/l_leg_ft',
# 'fields': ['force', 'torque']},
# {'name': 'ft_right_leg',
# 'type': 'ft_sensor',
# 'ros_topic': '/xbotcore/bigman/ft/r_leg_ft',
# 'fields': ['force', 'torque']},
# {'name': 'imu1',
# 'type': 'imu',
# 'ros_topic': '/xbotcore/bigman/imu/imu_link',
# 'fields': ['orientation', 'angular_velocity', 'linear_acceleration']},
# {'name': 'optitrack',
# 'type': 'optitrack',
# 'ros_topic': '/optitrack/relative_poses',
# 'fields': ['orientation', 'position'],
# 'bodies': ['box']},
]
state_active = [{'name': 'joint_state',
'type': 'joint_state',
'fields': ['link_position', 'link_velocity'],
'joints': bigman_params['joint_ids'][body_part_sensed]},
{'name': 'prev_cmd',
'type': 'prev_cmd'},
{'name': 'distance_left_arm',
'type': 'fk_pose',
'body_name': LH_name,
'body_offset': l_soft_hand_offset,
'target_offset': left_hand_rel_pose,
'fields': ['orientation', 'position']},
{'name': 'distance_right_arm',
'type': 'fk_pose',
'body_name': RH_name,
'body_offset': r_soft_hand_offset,
'target_offset': right_hand_rel_pose,
'fields': ['orientation', 'position']},
# {'name': 'optitrack',
# 'type': 'optitrack',
# 'fields': ['orientation', 'position'],
# 'bodies': ['box']} # check if it is better relative position with EE(EEs)
]
optional_env_params = {
'temp_object_name': 'box'
}
# Spawn Box first because it is simulation
spawn_box_gazebo(box_relative_pose, box_size=box_size)
# Create a BIGMAN ROS EnvInterface
bigman_env = BigmanEnv(interface=interface, mode='simulation',
body_part_active=body_part_active, command_type=command_type,
observation_active=observation_active,
state_active=state_active,
cmd_freq=int(1/Ts),
robot_dyn_model=robot_model,
optional_env_params=optional_env_params,
reset_simulation_fcn=reset_condition_bigman_box_gazebo_fcn)
# reset_simulation_fcn=reset_condition_bigman_box_gazebo)
action_dim = bigman_env.action_dim
state_dim = bigman_env.state_dim
observation_dim = bigman_env.obs_dim
print("Bigman Environment OK. body_part_active:%s (action_dim=%d). Command_type:%s" % (body_part_active, action_dim,
command_type))
# ################# #
# ################# #
# ##### AGENT ##### #
# ################# #
# ################# #
change_print_color.change('CYAN')
print("\nCreating Bigman Agent...")
policy_params = {
'network_model': tf_network, # tf_network, multi_modal_network, multi_modal_network_fp
'network_params': {
'n_layers': 1, # Hidden layers??
'dim_hidden': [40], # List of size per n_layers
'obs_names': bigman_env.get_obs_info()['names'],
'obs_dof': bigman_env.get_obs_info()['dimensions'], # DoF for observation data tensor
},
# Initialization.
'init_var': 0.1, # Initial policy variance.
'ent_reg': 0.0, # Entropy regularizer (Used to update policy variance)
# Solver hyperparameters.
'iterations': 5000, # Number of iterations per inner iteration (Default:5000). Recommended: 1000?
'batch_size': 15,
'lr': 0.001, # Base learning rate (by default it's fixed).
'lr_policy': 'fixed', # Learning rate policy.
'momentum': 0.9, # Momentum.
'weight_decay': 0.005, # Weight decay.
'solver_type': 'Adam', # Solver type (e.g. 'SGD', 'Adam', etc.).
# set gpu usage.
'use_gpu': 1, # Whether or not to use the GPU for training.
'gpu_id': 0,
'random_seed': 1,
'fc_only_iterations': 0, # TODO: Only forwardcontrol? if it is CNN??
# 'weights_file_prefix': EXP_DIR + 'policy',
}
policy_opt = {
'type': PolicyOptTf,
'hyperparams': policy_params
}
bigman_agent = GPSAgent(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim, policy_opt=policy_opt)
print("Bigman Agent:%s OK\n" % type(bigman_agent))
# ################# #
# ################# #
# ##### COSTS ##### #
# ################# #
# ################# #
# Action Cost
act_cost = {
'type': CostAction,
'wu': np.ones(action_dim) * 1e-4,
'target': None, # Target action value
}
# State Cost
target_distance_right_arm = np.zeros(6)
# state_cost_distance = {
# 'type': CostState,
# 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
# 'l1': 0.1, # Weight for l1 norm
# 'l2': 1.0, # Weight for l2 norm
# 'alpha': 1e-2, # Constant added in square root in l1 norm
# 'wp_final_multiplier': 10.0, # Weight multiplier on final time step.
# 'data_types': {
# 'distance_left_arm': {
# # 'wp': np.ones_like(target_state), # State weights - must be set.
# 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 1.0]), # State weights - must be set.
# 'target_state': target_distance_left_arm, # Target state - must be set.
# 'average': None, # (12, 3),
# 'data_idx': bigman_env.get_state_info(name='distance_left_arm')['idx']
# },
# 'distance_right_arm': {
# # 'wp': np.ones_like(target_state), # State weights - must be set.
# 'wp': np.array([1.0, 1.0, 1.0, 3.0, 3.0, 1.0]), # State weights - must be set.
# 'target_state': target_distance_right_arm, # Target state - must be set.
# 'average': None, # (12, 3),
# 'data_idx': bigman_env.get_state_info(name='distance_right_arm')['idx']
# },
# },
# }
RAfk_cost = {
'type': CostFK,
'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
'target_pose': target_distance_right_arm,
'tgt_data_type': 'state', # 'state' or 'observation'
'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
'op_point_name': RH_name,
'op_point_offset': r_soft_hand_offset,
'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],
'joint_ids': bigman_params['joint_ids']['RA'],
'robot_model': robot_model,
# 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos
'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
'evalnorm': evall1l2term,
'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm
'wp_final_multiplier': 1, # 10
}
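# Illustrative sketch (an assumption about the cost form, not copied from the
# robolearn source): evall1l2term-style costs typically score a weighted pose
# error d as  0.5 * l2 * ||d||^2 + l1 * sqrt(alpha + ||d||^2).  The l2 term
# pulls quickly toward the target while the l1 term rewards precise final
# placement, matching the inline comments on 'l1'/'l2' in the dicts here.
def _l1l2_penalty_sketch(d, l1, l2, alpha):
    sq = np.sum(np.asarray(d, dtype=float) ** 2)
    return 0.5 * l2 * sq + l1 * np.sqrt(alpha + sq)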
RAfk_l1_cost = {
'type': CostFK,
'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
'target_pose': target_distance_right_arm,
'tgt_data_type': 'state', # 'state' or 'observation'
'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
'op_point_name': RH_name,
'op_point_offset': r_soft_hand_offset,
'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],
'joint_ids': bigman_params['joint_ids']['RA'],
'robot_model': robot_model,
# 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos
'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
'evalnorm': evall1l2term,
'l1': 1.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
'l2': 0.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm
'wp_final_multiplier': 1, # 10
}
RAfk_l2_cost = {
'type': CostFK,
'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
'target_pose': target_distance_right_arm,
'tgt_data_type': 'state', # 'state' or 'observation'
'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
'op_point_name': RH_name,
'op_point_offset': r_soft_hand_offset,
'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],
'joint_ids': bigman_params['joint_ids']['RA'],
'robot_model': robot_model,
# 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos
'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
'evalnorm': evall1l2term,
'l1': 0.0, # 1.0, # 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
'l2': 1.0, # 1.0, #1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm
'wp_final_multiplier': 1, # 10
}
RAfk_final_cost = {
'type': CostFK,
'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
'target_pose': target_distance_right_arm,
'tgt_data_type': 'state', # 'state' or 'observation'
'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
'op_point_name': RH_name,
'op_point_offset': r_soft_hand_offset,
'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],
'joint_ids': bigman_params['joint_ids']['RA'],
'robot_model': robot_model,
'wp': np.array([1.0, 1.0, 1.0, 8.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
'evalnorm': evall1l2term,
'l1': 0.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm
'wp_final_multiplier': 10,
}
RAfk_l1_final_cost = {
'type': CostFK,
'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
'target_pose': target_distance_right_arm,
'tgt_data_type': 'state', # 'state' or 'observation'
'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
'op_point_name': RH_name,
'op_point_offset': r_soft_hand_offset,
'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],
'joint_ids': bigman_params['joint_ids']['RA'],
'robot_model': robot_model,
'wp': np.array([1.0, 1.0, 1.0, 8.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
'evalnorm': evall1l2term,
'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
'l2': 0.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm
'wp_final_multiplier': 10,
}
RAfk_l2_final_cost = {
'type': CostFK,
'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
'target_pose': target_distance_right_arm,
'tgt_data_type': 'state', # 'state' or 'observation'
'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
'op_point_name': RH_name,
'op_point_offset': r_soft_hand_offset,
'joints_idx': bigman_env.get_state_info(name='link_position')['idx'][7:],
'joint_ids': bigman_params['joint_ids']['RA'],
'robot_model': robot_model,
'wp': np.array([1.0, 1.0, 1.0, 8.0, 10.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
'evalnorm': evall1l2term,
'l1': 0.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm
'wp_final_multiplier': 10,
}
# RAfk_cost = {
# 'type': CostFK,
# 'ramp_option': RAMP_CONSTANT, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
# 'target_pose': target_distance_right_arm,
# 'tgt_data_type': 'state', # 'state' or 'observation'
# 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
# 'op_point_name': RH_name,
# 'op_point_offset': r_soft_hand_offset,
# 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],
# 'joint_ids': bigman_params['joint_ids']['RA'],
# 'robot_model': robot_model,
# # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos
# 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
# 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
# 'l2': 1.0e-3, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
# 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm
# 'wp_final_multiplier': 1, # 10
# }
# RAfk_final_cost = {
# 'type': CostFK,
# 'ramp_option': RAMP_FINAL_ONLY, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
# 'target_pose': target_distance_left_arm,
# 'tgt_data_type': 'state', # 'state' or 'observation'
# 'tgt_idx': bigman_env.get_state_info(name='distance_right_arm')['idx'],
# 'op_point_name': RH_name,
# 'op_point_offset': r_soft_hand_offset,
# 'joints_idx': bigman_env.get_state_info(name='link_position')['idx'],
# 'joint_ids': bigman_params['joint_ids']['RA'],
# 'robot_model': robot_model,
# 'wp': np.array([1.0, 1.0, 1.0, 6.0, 6.0, 3.0]), # one dim less because 'quat' error | 1)orient 2)pos
# 'l1': 1.0, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
# 'l2': 0.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
# 'alpha': 1.0e-5, # e-5, # Constant added in square root in l1 norm
# 'wp_final_multiplier': 10,
# }
# target_state_box = box_relative_pose.copy()
# target_state_box[-1] += final_box_height
# state_cost = {
# 'type': CostState,
# 'ramp_option': RAMP_LINEAR, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
# 'l1': 0.0, # Weight for l1 norm
# 'l2': 1.0, # Weight for l2 norm
# 'alpha': 1e-5, # Constant added in square root in l1 norm
# 'wp_final_multiplier': 5.0, # Weight multiplier on final time step.
# 'data_types': {
# 'optitrack': {
# # 'wp': np.ones_like(target_state), # State weights - must be set.
# 'wp': np.array([1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 1.0]), # State weights - must be set.
# 'target_state': target_state_box, # Target state - must be set.
# 'average': None, # (12, 3),
# 'data_idx': bigman_env.get_state_info(name='optitrack')['idx']
# },
# # 'link_position': {
# # 'wp': np.ones_like(target_pos), # State weights - must be set.
# # 'target_state': target_pos, # Target state - must be set.
# # 'average': None, #(12, 3),
# # 'data_idx': bigman_env.get_state_info(name='link_position')['idx']
# # },
# # 'link_velocity': {
# # 'wp': np.ones_like(target_vel), # State weights - must be set.
# # 'target_state': target_vel, # Target state - must be set.
# # 'average': None, #(12, 3),
# # 'data_idx': bigman_env.get_state_info(name='link_velocity')['idx']
# # },
# },
# }
# LAfk_cost = {
# 'type': CostFKRelative,
# 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. RAMP_* :CONSTANT, LINEAR, QUADRATIC, FINAL_ONLY
# 'target_rel_pose': left_hand_rel_pose,
# 'rel_data_type': 'state', # 'state' or 'observation'
# # 'rel_data_name': 'optitrack', # Name of the state/observation
# # 'rel_idx': bigman_env.get_obs_info(name='optitrack')['idx'],
# 'rel_idx': bigman_env.get_state_info(name='optitrack')['idx'],
# 'data_idx': bigman_env.get_state_info(name='link_position')['idx'],
# 'op_point_name': LH_name,
# 'op_point_offset': l_soft_hand_offset,
# 'joint_ids': bigman_params['joint_ids']['LA'],
# 'robot_model': robot_model,
# # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos
# 'wp': np.array([0.5, 0.5, 0.5, 3.0, 3.0, 1.5]), # one dim less because 'quat' error | 1)orient 2)pos
# 'l1': 0.1, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
# 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
# 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm
# 'wp_final_multiplier': 10,
# }
# RAfk_cost = {
# 'type': CostFKRelative,
# 'ramp_option': RAMP_QUADRATIC, # How target cost ramps over time. RAMP_* :CONSTANT,LINEAR, QUADRATIC, FINAL_ONLY
# 'target_rel_pose': right_hand_rel_pose,
# 'rel_data_type': 'observation', # 'state' or 'observation'
# # 'rel_data_name': 'optitrack', # Name of the state/observation
# 'rel_idx': bigman_env.get_obs_info(name='optitrack')['idx'],
# 'data_idx': bigman_env.get_state_info(name='link_position')['idx'],
# 'op_point_name': RH_name,
# 'op_point_offset': r_soft_hand_offset,
# 'joint_ids': bigman_params['joint_ids']['RA'],
# 'robot_model': robot_model,
# # 'wp': np.array([1.0, 1.0, 1.0, 0.7, 0.8, 0.6]), # one dim less because 'quat' error | 1)orient 2)pos
# 'wp': np.array([0.5, 0.5, 0.5, 3.0, 3.0, 1.5]), # one dim less because 'quat' error | 1)orient 2)pos
# 'l1': 0.1, # Weight for l1 norm: log(d^2 + alpha) --> Lorentzian rho-function Precise placement at the target
# 'l2': 1.0, # Weight for l2 norm: d^2 --> Encourages to quickly get the object in the vicinity of the target
# 'alpha': 1.0e-2, # e-5, # Constant added in square root in l1 norm
# 'wp_final_multiplier': 10,
# }
cost_sum = {
'type': CostSum,
# 'costs': [act_cost, state_cost_distance],
# 'weights': [1.0e-2, 1.0e-0],
# 'costs': [act_cost, LAfk_cost, RAfk_cost, state_cost],
# 'weights': [1.0e-2, 1.0e-0, 1.0e-0, 5.0e-1],
#'costs': [act_cost, LAfk_cost, LAfk_final_cost],
#'weights': [1.0e-1, 1.0e-0, 1.0e-0],
    # Only the right-arm (RAfk_*) cost dicts are defined in this script; the
    # original list also referenced undefined LAfk_* names, which would raise
    # a NameError at module load.
    'costs': [act_cost, RAfk_l1_cost, RAfk_l2_cost, RAfk_l1_final_cost, RAfk_l2_final_cost],
    'weights': [1.0e-2, 1.0e-0, 1.0e-0, 1.0e-0, 1.0e-0],
# 'costs': [act_cost, state_cost],#, LAfk_cost, RAfk_cost],
# 'weights': [0.1, 5.0],
}
# ########## #
# ########## #
# Conditions #
# ########## #
# ########## #
q0 = np.zeros(31)
q0[15] = np.deg2rad(25)
q0[16] = np.deg2rad(40)
q0[18] = np.deg2rad(-75)
#q0[15:15+7] = [0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633]
q0[24] = np.deg2rad(25)
q0[25] = np.deg2rad(-40)
q0[27] = np.deg2rad(-75)
#q0[24:24+7] = [0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633]
box_pose0 = box_relative_pose.copy()
condition0 = create_bigman_box_condition(q0, box_pose0, bigman_env.get_state_info(),
joint_idxs=bigman_params['joint_ids'][body_part_sensed])
bigman_env.add_condition(condition0)
reset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose0)
#q1 = np.zeros(31)
q1 = q0.copy()
q1[15] = np.deg2rad(25)
q1[18] = np.deg2rad(-45)
q1[24] = np.deg2rad(25)
q1[27] = np.deg2rad(-45)
box_pose1 = create_box_relative_pose(box_x=box_x+0.02, box_y=box_y+0.02, box_z=box_z, box_yaw=box_yaw+5)
condition1 = create_bigman_box_condition(q1, box_pose1, bigman_env.get_state_info(),
joint_idxs=bigman_params['joint_ids'][body_part_sensed])
bigman_env.add_condition(condition1)
reset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose1)
q2 = q0.copy()
q2[16] = np.deg2rad(50)
q2[18] = np.deg2rad(-50)
q2[25] = np.deg2rad(-50)
q2[27] = np.deg2rad(-50)
box_pose2 = create_box_relative_pose(box_x=box_x-0.02, box_y=box_y-0.02, box_z=box_z, box_yaw=box_yaw-5)
condition2 = create_bigman_box_condition(q2, box_pose2, bigman_env.get_state_info(),
joint_idxs=bigman_params['joint_ids'][body_part_sensed])
bigman_env.add_condition(condition2)
reset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose2)
# q3 = q0.copy()
# q3[16] = np.deg2rad(0)
# q3[18] = np.deg2rad(0)
# q3[25] = np.deg2rad(0)
# q3[27] = np.deg2rad(0)
# box_pose3 = create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw+5)
# condition3 = create_bigman_box_condition(q3, box_pose3, bigman_env.get_state_info(),
# joint_idxs=bigman_params['joint_ids'][body_part_sensed])
# bigman_env.add_condition(condition3)
# reset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose3)
# q4 = q0.copy()
# box_pose4 = create_box_relative_pose(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw-5)
# condition4 = create_bigman_box_condition(q4, box_pose4, bigman_env.get_state_info(),
# joint_idxs=bigman_params['joint_ids'][body_part_sensed])
# bigman_env.add_condition(condition4)
# reset_condition_bigman_box_gazebo_fcn.add_reset_poses(box_pose4)
# #################### #
# #################### #
# ## DEMONSTRATIONS ## #
# #################### #
# #################### #
if init_with_demos is True:
change_print_color.change('GREEN')
if demos_dir is None:
task_space_torque_control_demos_params = {
'n_samples': 5,
'conditions_to_sample': range(len(bigman_env.get_conditions())),
'Treach': Treach,
'Tlift': Tlift,
'Tinter': Tinter,
'Tend': Tend,
'Ts': Ts,
'noisy': False,
'noise_hyperparams': {
'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU
'smooth_noise': False, # Whether or not to perform smoothing of noise
'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01
'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.
},
'bigman_env': bigman_env,
'box_relative_pose': box_relative_pose,
'box_size': box_size,
'final_box_height': final_box_height,
}
demos_samples = task_space_torque_control_demos(**task_space_torque_control_demos_params)
bigman_env.reset(time=2, cond=0)
else:
demos_samples = load_task_space_torque_control_demos(demos_dir)
print('Demos samples has been obtained from directory %s' % demos_dir)
else:
demos_samples = None
# ######################## #
# ######################## #
# ## LEARNING ALGORITHM ## #
# ######################## #
# ######################## #
change_print_color.change('YELLOW')
print("\nConfiguring learning algorithm...\n")
# Learning params
resume_training_itr = 91 # Resume from previous training iteration
data_files_dir = 'GPS_2017-08-14_10:35:40' # 'GPS_2017-08-04_09:40:59' # In case we want to resume from previous training
traj_opt_method = {'type': TrajOptLQR,
'del0': 1e-4, # Dual variable updates for non-SPD Q-function (non-SPD correction step).
# 'eta_error_threshold': 1e16, # TODO: REMOVE, it is not used
'min_eta': 1e-8, # At min_eta, kl_div > kl_step
'max_eta': 1e16, # At max_eta, kl_div < kl_step
'cons_per_step': False, # Whether or not to enforce separate KL constraints at each time step.
'use_prev_distr': False, # Whether or not to measure expected KL under the previous traj distr.
'update_in_bwd_pass': True, # Whether or not to update the TVLG controller during the bwd pass.
}
# traj_opt_method = {'type': TrajOptPI2,
# 'del0': 1e-4, # Dual variable updates for non-PD Q-function.
# 'kl_threshold': 1.0, # KL-divergence threshold between old and new policies.
# 'covariance_damping': 2.0, # If greater than zero, covariance is computed as a multiple of the old
# # covariance. Multiplier is taken to the power (1 / covariance_damping).
# # If greater than one, slows down convergence and keeps exploration noise
# # high for more iterations.
# 'min_temperature': 0.001, # Minimum bound of the temperature optimization for the soft-max
# # probabilities of the policy samples.
# 'use_sumexp': False,
# 'pi2_use_dgd_eta': False,
# 'pi2_cons_per_step': True,
# }
if demos_samples is None:
# # init_traj_distr values can be lists if they are different for each condition
# init_traj_distr = {'type': init_lqr,
# # Parameters to calculate initial COST function based on stiffness
# 'init_var': 3.0e-1, # Initial Variance
# 'stiffness': 5.0e-1, # Stiffness (multiplies q)
# 'stiffness_vel': 0.01, # 0.5, # Stiffness_vel*stiffness (multiplies qdot)
# 'final_weight': 10.0, # Multiplies cost at T
# # Parameters for guessing dynamics
# 'init_acc': np.zeros(action_dim), # dU vector(np.array) of accelerations, default zeros.
# #'init_gains': 1.0*np.ones(action_dim), # dU vector(np.array) of gains, default ones.
# #'init_gains': 1.0/np.array([5000.0, 8000.0, 5000.0, 5000.0, 300.0, 2000.0, 300.0]), # dU vector(np.array) of gains, default ones.
# 'init_gains': np.ones(action_dim), # dU vector(np.array) of gains, default ones.
# }
init_traj_distr = {'type': init_pd,
#'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active]))*0.3e-1, # Initial variance (Default:10)
'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1,
3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0,
#'init_var': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Initial variance (Default:10)
# 'init_var': np.array([3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1,
# 3.0e-1, 3.0e-1, 3.0e-1, 3.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*1.0, # Initial variance (Default:10)
'pos_gains': 0.001, #np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 5.0e-2, 5.0e-2, 5.0e-2])*1.0e+1, # 0.001, # Position gains (Default:10)
'vel_gains_mult': 0.01, # Velocity gains multiplier on pos_gains
'init_action_offset': None,
'dJoints': len(bigman_params['joint_ids'][body_part_sensed]), # Total joints in state
}
else:
init_traj_distr = {'type': init_demos,
'sample_lists': demos_samples
}
learned_dynamics = {'type': DynamicsLRPrior,
'regularization': 1e-6,
'prior': {
'type': DynamicsPriorGMM,
'max_clusters': 20, # Maximum number of clusters to fit.
'min_samples_per_cluster': 40, # Minimum samples per cluster.
'max_samples': 20, # Max. number of trajectories to use for fitting the GMM at any given time.
'strength': 1.0, # Adjusts the strength of the prior.
},
}
# gps_algo = 'pigps'
# gps_algo_hyperparams = {'init_pol_wt': 0.01,
# 'policy_sample_mode': 'add'
# }
gps_algo = 'mdgps'
gps_algo_hyperparams = {'init_pol_wt': 0.01, # TODO: remove need for init_pol_wt in MDGPS (It should not work with MDGPS)
'policy_sample_mode': 'add',
'step_rule': 'laplace', # Whether to use 'laplace' or 'mc' cost in step adjustment
'policy_prior': {'type': ConstantPolicyPrior,
'strength': 1e-4,
},
}
gps_hyperparams = {
'T': int(EndTime/Ts), # Total points
'dt': Ts,
'iterations': 91, # 100 # 2000 # GPS episodes, "inner iterations" --> K iterations
'test_after_iter': True, # If test the learned policy after an iteration in the RL algorithm
'test_samples': 2, # Samples from learned policy after an iteration PER CONDITION (only if 'test_after_iter':True)
# Samples
'num_samples': 5, # 20 # Samples for exploration trajs --> N samples
'noisy_samples': True,
'sample_on_policy': False, # Whether generate on-policy samples or off-policy samples
#'noise_var_scale': np.array([5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2, 5.0e-2]), # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)
#'noise_var_scale': np.array([1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1, 1.0e-1])*10, # Scale to Gaussian noise: N(0,1)*sqrt(noise_var_scale)
'smooth_noise': True, # Apply Gaussian filter to noise generated
'smooth_noise_var': 5.0e+0, # Variance to apply to Gaussian Filter
'smooth_noise_renormalize': True, # Renormalize smooth noise to have variance=1
'noise_var_scale': np.ones(len(bigman_params['joint_ids'][body_part_active])), # Scale to Gaussian noise: N(0, 1)*sqrt(noise_var_scale), only if smooth_noise_renormalize
'cost': cost_sum,
# Conditions
'conditions': len(bigman_env.get_conditions()), # Total number of initial conditions
'train_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for training
'test_conditions': range(len(bigman_env.get_conditions())), # Indexes of conditions used for testing
# KL step (epsilon)
'kl_step': 0.2, # Kullback-Leibler step (base_step)
'min_step_mult': 0.01, # Min possible value of step multiplier (multiplies kl_step in LQR)
'max_step_mult': 1.0, #3 # 10.0, # Max possible value of step multiplier (multiplies kl_step in LQR)
# Others
'gps_algo': gps_algo,
'gps_algo_hyperparams': gps_algo_hyperparams,
'init_traj_distr': init_traj_distr,
'fit_dynamics': True,
'dynamics': learned_dynamics,
'initial_state_var': 1e-6, # Max value for x0sigma in trajectories # TODO: CHECK THIS VALUE, maybe it is too low
'traj_opt': traj_opt_method,
'max_ent_traj': 0.0, # Weight of maximum entropy term in trajectory optimization
'data_files_dir': data_files_dir,
}
learn_algo = GPS(agent=bigman_agent, env=bigman_env, **gps_hyperparams)
print("Learning algorithm: %s OK\n" % type(learn_algo))
# import numpy as np
# dX = bigman_env.state_dim
# dU = bigman_env.action_dim
# dO = bigman_env.obs_dim
# T = gps_hyperparams['T']
# all_actions = np.zeros((T, dU))
# all_states = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dX))
# all_obs = np.tile(np.expand_dims(np.linspace(0.5, 0, T), axis=1), (1, dO))
# sample = Sample(bigman_env, T)
# sample.set_acts(all_actions) # Set all actions at the same time
# sample.set_obs(all_obs) # Set all obs at the same time
# sample.set_states(all_states) # Set all states at the same time
# costs = learn_algo._eval_conditions_sample_list_cost([SampleList([sample])])
# raw_input('zacataaaaaaaaa')
# Optimize policy using learning algorithm
print("Running Learning Algorithm!!!")
training_successful = learn_algo.run(resume_training_itr)
if training_successful:
print("Learning Algorithm has finished SUCCESSFULLY!")
else:
print("Learning Algorithm has finished WITH ERRORS!")
# ############################## #
# ############################## #
# ## SAMPLE FROM FINAL POLICY ## #
# ############################## #
# ############################## #
if training_successful:
conditions_to_sample = gps_hyperparams['test_conditions']
change_print_color.change('GREEN')
n_samples = 1
noisy = False
sampler_hyperparams = {
'noisy': noisy,
'noise_var_scale': 0.0001, # It can be a np.array() with dim=dU
'smooth_noise': False, # Whether or not to perform smoothing of noise
'smooth_noise_var': 0.01, # If smooth=True, applies a Gaussian filter with this variance. E.g. 0.01
'smooth_noise_renormalize': False, # If smooth=True, renormalizes data to have variance 1 after smoothing.
'T': int(EndTime/Ts)*10, # Total points
'dt': Ts
}
sampler = Sampler(bigman_agent.policy, bigman_env, **sampler_hyperparams)
print("Sampling from final policy!!!")
sample_lists = list()
for cond_idx in conditions_to_sample:
raw_input("\nSampling %d times from condition %d and with policy:%s (noisy:%s). \n Press a key to continue..." %
(n_samples, cond_idx, type(bigman_agent.policy), noisy))
sample_list = sampler.take_samples(n_samples, cond=cond_idx, noisy=noisy)
# costs = learn_algo._eval_conditions_sample_list_cost([sample_list])
# # print(costs)
# # raw_input('pppp')
# sample_lists.append(sample_list)
bigman_env.reset(time=1, cond=0)
print("The script has finished!")
os._exit(0)
| [
"robolearn.old_utils.print_utils.change_print_color.change",
"robolearn.old_utils.tasks.bigman.lift_box_utils.load_task_space_torque_control_demos",
"numpy.array",
"robolearn.old_utils.tasks.bigman.lift_box_utils.Reset_condition_bigman_box_gazebo",
"robolearn.old_utils.tasks.bigman.lift_box_utils.spawn_box_... | [((1814, 1877), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)', 'linewidth': '(1000)'}), '(precision=4, suppress=True, linewidth=1000)\n', (1833, 1877), True, 'import numpy as np\n'), ((2009, 2054), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'kill_everything'], {}), '(signal.SIGINT, kill_everything)\n', (2022, 2054), False, 'import signal\n'), ((2504, 2521), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2515, 2521), False, 'import random\n'), ((2522, 2542), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2536, 2542), True, 'import numpy as np\n'), ((2684, 2769), 'robolearn.old_utils.tasks.bigman.lift_box_utils.create_box_relative_pose', 'create_box_relative_pose', ([], {'box_x': 'box_x', 'box_y': 'box_y', 'box_z': 'box_z', 'box_yaw': 'box_yaw'}), '(box_x=box_x, box_y=box_y, box_z=box_z, box_yaw=box_yaw\n )\n', (2708, 2769), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import create_box_relative_pose\n'), ((3034, 3061), 'robolearn.old_utils.robot_model.RobotModel', 'RobotModel', (['robot_urdf_file'], {}), '(robot_urdf_file)\n', (3044, 3061), False, 'from robolearn.old_utils.robot_model import RobotModel\n'), ((3123, 3152), 'numpy.array', 'np.array', (['[0.0, -0.03, -0.21]'], {}), '([0.0, -0.03, -0.21])\n', (3131, 3152), True, 'import numpy as np\n'), ((3178, 3206), 'numpy.array', 'np.array', (['[0.0, 0.03, -0.21]'], {}), '([0.0, 0.03, -0.21])\n', (3186, 3206), True, 'import numpy as np\n'), ((3234, 3456), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633, 0.0, 0.0, \n 0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0568, 0.2386, -0.2337, -1.6803, 0.2226, 0.0107, 0.5633, 0.0,\n 0.0, 0.0568, -0.2386, 0.2337, -1.6803, -0.2226, 0.0107, -0.5633])\n', (3242, 3456), True, 'import numpy as np\n'), ((3876, 3909), 'robolearn.old_utils.print_utils.change_print_color.change', 'change_print_color.change', (['"""BLUE"""'], {}), "('BLUE')\n", (3901, 3909), False, 'from robolearn.old_utils.print_utils import change_print_color\n'), ((4088, 4208), 'robolearn.old_utils.tasks.bigman.lift_box_utils.create_hand_relative_pose', 'create_hand_relative_pose', (['[0, 0, 0, 1, 0, 0, 0]'], {'hand_x': '(0.0)', 'hand_y': '(box_size[1] / 2 - 0.02)', 'hand_z': '(0.0)', 'hand_yaw': '(0)'}), '([0, 0, 0, 1, 0, 0, 0], hand_x=0.0, hand_y=\n box_size[1] / 2 - 0.02, hand_z=0.0, hand_yaw=0)\n', (4113, 4208), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import create_hand_relative_pose\n'), ((4383, 4504), 'robolearn.old_utils.tasks.bigman.lift_box_utils.create_hand_relative_pose', 'create_hand_relative_pose', (['[0, 0, 0, 1, 0, 0, 0]'], {'hand_x': '(0.0)', 'hand_y': '(-box_size[1] / 2 + 0.02)', 'hand_z': '(0.0)', 'hand_yaw': '(0)'}), '([0, 0, 0, 1, 0, 0, 0], hand_x=0.0, hand_y=-\n box_size[1] / 2 + 0.02, hand_z=0.0, hand_yaw=0)\n', (4408, 4504), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import create_hand_relative_pose\n'), ((4701, 4736), 'robolearn.old_utils.tasks.bigman.lift_box_utils.Reset_condition_bigman_box_gazebo', 'Reset_condition_bigman_box_gazebo', ([], {}), '()\n', (4734, 4736), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import Reset_condition_bigman_box_gazebo\n'), ((8890, 8944), 
'robolearn.old_utils.tasks.bigman.lift_box_utils.spawn_box_gazebo', 'spawn_box_gazebo', (['box_relative_pose'], {'box_size': 'box_size'}), '(box_relative_pose, box_size=box_size)\n', (8906, 8944), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import spawn_box_gazebo\n'), ((10002, 10035), 'robolearn.old_utils.print_utils.change_print_color.change', 'change_print_color.change', (['"""CYAN"""'], {}), "('CYAN')\n", (10027, 10035), False, 'from robolearn.old_utils.print_utils import change_print_color\n'), ((11375, 11476), 'robolearn.old_agents.GPSAgent', 'GPSAgent', ([], {'act_dim': 'action_dim', 'obs_dim': 'observation_dim', 'state_dim': 'state_dim', 'policy_opt': 'policy_opt'}), '(act_dim=action_dim, obs_dim=observation_dim, state_dim=state_dim,\n policy_opt=policy_opt)\n', (11383, 11476), False, 'from robolearn.old_agents import GPSAgent\n'), ((11813, 11824), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (11821, 11824), True, 'import numpy as np\n'), ((26865, 26877), 'numpy.zeros', 'np.zeros', (['(31)'], {}), '(31)\n', (26873, 26877), True, 'import numpy as np\n'), ((26887, 26901), 'numpy.deg2rad', 'np.deg2rad', (['(25)'], {}), '(25)\n', (26897, 26901), True, 'import numpy as np\n'), ((26911, 26925), 'numpy.deg2rad', 'np.deg2rad', (['(40)'], {}), '(40)\n', (26921, 26925), True, 'import numpy as np\n'), ((26935, 26950), 'numpy.deg2rad', 'np.deg2rad', (['(-75)'], {}), '(-75)\n', (26945, 26950), True, 'import numpy as np\n'), ((27038, 27052), 'numpy.deg2rad', 'np.deg2rad', (['(25)'], {}), '(25)\n', (27048, 27052), True, 'import numpy as np\n'), ((27062, 27077), 'numpy.deg2rad', 'np.deg2rad', (['(-40)'], {}), '(-40)\n', (27072, 27077), True, 'import numpy as np\n'), ((27087, 27102), 'numpy.deg2rad', 'np.deg2rad', (['(-75)'], {}), '(-75)\n', (27097, 27102), True, 'import numpy as np\n'), ((27549, 27563), 'numpy.deg2rad', 'np.deg2rad', (['(25)'], {}), '(25)\n', (27559, 27563), True, 'import numpy as np\n'), ((27573, 27588), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (27583, 27588), True, 'import numpy as np\n'), ((27598, 27612), 'numpy.deg2rad', 'np.deg2rad', (['(25)'], {}), '(25)\n', (27608, 27612), True, 'import numpy as np\n'), ((27622, 27637), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (27632, 27637), True, 'import numpy as np\n'), ((27650, 27753), 'robolearn.old_utils.tasks.bigman.lift_box_utils.create_box_relative_pose', 'create_box_relative_pose', ([], {'box_x': '(box_x + 0.02)', 'box_y': '(box_y + 0.02)', 'box_z': 'box_z', 'box_yaw': '(box_yaw + 5)'}), '(box_x=box_x + 0.02, box_y=box_y + 0.02, box_z=\n box_z, box_yaw=box_yaw + 5)\n', (27674, 27753), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import create_box_relative_pose\n'), ((28053, 28067), 'numpy.deg2rad', 'np.deg2rad', (['(50)'], {}), '(50)\n', (28063, 28067), True, 'import numpy as np\n'), ((28077, 28092), 'numpy.deg2rad', 'np.deg2rad', (['(-50)'], {}), '(-50)\n', (28087, 28092), True, 'import numpy as np\n'), ((28102, 28117), 'numpy.deg2rad', 'np.deg2rad', (['(-50)'], {}), '(-50)\n', (28112, 28117), True, 'import numpy as np\n'), ((28127, 28142), 'numpy.deg2rad', 'np.deg2rad', (['(-50)'], {}), '(-50)\n', (28137, 28142), True, 'import numpy as np\n'), ((28155, 28258), 'robolearn.old_utils.tasks.bigman.lift_box_utils.create_box_relative_pose', 'create_box_relative_pose', ([], {'box_x': '(box_x - 0.02)', 'box_y': '(box_y - 0.02)', 'box_z': 'box_z', 'box_yaw': '(box_yaw - 5)'}), '(box_x=box_x - 0.02, box_y=box_y - 0.02, box_z=\n box_z, box_yaw=box_yaw - 5)\n', 
(28179, 28258), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import create_box_relative_pose\n'), ((31115, 31150), 'robolearn.old_utils.print_utils.change_print_color.change', 'change_print_color.change', (['"""YELLOW"""'], {}), "('YELLOW')\n", (31140, 31150), False, 'from robolearn.old_utils.print_utils import change_print_color\n'), ((39277, 39335), 'robolearn.old_algos.gps.gps.GPS', 'GPS', ([], {'agent': 'bigman_agent', 'env': 'bigman_env'}), '(agent=bigman_agent, env=bigman_env, **gps_hyperparams)\n', (39280, 39335), False, 'from robolearn.old_algos.gps.gps import GPS\n'), ((41898, 41909), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (41906, 41909), False, 'import os\n'), ((1996, 2007), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (2004, 2007), False, 'import os\n'), ((13813, 13853), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 6.0, 6.0, 3.0]'], {}), '([1.0, 1.0, 1.0, 6.0, 6.0, 3.0])\n', (13821, 13853), True, 'import numpy as np\n'), ((15000, 15040), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 6.0, 6.0, 3.0]'], {}), '([1.0, 1.0, 1.0, 6.0, 6.0, 3.0])\n', (15008, 15040), True, 'import numpy as np\n'), ((16187, 16227), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 6.0, 6.0, 3.0]'], {}), '([1.0, 1.0, 1.0, 6.0, 6.0, 3.0])\n', (16195, 16227), True, 'import numpy as np\n'), ((17271, 17312), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 8.0, 10.0, 3.0]'], {}), '([1.0, 1.0, 1.0, 8.0, 10.0, 3.0])\n', (17279, 17312), True, 'import numpy as np\n'), ((18320, 18361), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 8.0, 10.0, 3.0]'], {}), '([1.0, 1.0, 1.0, 8.0, 10.0, 3.0])\n', (18328, 18361), True, 'import numpy as np\n'), ((19369, 19410), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 8.0, 10.0, 3.0]'], {}), '([1.0, 1.0, 1.0, 8.0, 10.0, 3.0])\n', (19377, 19410), True, 'import numpy as np\n'), ((29612, 29646), 'robolearn.old_utils.print_utils.change_print_color.change', 'change_print_color.change', (['"""GREEN"""'], {}), "('GREEN')\n", (29637, 29646), False, 'from robolearn.old_utils.print_utils import change_print_color\n'), ((40606, 40640), 'robolearn.old_utils.print_utils.change_print_color.change', 'change_print_color.change', (['"""GREEN"""'], {}), "('GREEN')\n", (40631, 40640), False, 'from robolearn.old_utils.print_utils import change_print_color\n'), ((41197, 41260), 'robolearn.old_utils.sampler.Sampler', 'Sampler', (['bigman_agent.policy', 'bigman_env'], {}), '(bigman_agent.policy, bigman_env, **sampler_hyperparams)\n', (41204, 41260), False, 'from robolearn.old_utils.sampler import Sampler\n'), ((11697, 11716), 'numpy.ones', 'np.ones', (['action_dim'], {}), '(action_dim)\n', (11704, 11716), True, 'import numpy as np\n'), ((30661, 30734), 'robolearn.old_utils.tasks.bigman.lift_box_utils.task_space_torque_control_demos', 'task_space_torque_control_demos', ([], {}), '(**task_space_torque_control_demos_params)\n', (30692, 30734), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import task_space_torque_control_demos, load_task_space_torque_control_demos\n'), ((30810, 30857), 'robolearn.old_utils.tasks.bigman.lift_box_utils.load_task_space_torque_control_demos', 'load_task_space_torque_control_demos', (['demos_dir'], {}), '(demos_dir)\n', (30846, 30857), False, 'from robolearn.old_utils.tasks.bigman.lift_box_utils import task_space_torque_control_demos, load_task_space_torque_control_demos\n'), ((34523, 34608), 'numpy.array', 'np.array', (['[0.3, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1, 0.3, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1]'], {}), '([0.3, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1, 0.3, 
0.3, 0.3, 0.3, 0.1, 0.1, 0.1]\n )\n', (34531, 34608), True, 'import numpy as np\n')] |
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D, proj3d
import numpy as np
import itertools
import oloid.circle
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# draw the cube
r = [-1, 1]
for s, e in itertools.combinations(np.array(list(itertools.product(r,r,r))), 2):
if np.sum(np.abs(s-e)) == r[1]-r[0]:
ax.plot3D(*zip(s,e), color="b")
# draw a point at the origin
ax.scatter([0],[0],[0],color="g",s=100)
class Arrow3D(mpatches.FancyArrowPatch):
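    """FancyArrowPatch that projects its 3D endpoints into 2D before drawing."""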
def __init__(self, xs, ys, zs, *args, **kwargs):
mpatches.FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
mpatches.FancyArrowPatch.draw(self, renderer)
a = Arrow3D([0,0],[0,1],[0,0], mutation_scale=20, lw=1, arrowstyle="-|>", color="k")
b = Arrow3D([0,-1],[0,0],[0,0], mutation_scale=20, lw=1, arrowstyle="-|>", color="r")
c = Arrow3D([0,0],[0,0],[0,1], mutation_scale=20, lw=1, arrowstyle="-|>", color="b")
d = Arrow3D([0,0],[0,0],[0,-1], mutation_scale=20, lw=1, arrowstyle="-|>", color="g")
e = Arrow3D([0,1],[0,0],[0,0], mutation_scale=20, lw=1, arrowstyle="-|>", color="c")
f = Arrow3D([0,0],[0,-0.5],[0,0], mutation_scale=20, lw=1, arrowstyle="-|>", color="m")
my_circle1 = oloid.circle.circle([0, 0, 0], [0, 0, 1], 1, 100)
my_circle2 = oloid.circle.circle([1, 0, 0], [0, 1, 0], 1, 100)
ax.add_artist(my_circle1)
ax.add_artist(my_circle2)
ax.add_artist(a)
ax.add_artist(b)
ax.add_artist(c)
ax.add_artist(d)
ax.add_artist(e)
ax.add_artist(f)
ax.legend()
plt.show() | [
"numpy.abs",
"itertools.product",
"matplotlib.pyplot.figure",
"matplotlib.patches.FancyArrowPatch.__init__",
"matplotlib.patches.FancyArrowPatch.draw",
"mpl_toolkits.mplot3d.proj3d.proj_transform",
"matplotlib.pyplot.show"
] | [((215, 227), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (225, 227), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1814, 1816), True, 'import matplotlib.pyplot as plt\n'), ((609, 681), 'matplotlib.patches.FancyArrowPatch.__init__', 'mpatches.FancyArrowPatch.__init__', (['self', '(0, 0)', '(0, 0)', '*args'], {}), '(self, (0, 0), (0, 0), *args, **kwargs)\n', (642, 681), True, 'import matplotlib.patches as mpatches\n'), ((808, 859), 'mpl_toolkits.mplot3d.proj3d.proj_transform', 'proj3d.proj_transform', (['xs3d', 'ys3d', 'zs3d', 'renderer.M'], {}), '(xs3d, ys3d, zs3d, renderer.M)\n', (829, 859), False, 'from mpl_toolkits.mplot3d import Axes3D, proj3d\n'), ((924, 969), 'matplotlib.patches.FancyArrowPatch.draw', 'mpatches.FancyArrowPatch.draw', (['self', 'renderer'], {}), '(self, renderer)\n', (953, 969), True, 'import matplotlib.patches as mpatches\n'), ((336, 362), 'itertools.product', 'itertools.product', (['r', 'r', 'r'], {}), '(r, r, r)\n', (353, 362), False, 'import itertools\n'), ((382, 395), 'numpy.abs', 'np.abs', (['(s - e)'], {}), '(s - e)\n', (388, 395), True, 'import numpy as np\n')] |
import numpy as np
import dnplab as dnp
def get_gauss_3d(std_noise=0.0):
x = np.r_[0:100]
y = np.r_[0:100]
z = np.r_[0:100]
noise = std_noise * np.random.randn(len(x), len(y), len(z))
gauss = np.exp(-1.0 * (x - 50) ** 2.0 / (10.0 ** 2))
gauss_3d = (
gauss.reshape(-1, 1, 1) * gauss.reshape(1, -1, 1) * gauss.reshape(1, 1, -1)
)
gauss_3d += noise
return x, y, z, gauss_3d
def test3d(std_noise=0.0):
x, y, z, gauss_3d = get_gauss_3d(std_noise)
test_data = dnp.DNPData(gauss_3d, ["x", "y", "z"], [x, y, z])
return test_data
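# Minimal usage sketch (an assumption, not part of the original module):
# build the noisy 3D gaussian and confirm the DNPData object carries the
# expected dimension labels and shape.
if __name__ == "__main__":
    data = test3d(std_noise=0.1)
    print(data.dims)          # expected: ['x', 'y', 'z']
    print(data.values.shape)  # expected: (100, 100, 100)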
| [
"numpy.exp",
"dnplab.DNPData"
] | [((215, 257), 'numpy.exp', 'np.exp', (['(-1.0 * (x - 50) ** 2.0 / 10.0 ** 2)'], {}), '(-1.0 * (x - 50) ** 2.0 / 10.0 ** 2)\n', (221, 257), True, 'import numpy as np\n'), ((511, 560), 'dnplab.DNPData', 'dnp.DNPData', (['gauss_3d', "['x', 'y', 'z']", '[x, y, z]'], {}), "(gauss_3d, ['x', 'y', 'z'], [x, y, z])\n", (522, 560), True, 'import dnplab as dnp\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 18:30:06 2018
@author: malopez
"""
import numpy as np
from numpy import random_intel
def computeCollisions(alpha, N, rem, dt, rv_max, vel):
# First we have to determine the maximum number of candidate collisions
n_cols_max = (N * rv_max * dt /2) + rem
# Remaining collisions (<1) (to be computed in next time_step)
rem = n_cols_max - int(n_cols_max)
# We only use the integer part
n_cols_max = int(n_cols_max)
# It is more efficient to generate all random numbers at once
random_intel.seed(brng='MT2203')
# We choose multiple (n_cols_max) random pairs of particles
random_pairs = random_intel.choice(N, size=(n_cols_max,2))
# List of random numbers to use as collision criteria
random_numbers = random_intel.uniform(0,1, n_cols_max)
# Now, we generate random directions, (modulus 1) sigmas
costheta = random_intel.uniform(0,2, size=n_cols_max) - 1
sintheta = np.sqrt(1-costheta**2)
phis = random_intel.uniform(0,2*np.pi, size=n_cols_max)
x_coord = sintheta*np.cos(phis)
y_coord = sintheta*np.sin(phis)
z_coord = costheta
sigmas = np.stack((x_coord, y_coord, z_coord), axis=1)
# This is a vectorized method, it should be faster than the for loop
# Using those random pairs we calculate relative velocities
rel_vs = np.array(list(map(lambda i, j: vel[i]-vel[j], random_pairs[:,0], random_pairs[:,1])))
# And now its modulus by performing a dot product with sigmas array
rel_vs_mod = np.sum(rel_vs*sigmas, axis=1)
# With this information we can check which collisions are valid
ratios = rel_vs_mod / rv_max
valid_cols = ratios > random_numbers
# The valid pairs of particles of each valid collision are:
valid_pairs = random_pairs[valid_cols]
# Number of collisions that take place in this step
cols_current_step = len(valid_pairs)
# Now, we select only those rows that correspond to valid collisions
valid_dotProducts = rel_vs_mod[valid_cols]
# See: https://stackoverflow.com/questions/16229823/how-to-multiply-numpy-2d-array-with-numpy-1d-array
valid_vectors = sigmas[valid_cols] * valid_dotProducts[:, None]
new_vel_components = 0.5*(1+alpha) * valid_vectors
valid_is = valid_pairs[:,0]
valid_js = valid_pairs[:,1]
# Updating the velocities array with its new values
vel[valid_is] -= new_vel_components
vel[valid_js] += new_vel_components
return vel, cols_current_step, rem
def propagate(t, pos, vel, LX, LY, LZ):
# Free stream of particles
pos += t*vel
# This is to account for the periodic boundaries
pos[:,0] -= np.floor(pos[:,0]/LX)*LX
pos[:,1] -= np.floor(pos[:,1]/LY)*LY
pos[:,2] -= np.floor(pos[:,2]/LZ)*LZ
return pos
def findMaximumRelativeVelocity(v2_mean, fwr):
vmax = fwr*np.sqrt(2*v2_mean/3)
return vmax
def relativeVelocity(i, j, vel):
rel_v = vel[i] - vel[j]
modulus = np.linalg.norm(rel_v)
return modulus
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
def computeKurtosis(v2):
v4 = v2**2
k = (v4).mean()/(v2.mean())**2
return k
def compute_a2(v2, dimensions):
kurtosis = computeKurtosis(v2)
a2 = (dimensions/(dimensions+2))*kurtosis - 1
return a2
def theoretical_a2(alpha, d, method=2):
    if method == 1:
        a2 = (16*(1-alpha) * (1 - 2*(alpha**2))) / (9 + 24*d - alpha*(41 - 8*d) + 30*(1-alpha)*(alpha**2))
    elif method == 2:
        a2 = (16*(1-alpha) * (1 - 2*(alpha**2))) / (25 + 24*d - alpha*(57 - 8*d) - 2*(1-alpha)*(alpha**2))
    else:
        raise ValueError('method must be 1 or 2')
    return a2
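# Minimal usage sketch (hypothetical parameters, not from the original script).
# computeCollisions itself needs the Intel Distribution for Python
# (numpy.random_intel); the helpers exercised below run on stock NumPy.
if __name__ == '__main__':
    N, dt, box = 1000, 1e-3, 1.0
    pos = box * np.random.rand(N, 3)
    vel = np.random.randn(N, 3)
    v2 = np.sum(vel**2, axis=1)
    rv_max = findMaximumRelativeVelocity(v2.mean(), fwr=5.0)
    pos = propagate(dt, pos, vel, box, box, box)
    print(compute_a2(v2, dimensions=3), theoretical_a2(alpha=0.9, d=3))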
| [
"numpy.random_intel.uniform",
"numpy.sqrt",
"numpy.random_intel.choice",
"numpy.floor",
"numpy.stack",
"numpy.sum",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.random_intel.seed"
] | [((571, 603), 'numpy.random_intel.seed', 'random_intel.seed', ([], {'brng': '"""MT2203"""'}), "(brng='MT2203')\n", (588, 603), False, 'from numpy import random_intel\n'), ((687, 731), 'numpy.random_intel.choice', 'random_intel.choice', (['N'], {'size': '(n_cols_max, 2)'}), '(N, size=(n_cols_max, 2))\n', (706, 731), False, 'from numpy import random_intel\n'), ((810, 848), 'numpy.random_intel.uniform', 'random_intel.uniform', (['(0)', '(1)', 'n_cols_max'], {}), '(0, 1, n_cols_max)\n', (830, 848), False, 'from numpy import random_intel\n'), ((987, 1013), 'numpy.sqrt', 'np.sqrt', (['(1 - costheta ** 2)'], {}), '(1 - costheta ** 2)\n', (994, 1013), True, 'import numpy as np\n'), ((1021, 1072), 'numpy.random_intel.uniform', 'random_intel.uniform', (['(0)', '(2 * np.pi)'], {'size': 'n_cols_max'}), '(0, 2 * np.pi, size=n_cols_max)\n', (1041, 1072), False, 'from numpy import random_intel\n'), ((1183, 1228), 'numpy.stack', 'np.stack', (['(x_coord, y_coord, z_coord)'], {'axis': '(1)'}), '((x_coord, y_coord, z_coord), axis=1)\n', (1191, 1228), True, 'import numpy as np\n'), ((1562, 1593), 'numpy.sum', 'np.sum', (['(rel_vs * sigmas)'], {'axis': '(1)'}), '(rel_vs * sigmas, axis=1)\n', (1568, 1593), True, 'import numpy as np\n'), ((3046, 3067), 'numpy.linalg.norm', 'np.linalg.norm', (['rel_v'], {}), '(rel_v)\n', (3060, 3067), True, 'import numpy as np\n'), ((925, 968), 'numpy.random_intel.uniform', 'random_intel.uniform', (['(0)', '(2)'], {'size': 'n_cols_max'}), '(0, 2, size=n_cols_max)\n', (945, 968), False, 'from numpy import random_intel\n'), ((1098, 1110), 'numpy.cos', 'np.cos', (['phis'], {}), '(phis)\n', (1104, 1110), True, 'import numpy as np\n'), ((1134, 1146), 'numpy.sin', 'np.sin', (['phis'], {}), '(phis)\n', (1140, 1146), True, 'import numpy as np\n'), ((2730, 2754), 'numpy.floor', 'np.floor', (['(pos[:, 0] / LX)'], {}), '(pos[:, 0] / LX)\n', (2738, 2754), True, 'import numpy as np\n'), ((2771, 2795), 'numpy.floor', 'np.floor', (['(pos[:, 1] / LY)'], {}), '(pos[:, 1] / LY)\n', (2779, 2795), True, 'import numpy as np\n'), ((2812, 2836), 'numpy.floor', 'np.floor', (['(pos[:, 2] / LZ)'], {}), '(pos[:, 2] / LZ)\n', (2820, 2836), True, 'import numpy as np\n'), ((2926, 2950), 'numpy.sqrt', 'np.sqrt', (['(2 * v2_mean / 3)'], {}), '(2 * v2_mean / 3)\n', (2933, 2950), True, 'import numpy as np\n')] |
from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torchaudio
from constants import INPUT_SAMPLE_RATE, TARGET_SAMPLE_RATE
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
class SegmentationDataset(Dataset):
"""Base class for FixedSegmentationDataset and RandomSegmentationDataset"""
def __init__(
self,
path_to_dataset: str,
split_name: str,
) -> None:
"""
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split
"""
super().__init__()
self.path_to_dataset = Path(path_to_dataset)
self.split_name = split_name
self.input_sr = INPUT_SAMPLE_RATE
self.target_sr = TARGET_SAMPLE_RATE
self.in_trg_ratio = self.input_sr / self.target_sr
self.trg_in_ratio = 1 / self.in_trg_ratio
# load the talks and the actual segments
self.talks_df = pd.read_csv(
self.path_to_dataset / f"{self.split_name}_talks.tsv", sep="\t", index_col=0
)
self.segments_df = pd.read_csv(
self.path_to_dataset / f"{self.split_name}_segments.tsv",
sep="\t",
index_col=0,
)
self.columns = ["talk_id", "start", "end", "duration", "included"]
# to calculate percentage of positive examples
self.n_pos, self.n_all = 0, 0
def _secs_to_outframes(self, x):
# from seconds to output space
return np.round(x * self.target_sr).astype(int)
def _outframes_to_inframes(self, x):
# from output space to input space
return np.round(x * self.in_trg_ratio).astype(int)
def _inframes_to_outframes(self, x):
# from input space to output space
return np.round(x * self.trg_in_ratio).astype(int)
def _secs_to_inframes(self, x):
# from seconds to input space
return np.round(x * self.input_sr).astype(int)
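    # Example of the unit conversions above (the rates are an assumption,
    # e.g. INPUT_SAMPLE_RATE = 16000 and TARGET_SAMPLE_RATE = 50):
    #   _secs_to_inframes(1.0)        -> 16000 input (audio) frames
    #   _inframes_to_outframes(16000) -> 50 output (label) frames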
def _get_targets_for_talk(self, sgm_df: pd.DataFrame, talk_id: str) -> pd.DataFrame:
"""
        Given a segmentation of a talk (sgm_df), find for
        each random segment the true starts and ends that it includes.
        They are stored as a comma-separated string of "start:end" pairs;
        if there are none, "NA" is stored instead.
Args:
sgm_df (pd.DataFrame): a random segmentation of a wav
talk_id (str): unique id for the wav
Returns:
pd.DataFrame: sgm_df but with the 'included' column completed
"""
true_sgm_df = self.segments_df.loc[self.segments_df.talk_id == talk_id]
talk_targets = np.zeros(
self.talks_df.loc[self.talks_df.id == talk_id, "total_frames"].values[0]
)
for idx, sgm in true_sgm_df.iterrows():
talk_targets[sgm.start : sgm.end] = 1
for idx, sgm in sgm_df.iterrows():
sgm_targets = self._get_targets_for_segment(
talk_targets[sgm.start : sgm.end]
)
sgm_df.loc[idx, "included"] = (
",".join([f"{s}:{e}" for s, e in sgm_targets]) if sgm_targets else "NA"
)
return sgm_df
def _get_targets_for_segment(self, true_points: np.array) -> list[list[int]]:
"""
Extracts the start and end points of segments in the output space
from a binary vector defining the labels in the input space
Args:
true_points (np.array):
binary label for each frame in the input space of a random segment
Returns:
list[list[int]]: list of tuples (start, end) in the output space
            where each tuple defines the start and end of the true included points
"""
points_of_change = list(np.where(true_points[1:] != true_points[:-1])[0] + 1)
targets = []
for s, e in zip([0] + points_of_change, points_of_change + [len(true_points)]):
if true_points[s] == 1:
s = self._inframes_to_outframes(s)
e = self._inframes_to_outframes(e)
# increase start of next segment if overlaps with end of the prev one
if targets and s <= targets[-1][-1]:
s += 1
targets.append([s, e])
self.n_pos += e - s
self.n_all += self._inframes_to_outframes(len(true_points))
return targets
def _construct_target(self, segment: pd.Series) -> torch.FloatTensor:
"""
Given a random segment, constructs its one-hot target tensor in the output space
"""
target_len = self._inframes_to_outframes(segment.duration)
target = torch.zeros(target_len, dtype=torch.float)
if segment.included != "NA":
for s_e in segment.included.split(","):
s, e = s_e.split(":")
s = int(s)
e = min(int(e), target_len + 1)
target[s:e] = 1
return target
class FixedSegmentationDataset(SegmentationDataset):
def __init__(
self,
path_to_dataset: str,
split_name: str,
segment_length_secs: int = 20,
inference_times: int = 1,
) -> None:
"""
Segmentation dataset to be used during inference
Creates a pool of examples from a fixed-length segmentation of a wav
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
inference_times (int, optional):
How many times to perform inference on different fixed-length segmentations.
Defaults to 1.
"""
super().__init__(path_to_dataset, split_name)
self.segment_length_inframes = self._secs_to_inframes(segment_length_secs)
self.inference_times = inference_times
def generate_fixed_segments(self, talk_id: str, i: int) -> None:
"""
Generates a fixed-length segmentation of a wav
with "i" controlling the begining of the segmentation
so that different values of "i" produce different segmentations
Args:
talk_id (str): unique wav identifier
i (int): indicates the current inference time
and is used to produce a different fixed-length segmentation
minimum allowed is 0 and maximum allowed is inference_times - 1
"""
talk_info = self.talks_df.loc[self.talks_df["id"] == talk_id]
self.talk_path = talk_info["path"].values[0]
self.duration_outframes = self._inframes_to_outframes(
self.talks_df.loc[self.talks_df["id"] == talk_id, "total_frames"].values[0]
)
self.duration_inframes = int(talk_info["total_frames"])
self.fixed_segments_df = pd.DataFrame(columns=self.columns)
start = round(self.segment_length_inframes / self.inference_times * i)
if start > self.duration_inframes:
start = 0
segmentation = np.arange(
start, self.duration_inframes, self.segment_length_inframes
).astype(int)
if segmentation[0] != 0:
segmentation = np.insert(segmentation, 0, 0)
if segmentation[-1] != self.duration_inframes:
if self.duration_inframes - segmentation[-1] < self._secs_to_inframes(2):
segmentation[-1] = self.duration_inframes
else:
segmentation = np.append(segmentation, self.duration_inframes)
self.fixed_segments_df["talk_id"] = talk_id
self.fixed_segments_df["start"] = segmentation[:-1]
self.fixed_segments_df["end"] = segmentation[1:]
self.fixed_segments_df["duration"] = (
self.fixed_segments_df.end - self.fixed_segments_df.start
)
# fill-in targets
self.fixed_segments_df = self._get_targets_for_talk(
self.fixed_segments_df, talk_id
)
def __len__(self) -> int:
return len(self.fixed_segments_df)
def __getitem__(
self, index: int
) -> Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
"""
Loads the data for this fixed-length segment
Args:
index (int): segment id in the self.fixed_segments_df
Returns:
Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
0: waveform of the segment (input space)
1: target tensor of the segment (output space)
2: starting frame of the segment (output space)
3: ending frame of the segment (output space)
"""
segment = self.fixed_segments_df.iloc[index]
waveform, _ = torchaudio.backend.sox_io_backend.load(
self.talk_path, frame_offset=segment.start, num_frames=segment.duration
)
start = self._inframes_to_outframes(segment.start + 1e-6)
end = self._inframes_to_outframes(segment.end + 1e-6)
target = self._construct_target(segment)
return waveform[0], target, start, end
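# Minimal usage sketch (the dataset path and talk_id below are hypothetical;
# the talk_id must exist in the split's *_talks.tsv):
#     ds = FixedSegmentationDataset("/data/en-de", "dev",
#                                   segment_length_secs=20, inference_times=2)
#     ds.generate_fixed_segments(talk_id="ted_1096", i=0)
#     wav, target, start, end = ds[0]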
class RandomSegmentationDataset(SegmentationDataset):
def __init__(
self,
path_to_dataset: str,
split_name: str = "train",
segment_length_secs: int = 20,
seed: int = None,
) -> None:
"""
Segmentation dataset to be used during training.
Creates a pool of examples from a random segmentation of collection of wavs
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split. Defaults to train.
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
seed (int, optional): The random seed to be used for the random segmentation.
Defaults to None
"""
super().__init__(path_to_dataset, split_name)
if seed is not None:
np.random.seed(seed)
self.segment_length_outframes = self._secs_to_outframes(segment_length_secs)
self.max_segment_outframes_overlap = self._secs_to_outframes(
segment_length_secs / 10
)
self.segment_length_inframes = self._secs_to_inframes(segment_length_secs)
# populate the dataset
self.generate_random_segments()
self.pos_class_percentage = self.n_pos / self.n_all
def generate_random_segments(self) -> None:
"""
Creates a new dataset by randomly segmenting each talk
and finding the true targets that correspond to every random segment
"""
print(
f"Generating random segments for {self.path_to_dataset} and {self.split_name} split ..."
)
self.random_segments_df = pd.concat(
[
self._get_targets_for_talk(self._segment_talk(talk), talk["id"])
for _, talk in tqdm(self.talks_df.iterrows())
],
ignore_index=True,
)
def _segment_talk(self, talk: pd.Series) -> pd.DataFrame:
"""
Produces a random segmentation of a given talk from the talks_df
"""
rnd_sgm_df = pd.DataFrame(columns=self.columns)
# sample in 0.02 ms but convert back to frames
start_range = np.arange(
0,
self._inframes_to_outframes(talk["total_frames"]),
step=self.segment_length_outframes - self.max_segment_outframes_overlap,
)
start_range = start_range - np.random.randint(
0, self.max_segment_outframes_overlap, size=len(start_range)
)
start_range = self._outframes_to_inframes(start_range)
rnd_sgm_df[["start", "end"]] = [
(
max(0, start),
min(start + self.segment_length_inframes, talk["total_frames"]),
)
for start in start_range
]
rnd_sgm_df["duration"] = rnd_sgm_df["end"] - rnd_sgm_df["start"]
rnd_sgm_df["talk_id"] = talk["id"]
return rnd_sgm_df
def __len__(self) -> int:
return len(self.random_segments_df)
def __getitem__(
self, index: int
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
"""
Loads the data for this example of a random segment
Args:
index (int): the index of the random segment in the random_segments_df
Returns:
Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
0: waveform of the segment (input space)
1: target tensor of the segment (output space)
2: starting frame of the segment (output space)
3: ending frame of the segment (output space)
"""
segment = self.random_segments_df.iloc[index]
talk_path = self.talks_df.loc[
self.talks_df.id == segment.talk_id, "path"
].values[0]
# get input
        waveform, _ = torchaudio.backend.sox_io_backend.load(
talk_path, frame_offset=segment.start, num_frames=segment.duration
)
target = self._construct_target(segment)
start = self._inframes_to_outframes(segment.start + 1e-6)
end = self._inframes_to_outframes(segment.end + 1e-6)
        return waveform[0], target, start, end
class MultRandomSegmentationDataset(RandomSegmentationDataset):
def __init__(
self,
dataset_paths: list[str],
splits: list[str],
segment_length_secs: int = 20,
seed: int = None,
) -> None:
"""
        Segmentation dataset to be used during multilingual training.
Creates a pool of examples by randomly segmenting many wav collections
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split. Defaults to train.
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
seed (int, optional): The random seed to be used for the random segmentation.
Defaults to None
"""
# init data variables
self.random_segments_df_parent = pd.DataFrame()
self.talks_df_parent = pd.DataFrame()
self.segments_df_parent = pd.DataFrame()
self.n_pos_parent, self.n_all_parent = 0, 0
# iterativelly populate the dataset
for dataset_path, split in zip(dataset_paths, splits):
super().__init__(dataset_path, split, segment_length_secs, seed)
self.random_segments_df_parent = pd.concat(
[self.random_segments_df_parent, self.random_segments_df],
ignore_index=True,
)
self.talks_df_parent = pd.concat(
[self.talks_df_parent, self.talks_df], ignore_index=True
)
self.segments_df_parent = pd.concat(
[self.segments_df_parent, self.segments_df], ignore_index=True
)
self.n_pos_parent += self.n_pos
self.n_all_parent += self.n_all
self.pos_class_percentage = self.n_pos_parent / self.n_all_parent
self.random_segments_df = self.random_segments_df_parent
self.talks_df = self.talks_df_parent
self.segments_df = self.segments_df_parent
class FixedSegmentationDatasetNoTarget(Dataset):
def __init__(
self,
path_to_wav: str,
segment_length: int = 20,
inference_times: int = 1,
) -> None:
"""[summary]
Args:
path_to_wavs (str): [description]
segment_length (int, optional): [description]. Defaults to 20.
inference_times (int, optional): [description]. Defaults to 1.
"""
super().__init__()
self.input_sr = INPUT_SAMPLE_RATE
self.target_sr = TARGET_SAMPLE_RATE
self.in_trg_ratio = self.input_sr / self.target_sr
self.trg_in_ratio = 1 / self.in_trg_ratio
self.segment_length_inframes = self._secs_to_inframes(segment_length)
self.inference_times = inference_times
self.path_to_wav = path_to_wav
self.duration_inframes = torchaudio.info(self.path_to_wav).num_frames
self.duration_outframes = self._inframes_to_outframes(self.duration_inframes)
self.sample_rate = torchaudio.info(self.path_to_wav).sample_rate
assert (
self.sample_rate == self.input_sr
), f"Audio needs to have sample rate of {self.input_sr}"
def _inframes_to_outframes(self, x):
# from input space to output space
return np.round(x * self.trg_in_ratio).astype(int)
def _secs_to_inframes(self, x):
# from seconds to input space
return np.round(x * self.input_sr).astype(int)
def fixed_length_segmentation(self, i: int) -> None:
"""
Generates a fixed-length segmentation of a wav
with "i" controlling the begining of the segmentation
so that different values of "i" produce different segmentations
        Args:
            i (int): indicates the current inference time
and is used to produce a different fixed-length segmentation
minimum allowed is 0 and maximum allowed is inference_times - 1
"""
start = round(self.segment_length_inframes / self.inference_times * i)
if start > self.duration_inframes:
start = 0
segmentation = np.arange(
start, self.duration_inframes, self.segment_length_inframes
).astype(int)
if segmentation[0] != 0:
segmentation = np.insert(segmentation, 0, 0)
if segmentation[-1] != self.duration_inframes:
if self.duration_inframes - segmentation[-1] < self._secs_to_inframes(2):
segmentation[-1] = self.duration_inframes
else:
segmentation = np.append(segmentation, self.duration_inframes)
self.starts = segmentation[:-1]
self.ends = segmentation[1:]
def __len__(self) -> int:
return len(self.starts)
def __getitem__(
self, index: int
) -> Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
"""
Loads the data for this fixed-length segment
Args:
index (int): index of the segment in the fixed length segmentation
Returns:
Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
0: waveform of the segment (input space)
                1: None for consistency with datasets that have targets
                2: starting frame of the segment (output space)
                3: ending frame of the segment (output space)
"""
waveform, _ = torchaudio.backend.sox_io_backend.load(
self.path_to_wav,
frame_offset=self.starts[index],
num_frames=self.ends[index] - self.starts[index],
)
start = self._inframes_to_outframes(self.starts[index] + 1e-6)
end = self._inframes_to_outframes(self.ends[index] + 1e-6)
return waveform[0], None, start, end
class RandomDataloaderGenerator:
def __init__(
self,
dataset_roots: str,
batch_size: int,
split_name: str,
num_workers: int = 0,
segment_length: int = 20,
) -> None:
"""
Helper object to be used in each epoch of training
to produce a different random segmentation of the training data
Args:
            dataset_roots (str): absolute path to the directory
                of _talks.tsv and _segments.tsv for the dataset
                (comma-separated paths enable multilingual training)
batch_size (int): training batch size (in number of examples)
split_name (str): the name of the dataset split
num_workers (int, optional): number of workers for the dataloader.
Defaults to 0.
segment_length (int, optional):
Length of the segments (in seconds) to be produced during the random segmentation.
Defaults to 20.
"""
self.dataset_roots = dataset_roots
self.num_workers = num_workers
self.split_name = split_name
self.batch_size = batch_size
# for the multilingual training, dataset_roots is comma separated
if "," in self.dataset_roots:
self.is_mult = True
else:
self.is_mult = False
self.segment_length = segment_length
self.max_seed = 2 ** 32 - 1
def generate(self) -> DataLoader:
"""
Generates a random segmentation of the entire dataset
and returns a dataloader object for it
"""
if self.is_mult:
dataset = MultRandomSegmentationDataset(
self.dataset_roots.split(","),
self.split_name.split(","),
segment_length_secs=self.segment_length,
seed=np.random.randint(0, self.max_seed),
)
else:
dataset = RandomSegmentationDataset(
self.dataset_roots,
self.split_name,
segment_length_secs=self.segment_length,
seed=np.random.randint(0, self.max_seed),
)
dataloader = DataLoader(
dataset,
batch_size=self.batch_size,
collate_fn=segm_collate_fn,
num_workers=self.num_workers,
shuffle=True,
)
return dataloader
class FixedDataloaderGenerator:
def __init__(
self,
dataset_root: str,
batch_size: int,
split_name: str,
num_workers: int = 0,
segment_length: int = 20,
inference_times: int = 1,
) -> None:
"""
Helper object to be used during inference in order to generate the
fixed-length segmentations of a wav collection
Args:
            dataset_root (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
batch_size (int): training batch size (in number of examples)
split_name (str): the name of the dataset split
num_workers (int, optional): number of workers for the dataloader.
Defaults to 0.
segment_length (int, optional):
                Length of the segments (in seconds) to be produced during the fixed-length segmentation.
Defaults to 20.
inference_times (int, optional):
The number of different fixed-length segmentations to produce
from each wav. Defaults to 1.
"""
self.batch_size = batch_size
self.num_workers = num_workers
self.lang_pair = Path(dataset_root).name
self.dataset = FixedSegmentationDataset(
dataset_root,
split_name,
segment_length_secs=segment_length,
inference_times=inference_times,
)
def generate(self, talk_id: str, i: int) -> DataLoader:
"""
Generates a fixed segmentation of a specific talk_id.
The iteration (<= inference_times) controls the points of the fixed segmentation
        to introduce different overlaps. Returns a dataloader for this dataset.
Args:
talk_id (str): unique wav id
i (int): iteration in (0, inference_times)
Returns:
DataLoader: a torch dataloader based on a FixedSegmentationDataset
"""
self.dataset.generate_fixed_segments(talk_id, i)
        dataloader = DataLoader(
self.dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
drop_last=False,
shuffle=False,
collate_fn=segm_collate_fn,
)
        return dataloader
def get_talk_ids(self) -> list:
return self.dataset.talks_df["id"].tolist()
def segm_collate_fn(
batch: list,
) -> Tuple[
torch.FloatTensor,
torch.FloatTensor,
torch.LongTensor,
torch.BoolTensor,
list[bool],
list[int],
list[int],
]:
"""
(inference) collate function for the dataloader of the SegmentationDataset
Args:
batch (list): list of examples from SegmentationDataset
Returns:
Tuple[ torch.FloatTensor, torch.FloatTensor, torch.LongTensor, torch.BoolTensor, list[bool], list[int], list[int], ]:
0: 2D tensor, padded and normalized waveforms for each random segment
1: 2D tensor, binary padded targets for each random segment (output space)
2: 2D tensor, binary mask for wav2vec 2.0 (input space)
3: 2D tensor, binary mask for audio-frame-classifier (output space)
4: a '0' indicates that the whole example is empty (torch.zeros)
5: the start frames of the segments (output space)
6: the end frames of the segments (output space)
"""
included = [bool(example[0].sum()) for example in batch]
starts = [example[2] for example in batch]
ends = [example[3] for example in batch]
# sequence lengths
in_seq_len = [len(example[0]) for example in batch]
out_seq_len = [end - start for start, end in zip(starts, ends)]
bs = len(in_seq_len)
# pad and concat
audio = torch.cat(
[
F.pad(example[0], (0, max(in_seq_len) - len(example[0]))).unsqueeze(0)
for example in batch
]
)
# check if the batch contains also targets
if batch[0][1] is not None:
target = torch.cat(
[
F.pad(example[1], (0, max(out_seq_len) - len(example[1]))).unsqueeze(0)
for example in batch
]
)
else:
target = None
# normalize input
# only for inputs that have non-zero elements
included_ = torch.tensor(included).bool()
audio[included_] = (
audio[included_] - torch.mean(audio[included_], dim=1, keepdim=True)
) / torch.std(audio[included_], dim=1, keepdim=True)
# get masks
in_mask = torch.ones(audio.shape, dtype=torch.long)
out_mask = torch.ones([bs, max(out_seq_len)], dtype=torch.bool)
for i, in_sl, out_sl in zip(range(bs), in_seq_len, out_seq_len):
in_mask[i, in_sl:] = 0
out_mask[i, out_sl:] = 0
return (audio, target, in_mask, out_mask, included, starts, ends)
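# Minimal usage sketch (hypothetical path; assumes <split>_talks.tsv and
# <split>_segments.tsv exist under the dataset root):
#     gen = RandomDataloaderGenerator("/data/en-de", batch_size=8,
#                                     split_name="train")
#     for audio, target, in_mask, out_mask, included, starts, ends in gen.generate():
#         ...  # one training step per batch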
| [
"pandas.read_csv",
"torchaudio.backend.sox_io_backend.load",
"numpy.arange",
"pathlib.Path",
"torch.mean",
"numpy.where",
"torchaudio.info",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.round",
"torch.std",
"numpy.insert",
"numpy.append",
"torch.tensor",
"numpy.zeros",
"numpy.random... | [((26545, 26586), 'torch.ones', 'torch.ones', (['audio.shape'], {'dtype': 'torch.long'}), '(audio.shape, dtype=torch.long)\n', (26555, 26586), False, 'import torch\n'), ((788, 809), 'pathlib.Path', 'Path', (['path_to_dataset'], {}), '(path_to_dataset)\n', (792, 809), False, 'from pathlib import Path\n'), ((1117, 1210), 'pandas.read_csv', 'pd.read_csv', (["(self.path_to_dataset / f'{self.split_name}_talks.tsv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(self.path_to_dataset / f'{self.split_name}_talks.tsv', sep='\\t',\n index_col=0)\n", (1128, 1210), True, 'import pandas as pd\n'), ((1256, 1353), 'pandas.read_csv', 'pd.read_csv', (["(self.path_to_dataset / f'{self.split_name}_segments.tsv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(self.path_to_dataset / f'{self.split_name}_segments.tsv', sep=\n '\\t', index_col=0)\n", (1267, 1353), True, 'import pandas as pd\n'), ((2798, 2885), 'numpy.zeros', 'np.zeros', (["self.talks_df.loc[self.talks_df.id == talk_id, 'total_frames'].values[0]"], {}), "(self.talks_df.loc[self.talks_df.id == talk_id, 'total_frames'].\n values[0])\n", (2806, 2885), True, 'import numpy as np\n'), ((4846, 4888), 'torch.zeros', 'torch.zeros', (['target_len'], {'dtype': 'torch.float'}), '(target_len, dtype=torch.float)\n', (4857, 4888), False, 'import torch\n'), ((7163, 7197), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.columns'}), '(columns=self.columns)\n', (7175, 7197), True, 'import pandas as pd\n'), ((9047, 9163), 'torchaudio.backend.sox_io_backend.load', 'torchaudio.backend.sox_io_backend.load', (['self.talk_path'], {'frame_offset': 'segment.start', 'num_frames': 'segment.duration'}), '(self.talk_path, frame_offset=segment\n .start, num_frames=segment.duration)\n', (9085, 9163), False, 'import torchaudio\n'), ((11594, 11628), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.columns'}), '(columns=self.columns)\n', (11606, 11628), True, 'import pandas as pd\n'), ((13372, 13483), 'torchaudio.backend.sox_io_backend.load', 'torchaudio.backend.sox_io_backend.load', (['talk_path'], {'frame_offset': 'segment.start', 'num_frames': 'segment.duration'}), '(talk_path, frame_offset=segment.\n start, num_frames=segment.duration)\n', (13410, 13483), False, 'import torchaudio\n'), ((14681, 14695), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14693, 14695), True, 'import pandas as pd\n'), ((14727, 14741), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14739, 14741), True, 'import pandas as pd\n'), ((14776, 14790), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14788, 14790), True, 'import pandas as pd\n'), ((19271, 19415), 'torchaudio.backend.sox_io_backend.load', 'torchaudio.backend.sox_io_backend.load', (['self.path_to_wav'], {'frame_offset': 'self.starts[index]', 'num_frames': '(self.ends[index] - self.starts[index])'}), '(self.path_to_wav, frame_offset=self.\n starts[index], num_frames=self.ends[index] - self.starts[index])\n', (19309, 19415), False, 'import torchaudio\n'), ((21770, 21893), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'collate_fn': 'segm_collate_fn', 'num_workers': 'self.num_workers', 'shuffle': '(True)'}), '(dataset, batch_size=self.batch_size, collate_fn=segm_collate_fn,\n num_workers=self.num_workers, shuffle=True)\n', (21780, 21893), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((24060, 24207), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset'], {'batch_size': 'self.batch_size', 'num_workers': 
'self.num_workers', 'drop_last': '(False)', 'shuffle': '(False)', 'collate_fn': 'segm_collate_fn'}), '(self.dataset, batch_size=self.batch_size, num_workers=self.\n num_workers, drop_last=False, shuffle=False, collate_fn=segm_collate_fn)\n', (24070, 24207), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((26465, 26513), 'torch.std', 'torch.std', (['audio[included_]'], {'dim': '(1)', 'keepdim': '(True)'}), '(audio[included_], dim=1, keepdim=True)\n', (26474, 26513), False, 'import torch\n'), ((7531, 7560), 'numpy.insert', 'np.insert', (['segmentation', '(0)', '(0)'], {}), '(segmentation, 0, 0)\n', (7540, 7560), True, 'import numpy as np\n'), ((10373, 10393), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10387, 10393), True, 'import numpy as np\n'), ((15074, 15165), 'pandas.concat', 'pd.concat', (['[self.random_segments_df_parent, self.random_segments_df]'], {'ignore_index': '(True)'}), '([self.random_segments_df_parent, self.random_segments_df],\n ignore_index=True)\n', (15083, 15165), True, 'import pandas as pd\n'), ((15244, 15311), 'pandas.concat', 'pd.concat', (['[self.talks_df_parent, self.talks_df]'], {'ignore_index': '(True)'}), '([self.talks_df_parent, self.talks_df], ignore_index=True)\n', (15253, 15311), True, 'import pandas as pd\n'), ((15380, 15453), 'pandas.concat', 'pd.concat', (['[self.segments_df_parent, self.segments_df]'], {'ignore_index': '(True)'}), '([self.segments_df_parent, self.segments_df], ignore_index=True)\n', (15389, 15453), True, 'import pandas as pd\n'), ((16669, 16702), 'torchaudio.info', 'torchaudio.info', (['self.path_to_wav'], {}), '(self.path_to_wav)\n', (16684, 16702), False, 'import torchaudio\n'), ((16827, 16860), 'torchaudio.info', 'torchaudio.info', (['self.path_to_wav'], {}), '(self.path_to_wav)\n', (16842, 16860), False, 'import torchaudio\n'), ((18159, 18188), 'numpy.insert', 'np.insert', (['segmentation', '(0)', '(0)'], {}), '(segmentation, 0, 0)\n', (18168, 18188), True, 'import numpy as np\n'), ((23232, 23250), 'pathlib.Path', 'Path', (['dataset_root'], {}), '(dataset_root)\n', (23236, 23250), False, 'from pathlib import Path\n'), ((26325, 26347), 'torch.tensor', 'torch.tensor', (['included'], {}), '(included)\n', (26337, 26347), False, 'import torch\n'), ((26407, 26456), 'torch.mean', 'torch.mean', (['audio[included_]'], {'dim': '(1)', 'keepdim': '(True)'}), '(audio[included_], dim=1, keepdim=True)\n', (26417, 26456), False, 'import torch\n'), ((1658, 1686), 'numpy.round', 'np.round', (['(x * self.target_sr)'], {}), '(x * self.target_sr)\n', (1666, 1686), True, 'import numpy as np\n'), ((1799, 1830), 'numpy.round', 'np.round', (['(x * self.in_trg_ratio)'], {}), '(x * self.in_trg_ratio)\n', (1807, 1830), True, 'import numpy as np\n'), ((1943, 1974), 'numpy.round', 'np.round', (['(x * self.trg_in_ratio)'], {}), '(x * self.trg_in_ratio)\n', (1951, 1974), True, 'import numpy as np\n'), ((2077, 2104), 'numpy.round', 'np.round', (['(x * self.input_sr)'], {}), '(x * self.input_sr)\n', (2085, 2104), True, 'import numpy as np\n'), ((7366, 7436), 'numpy.arange', 'np.arange', (['start', 'self.duration_inframes', 'self.segment_length_inframes'], {}), '(start, self.duration_inframes, self.segment_length_inframes)\n', (7375, 7436), True, 'import numpy as np\n'), ((7809, 7856), 'numpy.append', 'np.append', (['segmentation', 'self.duration_inframes'], {}), '(segmentation, self.duration_inframes)\n', (7818, 7856), True, 'import numpy as np\n'), ((17102, 17133), 'numpy.round', 'np.round', (['(x * self.trg_in_ratio)'], {}), 
'(x * self.trg_in_ratio)\n', (17110, 17133), True, 'import numpy as np\n'), ((17236, 17263), 'numpy.round', 'np.round', (['(x * self.input_sr)'], {}), '(x * self.input_sr)\n', (17244, 17263), True, 'import numpy as np\n'), ((17994, 18064), 'numpy.arange', 'np.arange', (['start', 'self.duration_inframes', 'self.segment_length_inframes'], {}), '(start, self.duration_inframes, self.segment_length_inframes)\n', (18003, 18064), True, 'import numpy as np\n'), ((18437, 18484), 'numpy.append', 'np.append', (['segmentation', 'self.duration_inframes'], {}), '(segmentation, self.duration_inframes)\n', (18446, 18484), True, 'import numpy as np\n'), ((3936, 3981), 'numpy.where', 'np.where', (['(true_points[1:] != true_points[:-1])'], {}), '(true_points[1:] != true_points[:-1])\n', (3944, 3981), True, 'import numpy as np\n'), ((21436, 21471), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.max_seed'], {}), '(0, self.max_seed)\n', (21453, 21471), True, 'import numpy as np\n'), ((21697, 21732), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.max_seed'], {}), '(0, self.max_seed)\n', (21714, 21732), True, 'import numpy as np\n')] |
import numpy as np
from pylab import imshow, plot, show, gray
N = 1000  # number of divisions on each axis
NIT = 10  # number of iterations of z -> z**2 + c
real = np.linspace(-2, 2, N) # Real axis
imaginario = np.linspace(-2, 2, N) # Imaginary axis
matriz_c = np.zeros((N, N), dtype=complex) # matrix for the c values
# now we add the c values on the subspace (limited axis)
for x in range(N):
    for y in range(N):
matriz_c[x][y] = real[x] + 1j*imaginario[y]
# the function that iterates z -> z**2 + c to test membership in the Mandelbrot set
def iterate(c, NIT):
termo = 0
for i in range(0, NIT):
termo = termo*termo + c
return termo
# now it's time to check which matrix entries are in the Mandelbrot set
Mandel = np.zeros((N, N), dtype=float)
for x in range(N):
    for y in range(N):
        Mandel[x][y] = iterate(matriz_c[x][y], NIT)
if abs(Mandel[x][y]) > 2: # if the number is not in the set we change it to zero for the density graph
Mandel[x][y] = 0
else:
Mandel[x][y] = abs(Mandel[x][y])
imshow(Mandel.transpose())
gray()
show()
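# A vectorized variant of iterate() above (a sketch, not part of the original
# script): same z -> z*z + c recurrence, but diverged points are frozen with a
# boolean mask instead of being zeroed afterwards.
def iterate_vectorized(c, nit):
    z = np.zeros_like(c)
    alive = np.ones(c.shape, dtype=bool)  # points that have not diverged yet
    for _ in range(nit):
        z[alive] = z[alive] * z[alive] + c[alive]
        alive &= np.abs(z) <= 2
    out = np.abs(z)
    out[~alive] = 0.0
    return out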
| [
"pylab.gray",
"numpy.linspace",
"numpy.zeros",
"pylab.show"
] | [((164, 185), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'N'], {}), '(-2, 2, N)\n', (175, 185), True, 'import numpy as np\n'), ((227, 248), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'N'], {}), '(-2, 2, N)\n', (238, 248), True, 'import numpy as np\n'), ((287, 318), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'complex'}), '((N, N), dtype=complex)\n', (295, 318), True, 'import numpy as np\n'), ((806, 835), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'float'}), '((N, N), dtype=float)\n', (814, 835), True, 'import numpy as np\n'), ((1179, 1185), 'pylab.gray', 'gray', ([], {}), '()\n', (1183, 1185), False, 'from pylab import imshow, plot, show, gray\n'), ((1187, 1193), 'pylab.show', 'show', ([], {}), '()\n', (1191, 1193), False, 'from pylab import imshow, plot, show, gray\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 9 22:04:01 2022
@author: lukepinkel
"""
import numpy as np
import scipy as sp
import scipy.linalg
import pandas as pd
from ..utilities.random import r_lkj, exact_rmvnorm
class FactorModelSim(object):
def __init__(self, n_vars=12, n_facs=3, L=None, Phi=None,
Psi=None, rng=None, seed=None):
self.rng = np.random.default_rng(seed) if rng is None else rng
self.n_vars = n_vars
self.n_facs = n_facs
self.L = L
self.Phi = Phi
self.Psi = Psi
def _generate_loadings(self, random=False, r_kws=None):
L = np.zeros((self.n_vars, self.n_facs))
inds = list(zip(np.array_split(np.arange(self.n_vars), self.n_facs), np.arange(self.n_facs)))
default_r_kws = dict(low=0.4, high=0.9)
r_kws = {} if r_kws is None else r_kws
r_kws = {**default_r_kws, **r_kws}
for rows, col in inds:
            s = len(rows)
            if random:
                u = self.rng.uniform(size=s, **r_kws)
u = np.sort(u)[::-1]
else:
u = np.linspace(1.0, 0.4, s)
L[rows, col] = u
return L
def _generate_factor_corr(self, random=False, r_kws=None, rho=0.5):
default_r_kws = dict(eta=2.0)
r_kws = {} if r_kws is None else r_kws
r_kws = {**default_r_kws, **r_kws}
if random:
Phi = r_lkj(n=1, dim=self.n_facs, rng=self.rng, **r_kws)
else:
Phi = rho**sp.linalg.toeplitz(np.arange(self.n_facs))
s = (-1)**np.arange(self.n_facs).reshape(-1, 1)
Phi = s*Phi*s.T
return Phi
def _generate_residual_cov(self, random=True, dist=None):
if random:
if dist is None:
dist = lambda size: self.rng.uniform(low=0.3, high=0.7, size=size)
psi = dist(self.n_vars)
else:
psi = np.linspace(0.3, 0.7, self.n_vars)
Psi = np.diag(psi)
return Psi
def simulate_cov(self, loadings_kws=None, factor_corr_kws=None,
residual_cov_kws=None):
loadings_kws = {} if loadings_kws is None else loadings_kws
factor_corr_kws = {} if factor_corr_kws is None else factor_corr_kws
residual_cov_kws = {} if residual_cov_kws is None else residual_cov_kws
self.L = self._generate_loadings(**loadings_kws)
self.Phi = self._generate_factor_corr(**factor_corr_kws)
self.Psi = self._generate_residual_cov(**residual_cov_kws)
self.C = sp.linalg.block_diag(self.Phi, self.Psi)
self.Sigma = self.L.dot(self.Phi).dot(self.L.T) + self.Psi
def simulate_data(self, n_obs=1000, exact=True):
if exact:
Z = exact_rmvnorm(n=n_obs, S=self.C)
else:
Z = self.rng.multivariate_normal(mean=np.zeros(self.n_vars+self.n_facs),
cov=self.C, size=n_obs)
X = Z[:, :self.n_facs].dot(self.L.T) + Z[:, self.n_facs:]
return Z, X
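# Minimal usage sketch (an assumption; the relative imports above mean this
# module is normally used from inside its parent package):
#     sim = FactorModelSim(n_vars=12, n_facs=3, seed=123)
#     sim.simulate_cov(factor_corr_kws=dict(random=False, rho=0.5))
#     Z, X = sim.simulate_data(n_obs=500, exact=True)
#     # X is (500, 12) observed data; Z holds the factors and residuals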
| [
"numpy.random.default_rng",
"numpy.sort",
"numpy.diag",
"numpy.zeros",
"numpy.linspace",
"scipy.linalg.block_diag",
"numpy.arange"
] | [((666, 702), 'numpy.zeros', 'np.zeros', (['(self.n_vars, self.n_facs)'], {}), '((self.n_vars, self.n_facs))\n', (674, 702), True, 'import numpy as np\n'), ((2016, 2028), 'numpy.diag', 'np.diag', (['psi'], {}), '(psi)\n', (2023, 2028), True, 'import numpy as np\n'), ((2606, 2646), 'scipy.linalg.block_diag', 'sp.linalg.block_diag', (['self.Phi', 'self.Psi'], {}), '(self.Phi, self.Psi)\n', (2626, 2646), True, 'import scipy as sp\n'), ((410, 437), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (431, 437), True, 'import numpy as np\n'), ((1967, 2001), 'numpy.linspace', 'np.linspace', (['(0.3)', '(0.7)', 'self.n_vars'], {}), '(0.3, 0.7, self.n_vars)\n', (1978, 2001), True, 'import numpy as np\n'), ((780, 802), 'numpy.arange', 'np.arange', (['self.n_facs'], {}), '(self.n_facs)\n', (789, 802), True, 'import numpy as np\n'), ((1150, 1174), 'numpy.linspace', 'np.linspace', (['(1.0)', '(0.4)', 's'], {}), '(1.0, 0.4, s)\n', (1161, 1174), True, 'import numpy as np\n'), ((742, 764), 'numpy.arange', 'np.arange', (['self.n_vars'], {}), '(self.n_vars)\n', (751, 764), True, 'import numpy as np\n'), ((1095, 1105), 'numpy.sort', 'np.sort', (['u'], {}), '(u)\n', (1102, 1105), True, 'import numpy as np\n'), ((1570, 1592), 'numpy.arange', 'np.arange', (['self.n_facs'], {}), '(self.n_facs)\n', (1579, 1592), True, 'import numpy as np\n'), ((2908, 2943), 'numpy.zeros', 'np.zeros', (['(self.n_vars + self.n_facs)'], {}), '(self.n_vars + self.n_facs)\n', (2916, 2943), True, 'import numpy as np\n'), ((1616, 1638), 'numpy.arange', 'np.arange', (['self.n_facs'], {}), '(self.n_facs)\n', (1625, 1638), True, 'import numpy as np\n')] |
"""Plot match's mod* files
usage:
cd matchX.X/Model/data
e.g.,
python -m match.makemod.plot_mods -p 'mod*'
"""
from __future__ import print_function
import glob
import os
import sys
import argparse
import numpy as np
import matplotlib.pylab as plt
from ..scripts.config import EXT
def plot_mods(sub=None, pref='mod1_*', overwrite=False):
"""
make a plot of Mbol vs Log Te of tracks to go to MATCH or the
MATCH tracks themselves
color bar axis is either Nstars (if match tracks) or logAge (if unprocessed
tracks)
Parameters
----------
sub : string
the subdirectory to operate in [optional]
pref : string
the prefix search string of the track names [mod1*]
if plotting unprocessed tracks pref should end with .dat.
overwrite : bool [False]
overwrite existing plots
NB: Unprocessed match track plotting is not tested!
TODO: either:
hard code color bar limits or
set all axes limits as options or
use axes limits from makemod.cpp
"""
here = os.getcwd()
if sub is not None:
assert os.path.isdir(sub), 'sub directory not found'
os.chdir(sub)
# i = LogT, j = Mbol k = Nstars or logAge
# mod format:Mbol Log_Te Nstars ...
i = 1
j = 0
k = 2
zstr = 'Nstars'
# unprocessed format logAge Mass logTe Mbol ...
if pref.endswith('.dat'):
i = 2
j = 3
k = 1
zstr = 'Age'
modfiles = glob.glob(pref)
if len(modfiles) == 0:
print('{} not found'.format(pref))
for fname in modfiles:
figname = '{}{}'.format(fname, EXT)
if fname.endswith('.png'):
continue
if os.path.isfile(figname) and not overwrite:
print('found {} and not overwriting'.format(figname))
continue
data = np.loadtxt(fname).T
_, ax = plt.subplots()
try:
scr = ax.scatter(data[i], data[j], c=np.log10(data[k]),
cmap=plt.cm.Blues, edgecolor='none')
cbar = plt.colorbar(scr)
cbar.set_label('log {}'.format(zstr))
        except Exception:
ax.plot(data[i], data[j], color='k')
ax.set_xlim(ax.get_xlim()[::-1])
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_ylim(13, -14.0)
ax.set_xlim(5.5, 3.0)
ax.set_title(fname.replace('_', r'\_'))
ax.set_xlabel('Log T')
ax.set_ylabel('Mbol')
plt.savefig(figname)
print('wrote {}'.format(figname))
plt.close()
os.chdir(here)
def main(argv):
"""Main caller for plot_mods."""
parser = argparse.ArgumentParser(description="Plot mod* files in data/")
parser.add_argument('-s', '--sub', type=str,
help='subdirectory name')
parser.add_argument('-r', '--recursive', action='store_true',
help='run on all subdirectories')
parser.add_argument('-f', '--overwrite', action='store_true',
help='overwrite plots')
parser.add_argument('-p', '--pref', type=str, default='mod1*',
help='search string for mod files')
args = parser.parse_args(argv)
if not os.getcwd().endswith('data'):
print('warning, this should run in matchX.X/Model/data')
subs = [args.sub]
if args.recursive:
subs = [s for s in os.listdir('.') if os.path.isdir(s)]
for sub in subs:
plot_mods(sub=sub, pref=args.pref, overwrite=args.overwrite)
if __name__ == '__main__':
main(sys.argv[1:])
| [
"matplotlib.pylab.subplots",
"matplotlib.pylab.savefig",
"os.listdir",
"numpy.log10",
"argparse.ArgumentParser",
"matplotlib.pylab.colorbar",
"os.getcwd",
"os.chdir",
"os.path.isfile",
"os.path.isdir",
"numpy.loadtxt",
"matplotlib.pylab.close",
"glob.glob"
] | [((1045, 1056), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1054, 1056), False, 'import os\n'), ((1462, 1477), 'glob.glob', 'glob.glob', (['pref'], {}), '(pref)\n', (1471, 1477), False, 'import glob\n'), ((2529, 2543), 'os.chdir', 'os.chdir', (['here'], {}), '(here)\n', (2537, 2543), False, 'import os\n'), ((2612, 2675), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot mod* files in data/"""'}), "(description='Plot mod* files in data/')\n", (2635, 2675), False, 'import argparse\n'), ((1096, 1114), 'os.path.isdir', 'os.path.isdir', (['sub'], {}), '(sub)\n', (1109, 1114), False, 'import os\n'), ((1150, 1163), 'os.chdir', 'os.chdir', (['sub'], {}), '(sub)\n', (1158, 1163), False, 'import os\n'), ((1867, 1881), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {}), '()\n', (1879, 1881), True, 'import matplotlib.pylab as plt\n'), ((2442, 2462), 'matplotlib.pylab.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (2453, 2462), True, 'import matplotlib.pylab as plt\n'), ((2513, 2524), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (2522, 2524), True, 'import matplotlib.pylab as plt\n'), ((1686, 1709), 'os.path.isfile', 'os.path.isfile', (['figname'], {}), '(figname)\n', (1700, 1709), False, 'import os\n'), ((1831, 1848), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {}), '(fname)\n', (1841, 1848), True, 'import numpy as np\n'), ((2048, 2065), 'matplotlib.pylab.colorbar', 'plt.colorbar', (['scr'], {}), '(scr)\n', (2060, 2065), True, 'import matplotlib.pylab as plt\n'), ((3192, 3203), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3201, 3203), False, 'import os\n'), ((3360, 3375), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3370, 3375), False, 'import os\n'), ((3379, 3395), 'os.path.isdir', 'os.path.isdir', (['s'], {}), '(s)\n', (3392, 3395), False, 'import os\n'), ((1944, 1961), 'numpy.log10', 'np.log10', (['data[k]'], {}), '(data[k])\n', (1952, 1961), True, 'import numpy as np\n')] |
import numpy as np
import pickle
from classifier import Classifier
from util.layers import *
from util.dump import dump_big_matrix
class NNClassifier(Classifier):
def __init__(self, D, H, W, K, iternum):
Classifier.__init__(self, D, H, W, K, iternum)
self.L = 100 # size of hidden layer
""" Layer 1 Parameters """
# weight matrix: [M * L]
self.A1 = 0.01 * np.random.randn(self.M, self.L)
# bias: [1 * L]
self.b1 = np.zeros((1,self.L))
""" Layer 3 Parameters """
# weight matrix: [L * K]
self.A3 = 0.01 * np.random.randn(self.L, K)
# bias: [1 * K]
self.b3 = np.zeros((1,K))
""" Hyperparams """
# learning rate
self.rho = 1e-2
# momentum
self.mu = 0.9
# reg strencth
self.lam = 0.1
# velocity for A1: [M * L]
self.v1 = np.zeros((self.M, self.L))
# velocity for A3: [L * K]
self.v3 = np.zeros((self.L, K))
return
def load(self, path):
data = pickle.load(open(path + "layer1"))
assert(self.A1.shape == data['w'].shape)
assert(self.b1.shape == data['b'].shape)
self.A1 = data['w']
self.b1 = data['b']
data = pickle.load(open(path + "layer3"))
assert(self.A3.shape == data['w'].shape)
assert(self.b3.shape == data['b'].shape)
self.A3 = data['w']
self.b3 = data['b']
return
def param(self):
return [self.A1, self.b1, self.A3, self.b3]
def forward(self, X, dump_chunks = -1):
A1 = self.A1
b1 = self.b1
A3 = self.A3
b3 = self.b3
"""
Layer 1 : linear
Layer 2 : ReLU
Layer 3 : linear
"""
layer1 = linear_forward(X, A1, b1)
layer2 = ReLU_forward(layer1)
layer3 = linear_forward(layer2, A3, b3)
if dump_chunks > 0:
dump_big_matrix(layer1, "nn_l1_mat", dump_chunks)
dump_big_matrix(layer2, "nn_l2_mat", dump_chunks)
dump_big_matrix(layer3, "nn_l3_mat", dump_chunks)
return [layer1, layer2, layer3]
def backward(self, X, layers, Y, dump_chunks = -1):
A1 = self.A1
b1 = self.b1
A3 = self.A3
b3 = self.b3
layer1, layer2, layer3 = layers
""" softmax classification """
L, dLdl3 = softmax_loss(layer3, Y)
""" backpropagation for Layer 3 """
dLdl2, dLdA3, dLdb3 = linear_backward(dLdl3, layer2, A3)
""" backpropagation for Layer 2 """
dLdl1 = ReLU_backward(dLdl2, layer1)
""" backpropagation for Layer 1 """
dLdX, dLdA1, dLdb1 = linear_backward(dLdl1, X, A1)
""" regularization """
L += 0.5 * self.lam * (np.sum(A1*A1) + np.sum(A3*A3))
""" regularization gradient """
dLdA3 = dLdA3.reshape(A3.shape)
dLdA1 = dLdA1.reshape(A1.shape)
dLdA3 += self.lam * A3
dLdA1 += self.lam * A1
""" tune the parameter """
self.v1 = self.mu * self.v1 - self.rho * dLdA1
self.v3 = self.mu * self.v3 - self.rho * dLdA3
self.A1 += self.v1
self.A3 += self.v3
self.b1 += - self.rho * dLdb1
self.b3 += - self.rho * dLdb3
""" dump """
if dump_chunks > 0:
dump_big_matrix(dLdl3, "nn_dLdl3_mat", dump_chunks)
dump_big_matrix(dLdl2, "nn_dLdl2_mat", dump_chunks)
dump_big_matrix(dLdl1, "nn_dLdl1_mat", dump_chunks)
dump_big_matrix(dLdX, "nn_dLdX_mat", dump_chunks)
dump_big_matrix(dLdA3, "nn_dLdA3_mat", 1)
dump_big_matrix(dLdb3, "nn_dLdb3_mat", 1)
dump_big_matrix(dLdA1, "nn_dLdA1_mat", 1)
dump_big_matrix(dLdb1, "nn_dLdb1_mat", 1)
return L
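# Minimal usage sketch (hypothetical shapes; relies on util.layers providing
# linear_forward/ReLU_forward/softmax_loss as imported above):
#     clf = NNClassifier(D=3, H=32, W=32, K=10, iternum=100)
#     layers = clf.forward(X)            # X: [N * M] flattened images
#     loss = clf.backward(X, layers, Y)  # Y: [N] integer class labels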
| [
"classifier.Classifier.__init__",
"util.dump.dump_big_matrix",
"numpy.sum",
"numpy.zeros",
"numpy.random.randn"
] | [((222, 268), 'classifier.Classifier.__init__', 'Classifier.__init__', (['self', 'D', 'H', 'W', 'K', 'iternum'], {}), '(self, D, H, W, K, iternum)\n', (241, 268), False, 'from classifier import Classifier\n'), ((457, 478), 'numpy.zeros', 'np.zeros', (['(1, self.L)'], {}), '((1, self.L))\n', (465, 478), True, 'import numpy as np\n'), ((621, 637), 'numpy.zeros', 'np.zeros', (['(1, K)'], {}), '((1, K))\n', (629, 637), True, 'import numpy as np\n'), ((820, 846), 'numpy.zeros', 'np.zeros', (['(self.M, self.L)'], {}), '((self.M, self.L))\n', (828, 846), True, 'import numpy as np\n'), ((893, 914), 'numpy.zeros', 'np.zeros', (['(self.L, K)'], {}), '((self.L, K))\n', (901, 914), True, 'import numpy as np\n'), ((391, 422), 'numpy.random.randn', 'np.random.randn', (['self.M', 'self.L'], {}), '(self.M, self.L)\n', (406, 422), True, 'import numpy as np\n'), ((560, 586), 'numpy.random.randn', 'np.random.randn', (['self.L', 'K'], {}), '(self.L, K)\n', (575, 586), True, 'import numpy as np\n'), ((1735, 1784), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['layer1', '"""nn_l1_mat"""', 'dump_chunks'], {}), "(layer1, 'nn_l1_mat', dump_chunks)\n", (1750, 1784), False, 'from util.dump import dump_big_matrix\n'), ((1791, 1840), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['layer2', '"""nn_l2_mat"""', 'dump_chunks'], {}), "(layer2, 'nn_l2_mat', dump_chunks)\n", (1806, 1840), False, 'from util.dump import dump_big_matrix\n'), ((1847, 1896), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['layer3', '"""nn_l3_mat"""', 'dump_chunks'], {}), "(layer3, 'nn_l3_mat', dump_chunks)\n", (1862, 1896), False, 'from util.dump import dump_big_matrix\n'), ((2993, 3044), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdl3', '"""nn_dLdl3_mat"""', 'dump_chunks'], {}), "(dLdl3, 'nn_dLdl3_mat', dump_chunks)\n", (3008, 3044), False, 'from util.dump import dump_big_matrix\n'), ((3051, 3102), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdl2', '"""nn_dLdl2_mat"""', 'dump_chunks'], {}), "(dLdl2, 'nn_dLdl2_mat', dump_chunks)\n", (3066, 3102), False, 'from util.dump import dump_big_matrix\n'), ((3109, 3160), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdl1', '"""nn_dLdl1_mat"""', 'dump_chunks'], {}), "(dLdl1, 'nn_dLdl1_mat', dump_chunks)\n", (3124, 3160), False, 'from util.dump import dump_big_matrix\n'), ((3167, 3216), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdX', '"""nn_dLdX_mat"""', 'dump_chunks'], {}), "(dLdX, 'nn_dLdX_mat', dump_chunks)\n", (3182, 3216), False, 'from util.dump import dump_big_matrix\n'), ((3223, 3264), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdA3', '"""nn_dLdA3_mat"""', '(1)'], {}), "(dLdA3, 'nn_dLdA3_mat', 1)\n", (3238, 3264), False, 'from util.dump import dump_big_matrix\n'), ((3271, 3312), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdb3', '"""nn_dLdb3_mat"""', '(1)'], {}), "(dLdb3, 'nn_dLdb3_mat', 1)\n", (3286, 3312), False, 'from util.dump import dump_big_matrix\n'), ((3319, 3360), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdA1', '"""nn_dLdA1_mat"""', '(1)'], {}), "(dLdA1, 'nn_dLdA1_mat', 1)\n", (3334, 3360), False, 'from util.dump import dump_big_matrix\n'), ((3367, 3408), 'util.dump.dump_big_matrix', 'dump_big_matrix', (['dLdb1', '"""nn_dLdb1_mat"""', '(1)'], {}), "(dLdb1, 'nn_dLdb1_mat', 1)\n", (3382, 3408), False, 'from util.dump import dump_big_matrix\n'), ((2503, 2518), 'numpy.sum', 'np.sum', (['(A1 * A1)'], {}), '(A1 * A1)\n', (2509, 2518), True, 'import numpy as np\n'), ((2519, 2534), 'numpy.sum', 'np.sum', (['(A3 * A3)'], {}), 
'(A3 * A3)\n', (2525, 2534), True, 'import numpy as np\n')] |